From 59b8f13e140b634db7d4fffb7dfbe35640b36e26 Mon Sep 17 00:00:00 2001
From: Michael Doyle
Date: Sun, 2 Jun 2024 23:29:25 -0400
Subject: [PATCH] Flatten model configuration (i.e. extend from
 GenerationCommonConfig)

---
 package-lock.json                      | 11 +++---
 plugins/anthropic/src/claude.ts        | 40 +++++++++++++---------
 plugins/anthropic/tests/claude_test.ts | 18 ++++------
 plugins/cohere/package.json            |  2 +-
 plugins/cohere/src/command.ts          | 24 +++++---------
 plugins/groq/src/groq_models.ts        |  9 ++---
 plugins/groq/tests/groq_test.ts        |  2 +-
 plugins/mistral/package.json           |  2 +-
 plugins/mistral/src/mistral_llms.ts    | 41 ++++++++---------------
 plugins/openai/src/dalle.ts            | 17 +++++-----
 plugins/openai/src/gpt.ts              | 39 +++++++++++-----------
 plugins/openai/src/tts.ts              | 13 ++++----
 plugins/openai/src/whisper.ts          | 18 +++++-----
 plugins/openai/tests/gpt_test.ts       | 46 ++++++++++++++------------
 14 files changed, 128 insertions(+), 154 deletions(-)

diff --git a/package-lock.json b/package-lock.json
index 07e5df50..b75452cd 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -12958,7 +12958,6 @@
       "version": "0.1.13",
       "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz",
       "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==",
-      "dev": true,
       "optional": true,
       "dependencies": {
         "iconv-lite": "^0.6.2"
       }
@@ -12968,7 +12967,6 @@
       "version": "0.6.3",
       "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
       "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
-      "dev": true,
       "optional": true,
       "dependencies": {
         "safer-buffer": ">= 2.1.2 < 3.0.0"
       }
@@ -29611,7 +29609,6 @@
       "version": "5.4.5",
       "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz",
       "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==",
-      "dev": true,
       "bin": {
         "tsc": "bin/tsc",
         "tsserver": "bin/tsserver"
       }
@@ -31394,8 +31391,8 @@
         "typescript": "^4.9.5"
       },
       "peerDependencies": {
-        "@genkit-ai/ai": "0.5.0",
-        "@genkit-ai/core": "0.5.0"
+        "@genkit-ai/ai": "^0.5.0",
+        "@genkit-ai/core": "^0.5.0"
       }
     },
     "plugins/cohere/node_modules/typescript": {
@@ -31613,8 +31610,8 @@
         "typescript": "^4.9.5"
       },
       "peerDependencies": {
-        "@genkit-ai/ai": "0.5.0",
-        "@genkit-ai/core": "0.5.0"
+        "@genkit-ai/ai": "^0.5.0",
+        "@genkit-ai/core": "^0.5.0"
       }
     },
     "plugins/mistral/node_modules/typescript": {
diff --git a/plugins/anthropic/src/claude.ts b/plugins/anthropic/src/claude.ts
index 77da37e2..f11fcb8b 100644
--- a/plugins/anthropic/src/claude.ts
+++ b/plugins/anthropic/src/claude.ts
@@ -16,6 +16,8 @@
 import { Message } from '@genkit-ai/ai';
 import {
+  GenerationCommonConfigSchema,
+  ModelAction,
   defineModel,
   modelRef,
   type CandidateData,
@@ -35,19 +37,21 @@ const API_NAME_MAP: Record<string, string> = {
   'claude-3-haiku': 'claude-3-haiku-20240307',
 };
 
-const AnthropicConfigSchema = z.object({
-  tool_choice: z.union([
-    z.object({
-      type: z.literal('auto'),
-    }),
-    z.object({
-      type: z.literal('any'),
-    }),
-    z.object({
-      type: z.literal('tool'),
-      name: z.string(),
-    }),
-  ]),
+const AnthropicConfigSchema = GenerationCommonConfigSchema.extend({
+  tool_choice: z
+    .union([
+      z.object({
+        type: z.literal('auto'),
+      }),
+      z.object({
+        type: z.literal('any'),
+      }),
+      z.object({
+        type: z.literal('tool'),
+        name: z.string(),
+      }),
+    ])
+    .optional(),
   metadata: z
     .object({
       user_id: z.string().optional(),
@@ -394,7 +398,7 @@ function fromAnthropicContentBlockChunk(
  */
 export function toAnthropicRequestBody(
   modelName: string,
-  request: GenerateRequest,
+  request: GenerateRequest<typeof AnthropicConfigSchema>,
   stream?: boolean
 ): Anthropic.Beta.Tools.Messages.MessageCreateParams {
   const model = SUPPORTED_CLAUDE_MODELS[modelName];
@@ -411,8 +415,9 @@ export function toAnthropicRequestBody(
     top_p: request.config?.topP,
     temperature: request.config?.temperature,
     stop_sequences: request.config?.stopSequences,
+    metadata: request.config?.metadata,
+    tool_choice: request.config?.tool_choice,
     stream,
-    ...(request.config?.custom || {}),
   };
 
   if (request.output?.format && request.output.format !== 'text') {
@@ -434,7 +439,10 @@
  * @returns The defined Claude model.
 * @throws An error if the specified model is not supported.
 */
-export function claudeModel(name: string, client: Anthropic) {
+export function claudeModel(
+  name: string,
+  client: Anthropic
+): ModelAction<typeof AnthropicConfigSchema> {
   const modelId = `anthropic/${name}`;
   const model = SUPPORTED_CLAUDE_MODELS[name];
   if (!model) throw new Error(`Unsupported model: ${name}`);
diff --git a/plugins/anthropic/tests/claude_test.ts b/plugins/anthropic/tests/claude_test.ts
index db95bd83..8f8905d2 100644
--- a/plugins/anthropic/tests/claude_test.ts
+++ b/plugins/anthropic/tests/claude_test.ts
@@ -310,10 +310,8 @@
       ],
       output: { format: 'text' },
       config: {
-        custom: {
-          metadata: {
-            user_id: 'exampleUser123',
-          },
+        metadata: {
+          user_id: 'exampleUser123',
         },
       },
     },
@@ -345,10 +343,8 @@
       ],
       output: { format: 'text' },
       config: {
-        custom: {
-          metadata: {
-            user_id: 'exampleUser123',
-          },
+        metadata: {
+          user_id: 'exampleUser123',
         },
       },
     },
@@ -380,10 +376,8 @@
       ],
       output: { format: 'text' },
       config: {
-        custom: {
-          metadata: {
-            user_id: 'exampleUser123',
-          },
+        metadata: {
+          user_id: 'exampleUser123',
         },
       },
     },
diff --git a/plugins/cohere/package.json b/plugins/cohere/package.json
index 2d984e5d..0ae527c4 100644
--- a/plugins/cohere/package.json
+++ b/plugins/cohere/package.json
@@ -57,4 +57,4 @@
     "build": "npm-run-all build:clean check compile",
     "build:watch": "tsup-node --watch"
   }
-}
\ No newline at end of file
+}
diff --git a/plugins/cohere/src/command.ts b/plugins/cohere/src/command.ts
index 90c46d13..bb5c55df 100644
--- a/plugins/cohere/src/command.ts
+++ b/plugins/cohere/src/command.ts
@@ -19,7 +19,9 @@ import {
   CandidateData,
   defineModel,
   GenerateRequest,
+  GenerationCommonConfigSchema,
   MessageData,
+  ModelAction,
   modelRef,
   Part,
   Role,
@@ -31,7 +33,7 @@
 import { ChatStreamEndEventFinishReason } from 'cohere-ai/api';
 import z from 'zod';
 
-export const CohereConfigSchema = z.object({
+export const CohereConfigSchema = GenerationCommonConfigSchema.extend({
   frequencyPenalty: z.number().min(-2).max(2).optional(),
   logitBias: z.record(z.string(), z.number().min(-100).max(100)).optional(),
   logProbs: z.boolean().optional(),
@@ -381,21 +383,9 @@ function fromCohereStreamEvent(
 
 export function toCohereRequestBody(
   modelName: string,
-  request: GenerateRequest
+  request: GenerateRequest<typeof CohereConfigSchema>
 ): Cohere.ChatRequest | Cohere.ChatStreamRequest {
   // Note: these types are the same in the Cohere API (not on the surface, e.g. one uses ChatRequestToolResultsItem and the other uses ChatStreamRequestToolResultsItem, but when the types are unwrapped they are exactly the same)
-  const mapToSnakeCase = <T extends Record<string, any>>(
-    obj: T
-  ): Record<string, any> => {
-    return Object.entries(obj).reduce((acc, [key, value]) => {
-      const snakeCaseKey = key.replace(
-        /[A-Z]/g,
-        (letter) => `_${letter.toLowerCase()}`
-      );
-      acc[snakeCaseKey] = value;
-      return acc;
-    }, {});
-  };
   const model = SUPPORTED_COMMAND_MODELS[modelName];
   if (!model) throw new Error(`Unsupported model: ${modelName}`);
   const mappedModelName = request.config?.version || modelName;
@@ -423,7 +413,6 @@
     rawPrompting: request.config?.rawPrompting,
     tools: request.tools?.map(toCohereTool),
     // toolResults: request.messages?.map(toCohereToolResult),
-    ...mapToSnakeCase(request.config?.custom || {}),
   };
 
   for (const key in body) {
@@ -436,7 +425,10 @@
 /**
  *
  */
-export function commandModel(name: string, client: CohereClient) {
+export function commandModel(
+  name: string,
+  client: CohereClient
+): ModelAction<typeof CohereConfigSchema> {
   const modelId = `cohere/${name}`;
   const model = SUPPORTED_COMMAND_MODELS[name];
   if (!model) throw new Error(`Unsupported model: ${name}`);
diff --git a/plugins/groq/src/groq_models.ts b/plugins/groq/src/groq_models.ts
index 4c2c4194..6e993ba8 100644
--- a/plugins/groq/src/groq_models.ts
+++ b/plugins/groq/src/groq_models.ts
@@ -19,6 +19,7 @@ import {
   CandidateData,
   defineModel,
   GenerateRequest,
+  GenerationCommonConfigSchema,
   MessageData,
   modelRef,
   Part,
@@ -36,11 +37,7 @@ import {
 import z from 'zod';
 
-export const GroqConfigSchema = z.object({
-  temperature: z.number().min(0).max(1).optional(),
-  maxTokens: z.number().int().min(1).max(2048).optional(),
-  topP: z.number().min(0).max(1).optional(),
-  stop: z.string().optional(),
+export const GroqConfigSchema = GenerationCommonConfigSchema.extend({
   stream: z.boolean().optional(),
   frequencyPenalty: z.number().optional(),
   logitBias: z.record(z.number()).optional(),
@@ -400,7 +397,7 @@ export function toGroqRequestBody(
     tools: request.tools?.map(toGroqTool),
     model: request.config?.version || DEFAULT_MODEL_VERSION[modelName],
     temperature: request.config?.temperature,
-    max_tokens: request.config?.maxTokens,
+    max_tokens: request.config?.maxOutputTokens,
     top_p: request.config?.topP,
     stop: request.config?.stopSequences,
     n: request.candidates,
diff --git a/plugins/groq/tests/groq_test.ts b/plugins/groq/tests/groq_test.ts
index 9cf9afb4..7bb91a96 100644
--- a/plugins/groq/tests/groq_test.ts
+++ b/plugins/groq/tests/groq_test.ts
@@ -118,7 +118,7 @@ describe('toGroqRequestBody', () => {
       config: {
         temperature: 0.7,
         stopSequences: ['\n'],
-        maxTokens: 100,
+        maxOutputTokens: 100,
         topP: 0.9,
         frequencyPenalty: 0.5,
         logitBias: {
diff --git a/plugins/mistral/package.json b/plugins/mistral/package.json
index 218d2a4a..2c830be5 100644
--- a/plugins/mistral/package.json
+++ b/plugins/mistral/package.json
@@ -57,4 +57,4 @@
     "build": "npm-run-all build:clean check compile",
     "build:watch": "tsup-node --watch"
   }
-}
\ No newline at end of file
+}
diff --git a/plugins/mistral/src/mistral_llms.ts b/plugins/mistral/src/mistral_llms.ts
index 48515176..fbb8d804 100644
--- a/plugins/mistral/src/mistral_llms.ts
+++ b/plugins/mistral/src/mistral_llms.ts
@@ -19,7 +19,9 @@ import {
   CandidateData,
   defineModel,
   GenerateRequest,
+  GenerationCommonConfigSchema,
   MessageData,
+  ModelAction,
   modelRef,
   Part,
   Role,
@@ -38,13 +40,6 @@ import type {
 import z from 'zod';
 
-export const MistralConfigSchema = z.object({
-  temperature: z.number().min(0).max(1).optional(),
-  maxTokens: z.number().int().optional(),
-  topP: z.number().min(0).max(1).optional(),
-  stopSequences: z.array(z.string()).optional(),
-});
-
 export const openMistral7B = modelRef({
   name: 'mistral/open-mistral-7b',
   info: {
@@ -58,7 +53,7 @@ export const openMistral7B = modelRef({
       output: ['text', 'json'],
     },
   },
-  configSchema: MistralConfigSchema,
+  configSchema: GenerationCommonConfigSchema,
 });
 
 export const openMistral8x7B = modelRef({
@@ -104,7 +99,7 @@ export const openMistralSmall = modelRef({
       output: ['text', 'json'],
     },
   },
-  configSchema: MistralConfigSchema,
+  configSchema: GenerationCommonConfigSchema,
 });
 
 export const openMistralMedium = modelRef({
@@ -120,7 +115,7 @@ export const openMistralMedium = modelRef({
       output: ['text', 'json'],
     },
   },
-  configSchema: MistralConfigSchema,
+  configSchema: GenerationCommonConfigSchema,
 });
 
 export const openMistralLarge = modelRef({
@@ -136,7 +131,7 @@ export const openMistralLarge = modelRef({
       output: ['text', 'json'],
     },
   },
-  configSchema: MistralConfigSchema,
+  configSchema: GenerationCommonConfigSchema,
 });
 
 function toMistralRole(role: Role): string {
@@ -254,27 +249,15 @@ function fromMistralChunkChoice(
 
 export function toMistralRequestBody(
   modelName: string,
-  request: GenerateRequest
+  request: GenerateRequest<typeof GenerationCommonConfigSchema>
 ) {
-  const mapToSnakeCase = <T extends Record<string, any>>(
-    obj: T
-  ): Record<string, any> => {
-    return Object.entries(obj).reduce((acc, [key, value]) => {
-      const snakeCaseKey = key.replace(
-        /[A-Z]/g,
-        (letter) => `_${letter.toLowerCase()}`
-      );
-      acc[snakeCaseKey] = value;
-      return acc;
-    }, {});
-  };
   const model = SUPPORTED_MISTRAL_MODELS[modelName];
   if (!model) throw new Error(`Unsupported model: ${modelName}`);
   const mistralMessages = toMistralMessages(request.messages);
   const mappedModelName = request.config?.version || modelName;
 
   let responseFormat;
-  if (request.config?.responseFormat !== 'json') {
+  if (request.output?.format === 'json') {
     responseFormat = { type: 'json_object' };
   } else {
     responseFormat = null;
@@ -283,13 +266,12 @@ export function toMistralRequestBody(
     messages: mistralMessages,
     tools: request.tools?.map(toMistralTool),
     model: mappedModelName,
-    max_tokens: request.config?.maxTokens,
+    max_tokens: request.config?.maxOutputTokens,
     temperature: request.config?.temperature,
     top_p: request.config?.topP,
     n: request.candidates,
     stop_sequences: request.config?.stopSequences,
     responseFormat: responseFormat,
-    ...mapToSnakeCase(request.config?.custom || {}),
   } as ChatRequest;
 
   for (const key in body) {
@@ -299,7 +281,10 @@ export function toMistralRequestBody(
   return body;
 }
 
-export function mistralModel(name: string, client: any) {
+export function mistralModel(
+  name: string,
+  client: any
+): ModelAction<typeof GenerationCommonConfigSchema> {
   //Ugly any type, should be MistralClient but cannot import it here
   const modelId = `mistral/${name}`;
   const model = SUPPORTED_MISTRAL_MODELS[name];
diff --git a/plugins/openai/src/dalle.ts b/plugins/openai/src/dalle.ts
index cacd4378..a25af76c 100644
--- a/plugins/openai/src/dalle.ts
+++ b/plugins/openai/src/dalle.ts
@@ -16,6 +16,7 @@
 import { Message } from '@genkit-ai/ai';
 import {
+  GenerationCommonConfigSchema,
   defineModel,
   modelRef,
   type GenerateRequest,
@@ -29,7 +30,7 @@ import {
 } from 'openai/resources/images.mjs';
 import { z } from 'zod';
 
-export const DallE3ConfigSchema = z.object({
+export const DallE3ConfigSchema = GenerationCommonConfigSchema.extend({
   size: z.enum(['1024x1024', '1792x1024', '1024x1792']).optional(),
   style: z.enum(['vivid', 'natural']).optional(),
   user: z.string().optional(),
@@ -53,19 +54,17 @@ export const dallE3 = modelRef({
 });
 
 function toDallE3Request(
-  request: GenerateRequest & {
-    config?: { custom?: z.infer<typeof DallE3ConfigSchema> };
-  }
+  request: GenerateRequest<typeof DallE3ConfigSchema>
 ): ImageGenerateParams {
   const options = {
     model: 'dall-e-3',
     prompt: new Message(request.messages[0]).text(),
     n: request.candidates || 1,
-    size: request.config?.custom?.size,
-    style: request.config?.custom?.style,
-    user: request.config?.custom?.user,
-    quality: request.config?.custom?.quality,
-    response_format: request.config?.custom?.response_format || 'b64_json',
+    size: request.config?.size,
+    style: request.config?.style,
+    user: request.config?.user,
+    quality: request.config?.quality,
+    response_format: request.config?.response_format || 'b64_json',
   };
   for (const k in options) {
     if (options[k] === undefined) {
diff --git a/plugins/openai/src/gpt.ts b/plugins/openai/src/gpt.ts
index 2e4085b3..b35f1afb 100644
--- a/plugins/openai/src/gpt.ts
+++ b/plugins/openai/src/gpt.ts
@@ -16,6 +16,8 @@
 import { Message } from '@genkit-ai/ai';
 import {
+  GenerationCommonConfigSchema,
+  ModelAction,
   defineModel,
   modelRef,
   type CandidateData,
@@ -53,7 +55,7 @@ const MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT = [
   'gpt-3.5-turbo-1106',
 ];
 
-export const OpenAiConfigSchema = z.object({
+export const OpenAiConfigSchema = GenerationCommonConfigSchema.extend({
   frequencyPenalty: z.number().min(-2).max(2).optional(),
   logitBias: z.record(z.string(), z.number().min(-100).max(100)).optional(),
   logProbs: z.boolean().optional(),
@@ -408,20 +410,8 @@ function fromOpenAiChunkChoice(
  */
 export function toOpenAiRequestBody(
   modelName: string,
-  request: GenerateRequest
+  request: GenerateRequest<typeof OpenAiConfigSchema>
 ) {
-  const mapToSnakeCase = <T extends Record<string, any>>(
-    obj: T
-  ): Record<string, any> => {
-    return Object.entries(obj).reduce((acc, [key, value]) => {
-      const snakeCaseKey = key.replace(
-        /[A-Z]/g,
-        (letter) => `_${letter.toLowerCase()}`
-      );
-      acc[snakeCaseKey] = value;
-      return acc;
-    }, {});
-  };
   const model = SUPPORTED_GPT_MODELS[modelName];
   if (!model) throw new Error(`Unsupported model: ${modelName}`);
   const openAiMessages = toOpenAiMessages(
@@ -430,15 +420,21 @@ export function toOpenAiRequestBody(
   );
   const mappedModelName = request.config?.version || modelName;
   const body = {
-    messages: openAiMessages,
-    tools: request.tools?.map(toOpenAiTool),
     model: mappedModelName,
-    max_tokens: request.config?.maxOutputTokens,
+    messages: openAiMessages,
     temperature: request.config?.temperature,
+    max_tokens: request.config?.maxOutputTokens,
     top_p: request.config?.topP,
-    n: request.candidates,
     stop: request.config?.stopSequences,
-    ...mapToSnakeCase(request.config?.custom || {}),
+    frequency_penalty: request.config?.frequencyPenalty,
+    logit_bias: request.config?.logitBias,
+    logprobs: request.config?.logProbs, // logprobs not snake case!
+    presence_penalty: request.config?.presencePenalty,
+    seed: request.config?.seed,
+    top_logprobs: request.config?.topLogProbs, // logprobs not snake case!
+    user: request.config?.user,
+    tools: request.tools?.map(toOpenAiTool),
+    n: request.candidates,
   } as ChatCompletionCreateParamsNonStreaming;
 
   const response_format = request.output?.format;
@@ -480,7 +476,10 @@
  * @returns The defined GPT model.
 * @throws An error if the specified model is not supported.
 */
-export function gptModel(name: string, client: OpenAI) {
+export function gptModel(
+  name: string,
+  client: OpenAI
+): ModelAction<typeof OpenAiConfigSchema> {
   const modelId = `openai/${name}`;
   const model = SUPPORTED_GPT_MODELS[name];
   if (!model) throw new Error(`Unsupported model: ${name}`);
diff --git a/plugins/openai/src/tts.ts b/plugins/openai/src/tts.ts
index f470071c..8cea6ca0 100644
--- a/plugins/openai/src/tts.ts
+++ b/plugins/openai/src/tts.ts
@@ -16,6 +16,7 @@
 import { Message } from '@genkit-ai/ai';
 import {
+  GenerationCommonConfigSchema,
   defineModel,
   modelRef,
   type GenerateRequest,
@@ -26,7 +27,7 @@ import OpenAI from 'openai';
 import { type SpeechCreateParams } from 'openai/resources/audio/index.mjs';
 import { z } from 'zod';
 
-export const TTSConfigSchema = z.object({
+export const TTSConfigSchema = GenerationCommonConfigSchema.extend({
   voice: z
     .enum(['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer'])
     .optional()
@@ -83,17 +84,15 @@ export const RESPONSE_FORMAT_MEDIA_TYPES = {
 
 function toTTSRequest(
   modelName: string,
-  request: GenerateRequest & {
-    config?: { custom?: z.infer<typeof TTSConfigSchema> };
-  }
+  request: GenerateRequest<typeof TTSConfigSchema>
 ): SpeechCreateParams {
   const mappedModelName = request.config?.version || modelName;
   const options: SpeechCreateParams = {
     model: mappedModelName,
     input: new Message(request.messages[0]).text(),
-    voice: request.config?.custom?.voice ?? 'alloy',
-    speed: request.config?.custom?.speed,
-    response_format: request.config?.custom?.response_format,
+    voice: request.config?.voice ?? 'alloy',
+    speed: request.config?.speed,
+    response_format: request.config?.response_format,
   };
   for (const k in options) {
     if (options[k] === undefined) {
diff --git a/plugins/openai/src/whisper.ts b/plugins/openai/src/whisper.ts
index 88651bda..36817b21 100644
--- a/plugins/openai/src/whisper.ts
+++ b/plugins/openai/src/whisper.ts
@@ -16,6 +16,7 @@
 import { Message } from '@genkit-ai/ai';
 import {
+  GenerationCommonConfigSchema,
   defineModel,
   modelRef,
   type GenerateRequest,
@@ -29,7 +30,7 @@ import {
 } from 'openai/resources/audio/index.mjs';
 import { z } from 'zod';
 
-export const Whisper1ConfigSchema = z.object({
+export const Whisper1ConfigSchema = GenerationCommonConfigSchema.extend({
   language: z.string().optional(),
   timestamp_granularities: z.array(z.enum(['word', 'segment'])).optional(),
   response_format: z
@@ -53,9 +54,7 @@ export const whisper1 = modelRef({
 });
 
 function toWhisper1Request(
-  request: GenerateRequest & {
-    config?: { custom?: z.infer<typeof Whisper1ConfigSchema> };
-  }
+  request: GenerateRequest<typeof Whisper1ConfigSchema>
 ): TranscriptionCreateParams {
   const message = new Message(request.messages[0]);
   const media = message.media();
@@ -76,22 +75,25 @@ function toWhisper1Request(
     file: mediaFile,
     prompt: message.text(),
     temperature: request.config?.temperature,
-    language: request.config?.custom?.language,
-    timestamp_granularities: request.config?.custom?.timestamp_granularities,
+    language: request.config?.language,
+    timestamp_granularities: request.config?.timestamp_granularities,
   };
   const outputFormat = request.output?.format;
-  const customFormat = request.config?.custom?.response_format;
+  const customFormat = request.config?.response_format;
   if (outputFormat && customFormat) {
     if (
       outputFormat === 'json' &&
       customFormat !== 'json' &&
-      customFormat !== 'json_verbose'
+      customFormat !== 'verbose_json'
     ) {
       throw new Error(
         `Custom response format ${customFormat} is not compatible with output format ${outputFormat}`
       );
     }
   }
+  if (outputFormat === 'media') {
+    throw new Error(`Output format ${outputFormat} is not supported.`);
+  }
   options.response_format = customFormat || outputFormat || 'text';
   for (const k in options) {
     if (options[k] === undefined) {
diff --git a/plugins/openai/tests/gpt_test.ts b/plugins/openai/tests/gpt_test.ts
index b31ddb84..94fdffb8 100644
--- a/plugins/openai/tests/gpt_test.ts
+++ b/plugins/openai/tests/gpt_test.ts
@@ -17,7 +17,11 @@
 import { GenerateRequest, MessageData } from '@genkit-ai/ai/model';
 import assert from 'node:assert';
 import { describe, it } from 'node:test';
-import { toOpenAiMessages, toOpenAiRequestBody } from '../src/gpt.js';
+import {
+  OpenAiConfigSchema,
+  toOpenAiMessages,
+  toOpenAiRequestBody,
+} from '../src/gpt.js';
 
 describe('toOpenAiMessages', () => {
   const testCases = [
@@ -145,20 +149,18 @@ describe('toOpenAiRequestBody', () => {
       tools: [],
       output: { format: 'text' },
       config: {
-        custom: {
-          frequencyPenalty: 0.7,
-          logitBias: {
-            science: 12,
-            technology: 8,
-            politics: -5,
-            sports: 3,
-          },
-          logProbs: true,
-          presencePenalty: -0.3,
-          seed: 42,
-          topLogProbs: 10,
-          user: 'exampleUser123',
-        },
+        frequencyPenalty: 0.7,
+        logitBias: {
+          science: 12,
+          technology: 8,
+          politics: -5,
+          sports: 3,
+        },
+        logProbs: true,
+        presencePenalty: -0.3,
+        seed: 42,
+        topLogProbs: 10,
+        user: 'exampleUser123',
       },
     },
     expectedOutput: {
@@ -177,10 +179,10 @@ describe('toOpenAiRequestBody', () => {
             politics: -5,
             sports: 3,
           },
-          log_probs: true,
+          logprobs: true,
           presence_penalty: -0.3,
           seed: 42,
-          top_log_probs: 10,
+          top_logprobs: 10,
           user: 'exampleUser123',
         },
       },
@@ -571,7 +573,7 @@
     it(test.should, () => {
       const actualOutput = toOpenAiRequestBody(
         test.modelName,
-        test.genkitRequest as GenerateRequest
+        test.genkitRequest as GenerateRequest<typeof OpenAiConfigSchema>
       );
       assert.deepStrictEqual(actualOutput, test.expectedOutput);
     });
@@ -720,11 +722,11 @@
     };
     const actualOutput1 = toOpenAiRequestBody(
       modelName,
-      genkitRequestTextFormat as GenerateRequest
+      genkitRequestTextFormat as GenerateRequest<typeof OpenAiConfigSchema>
     );
     const actualOutput2 = toOpenAiRequestBody(
       modelName,
-      genkitRequestJsonFormat as GenerateRequest
+      genkitRequestJsonFormat as GenerateRequest<typeof OpenAiConfigSchema>
    );
     assert.deepStrictEqual(actualOutput1, expectedOutput);
     assert.deepStrictEqual(actualOutput2, expectedOutput);
@@ -872,11 +874,11 @@
     };
     const actualOutput1 = toOpenAiRequestBody(
       modelName,
-      genkitRequestTextFormat as GenerateRequest
+      genkitRequestTextFormat as GenerateRequest<typeof OpenAiConfigSchema>
     );
     const actualOutput2 = toOpenAiRequestBody(
       modelName,
-      genkitRequestJsonFormat as GenerateRequest
+      genkitRequestJsonFormat as GenerateRequest<typeof OpenAiConfigSchema>
     );
     assert.deepStrictEqual(actualOutput1, expectedOutput);
     assert.deepStrictEqual(actualOutput2, expectedOutput);
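
Reviewer notes (usage sketches, not part of the diff):

1. What the flattening means for callers: provider-specific options move from
config.custom to the top level of config. A minimal before/after sketch,
assuming the Genkit 0.5 generate() API from '@genkit-ai/ai' and a claude3Haiku
model ref exported by this plugin (both names are assumptions here, and the
prompt is illustrative):

    import { generate } from '@genkit-ai/ai';
    import { claude3Haiku } from 'genkitx-anthropic'; // assumed export name

    // Before this patch: Anthropic-specific fields were nested under `custom`.
    await generate({
      model: claude3Haiku,
      prompt: 'Tell me a joke.',
      config: { custom: { metadata: { user_id: 'exampleUser123' } } },
    });

    // After this patch: common and provider-specific fields sit side by side,
    // all validated by AnthropicConfigSchema, which extends
    // GenerationCommonConfigSchema.
    await generate({
      model: claude3Haiku,
      prompt: 'Tell me a joke.',
      config: {
        temperature: 0.7, // inherited from GenerationCommonConfigSchema
        metadata: { user_id: 'exampleUser123' }, // Anthropic-specific field
      },
    });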
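
2. The refactor applies one pattern everywhere: extend
GenerationCommonConfigSchema instead of declaring a standalone z.object(), and
map each config field to its wire-format name explicitly instead of funneling
request.config.custom through the removed mapToSnakeCase helper, which silently
passed unknown keys through to the provider. A condensed sketch of the shape,
using a hypothetical ExampleConfigSchema that is not part of this repo (import
path follows the '@genkit-ai/ai/model' module used in gpt_test.ts):

    import {
      GenerationCommonConfigSchema,
      type GenerateRequest,
    } from '@genkit-ai/ai/model';
    import { z } from 'zod';

    // Inherits version, temperature, maxOutputTokens, topP, stopSequences, ...
    export const ExampleConfigSchema = GenerationCommonConfigSchema.extend({
      presencePenalty: z.number().min(-2).max(2).optional(), // provider-specific
    });

    function toExampleRequestBody(
      request: GenerateRequest<typeof ExampleConfigSchema>
    ) {
      // Explicit field-by-field mapping keeps camelCase -> snake_case renames
      // visible and reviewable, unlike the generic casing helper it replaces.
      return {
        temperature: request.config?.temperature,
        max_tokens: request.config?.maxOutputTokens,
        presence_penalty: request.config?.presencePenalty,
      };
    }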
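
3. Typing the request against the schema is what turns renames like
maxTokens -> maxOutputTokens (see the Groq and Mistral hunks) into compile-time
errors instead of silently dropped options. A sketch, with a hypothetical
relative import path:

    import type { GenerateRequest } from '@genkit-ai/ai/model';
    import { GroqConfigSchema } from '../src/groq_models'; // path hypothetical

    const request: GenerateRequest<typeof GroqConfigSchema> = {
      messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
      config: {
        maxOutputTokens: 100, // `maxTokens: 100` would now fail type-checking
      },
    };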