From 5eb25bde1ef08c5779e3a8bb58cf97e366716535 Mon Sep 17 00:00:00 2001 From: Jack Williams <1736957+jpwilliams@users.noreply.github.com> Date: Fri, 22 Nov 2024 13:18:16 +0000 Subject: [PATCH] Move providers to adapters and models --- .../src/components/InngestStepTools.test.ts | 24 ++-- .../src/components/InngestStepTools.ts | 73 ++++++++--- packages/inngest/src/components/ai/adapter.ts | 102 +++++++++++++++ .../ai/{providers => adapters}/openai.ts | 70 +---------- packages/inngest/src/components/ai/index.ts | 15 ++- .../src/components/ai/models/gemini.ts | 55 ++++++++ .../src/components/ai/models/openai.ts | 78 ++++++++++++ .../inngest/src/components/ai/provider.ts | 117 ------------------ packages/inngest/src/helpers/consts.ts | 1 + 9 files changed, 312 insertions(+), 223 deletions(-) create mode 100644 packages/inngest/src/components/ai/adapter.ts rename packages/inngest/src/components/ai/{providers => adapters}/openai.ts (91%) create mode 100644 packages/inngest/src/components/ai/models/gemini.ts create mode 100644 packages/inngest/src/components/ai/models/openai.ts delete mode 100644 packages/inngest/src/components/ai/provider.ts diff --git a/packages/inngest/src/components/InngestStepTools.test.ts b/packages/inngest/src/components/InngestStepTools.test.ts index 7125c792..cf9bf447 100644 --- a/packages/inngest/src/components/InngestStepTools.test.ts +++ b/packages/inngest/src/components/InngestStepTools.test.ts @@ -252,7 +252,7 @@ describe("ai", () => { test("return Step step op code", async () => { await expect( step.ai.infer("step", { - provider: openai({ model: "gpt-3.5-turbo" }), + model: openai({ model: "gpt-3.5-turbo" }), body: { messages: [], }, @@ -265,7 +265,7 @@ describe("ai", () => { test("returns `id` as ID", async () => { await expect( step.ai.infer("id", { - provider: openai({ model: "gpt-3.5-turbo" }), + model: openai({ model: "gpt-3.5-turbo" }), body: { messages: [], }, @@ -278,7 +278,7 @@ describe("ai", () => { test("return ID by default", 
async () => { await expect( step.ai.infer("id", { - provider: openai({ model: "gpt-3.5-turbo" }), + model: openai({ model: "gpt-3.5-turbo" }), body: { messages: [], }, @@ -293,7 +293,7 @@ describe("ai", () => { step.ai.infer( { id: "id", name: "name" }, { - provider: openai({ model: "gpt-3.5-turbo" }), + model: openai({ model: "gpt-3.5-turbo" }), body: { messages: [], }, @@ -304,8 +304,8 @@ describe("ai", () => { }); }); - test("requires a provider", () => { - // @ts-expect-error Missing provider + test("requires a model", () => { + // @ts-expect-error Missing model () => step.ai.infer("id", { body: { messages: [] } }); }); @@ -313,14 +313,14 @@ describe("ai", () => { () => // @ts-expect-error Missing body step.ai.infer("id", { - provider: openai({ model: "gpt-3.5-turbo" }), + model: openai({ model: "gpt-3.5-turbo" }), }); }); - test("provider requires the correct body", () => { + test("model requires the correct body", () => { () => step.ai.infer("id", { - provider: openai({ model: "gpt-3.5-turbo" }), + model: openai({ model: "gpt-3.5-turbo" }), // @ts-expect-error Invalid body body: {}, }); @@ -329,7 +329,7 @@ describe("ai", () => { test("accepts the correct body", () => { () => step.ai.infer("id", { - provider: openai({ model: "gpt-3.5-turbo" }), + model: openai({ model: "gpt-3.5-turbo" }), body: { messages: [], }, @@ -339,7 +339,7 @@ describe("ai", () => { test("uses default model if none given", async () => { await expect( step.ai.infer("id", { - provider: openai({ model: "gpt-3.5-turbo" }), + model: openai({ model: "gpt-3.5-turbo" }), body: { messages: [], }, @@ -356,7 +356,7 @@ describe("ai", () => { test("can overwrite model", async () => { await expect( step.ai.infer("id", { - provider: openai({ model: "gpt-3.5-turbo" }), + model: openai({ model: "gpt-3.5-turbo" }), body: { model: "gpt-3.5-something-else", messages: [], diff --git a/packages/inngest/src/components/InngestStepTools.ts b/packages/inngest/src/components/InngestStepTools.ts index 
d94856b4..29f580bf 100644 --- a/packages/inngest/src/components/InngestStepTools.ts +++ b/packages/inngest/src/components/InngestStepTools.ts @@ -30,12 +30,7 @@ import { } from "./Inngest.js"; import { InngestFunction } from "./InngestFunction.js"; import { InngestFunctionReference } from "./InngestFunctionReference.js"; -import { - openai, - type InferOptions, - type InferOutput, - type Provider, -} from "./ai/index.js"; +import { gemini, openai, type AiAdapter } from "./ai/index.js"; import { type InngestExecution } from "./execution/InngestExecution.js"; @@ -355,15 +350,15 @@ export const createStepTools = ( * function and it will be displayed and editable in the UI. */ infer: createTool< - ( + ( idOrOptions: StepOptionsOrId, - options: InferOptions - ) => Promise> + options: AiInferOpts + ) => Promise> >(({ id, name }, options) => { - const providerCopy = { ...options.provider }; + const modelCopy = { ...options.model }; - // Allow the provider to mutate options and body for this call - options.provider.onCall?.(providerCopy, options.body); + // Allow the model to mutate options and body for this call + options.model.onCall?.(modelCopy, options.body); return { id, @@ -371,10 +366,10 @@ export const createStepTools = ( displayName: name ?? id, opts: { type: "step.ai.infer", - url: providerCopy.url, - headers: providerCopy.headers, - auth_key: providerCopy.authKey, - format: providerCopy.format, + url: modelCopy.url, + headers: modelCopy.headers, + auth_key: modelCopy.authKey, + format: modelCopy.format, body: options.body, }, }; @@ -391,15 +386,23 @@ export const createStepTools = ( wrap: createStepRun("step.ai.wrap"), /** - * Providers for AI inference and other AI-related tasks. + * Models for AI inference and other AI-related tasks. */ - providers: { + models: { /** - * Create an OpenAI provider using the OpenAI chat format. + * Create an OpenAI model using the OpenAI chat format. * - * By default it targets the `https://api.openai.com` base URL. 
+ * By default it targets the `https://api.openai.com/v1/` base URL. */ openai, + + /** + * Create a Gemini model using the OpenAI chat format. + * + * By default it targets the `https://generativelanguage.googleapis.com/v1beta/` + * base URL. + */ + gemini, }, }, @@ -666,3 +669,33 @@ type WaitForEventOpts< "match", "if" >; + +/** + * Options for `step.ai.infer()`. + */ +type AiInferOpts = { + /** + * The model to use for the inference. Create a model by importing from + * `"inngest"` or by using `step.ai.models.*`. + * + * @example Import `openai()` + * ```ts + * import { openai } from "inngest"; + * + * const model = openai({ model: "gpt-4" }); + * ``` + * + * @example Use a model from `step.ai.models` + * ```ts + * async ({ step }) => { + * const model = step.ai.models.openai({ model: "gpt-4" }); + * } + * ``` + */ + model: TModel; + + /** + * The input to pass to the model. + */ + body: AiAdapter.Input; +}; diff --git a/packages/inngest/src/components/ai/adapter.ts b/packages/inngest/src/components/ai/adapter.ts new file mode 100644 index 00000000..eca939e1 --- /dev/null +++ b/packages/inngest/src/components/ai/adapter.ts @@ -0,0 +1,102 @@ +/** + * A symbol used internally to define the types for a model whilst keeping + * generics clean. Must not be exported outside of this module. + */ +export declare const types: unique symbol; +export type types = typeof types; + +/** + * An AI model, defining the I/O format and typing, and how to call the model. + * + * Models should extend this interface to define their own input and output + * types. + */ +export interface AiAdapter { + /** + * The I/O format for the adapter. + */ + format: AiAdapter.Format; + + /** + * The input and output types for this AI I/O format. + * + * This is not accessible externally, and is only used internally to define + * the user-facing types for each model in a way that avoids using generics. + */ + [types]: { + /** + * The input typing for the format. 
+ */ + // eslint-disable-next-line @typescript-eslint/no-explicit-any + input: any; + /** + * The output typing for the format. + */ + // eslint-disable-next-line @typescript-eslint/no-explicit-any + output: any; + }; + + /** + * The URL to use for the format. + */ + url?: string; + + /** + * Headers to pass to the format. + */ + headers?: Record; + + /** + * The authentication key to use for the format. + */ + authKey: string; + + /** + * Given the model and a body, mutate them as needed. This is useful for + * addressing any dynamic changes to the model options or body based on each + * other, such as the target URL changing based on a model. + */ + onCall?: ( + /** + * The model to use for the inference. + */ + model: this, + + /** + * The input to pass to the model. + */ + body: this[types]["input"] + ) => void; +} + +/** + * An AI model, defining the I/O format and typing, and how to call the model. + * + * Models should extend this interface to define their own input and output + * types. + */ +export namespace AiAdapter { + /** + * A helper used to infer the input type of an adapter. + */ + export type Input = TAdapter[types]["input"]; + + /** + * A helper used to infer the output type of an adapter. + */ + export type Output = TAdapter[types]["output"]; + + /** + * Supported I/O formats for AI models. + */ + export type Format = "openai-chat"; // | "anthropic" | "gemini" | "bedrock"; + + /** + * A function that creates a model that adheres to an existing AI adapter + * interface. 
+ */ + export type ModelCreator< + TInput extends unknown[], + TOutput extends AiAdapter, + > = (...args: TInput) => TOutput; +} diff --git a/packages/inngest/src/components/ai/providers/openai.ts b/packages/inngest/src/components/ai/adapters/openai.ts similarity index 91% rename from packages/inngest/src/components/ai/providers/openai.ts rename to packages/inngest/src/components/ai/adapters/openai.ts index 20ed7d38..f8f17c30 100644 --- a/packages/inngest/src/components/ai/providers/openai.ts +++ b/packages/inngest/src/components/ai/adapters/openai.ts @@ -1,25 +1,11 @@ -import { envKeys } from "../../../helpers/consts.js"; -import { processEnv } from "../../../helpers/env.js"; -import { type Provider, type types } from "../provider.js"; +import { type AiAdapter, type types } from "../adapter.js"; /** - * IDs of models to use. See the [model endpoint - * compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - * table for details on which models work with the Chat API. + * An OpenAI model using the OpenAI format for I/O. */ -export type OpenAiModel = - | "gpt-4o" - | "chatgpt-4o-latest" - | "gpt-4o-mini" - | "gpt-4" - | "gpt-3.5-turbo"; - -/** - * An OpenAI provider for the OpenAI Chat Completions API. - */ -export interface OpenAiProvider extends Provider { +export interface OpenAiAiAdapter extends AiAdapter { /** - * The format of the I/O for this provider. + * The format of the I/O for this model. */ format: "openai-chat"; @@ -671,51 +657,3 @@ export interface OpenAiProvider extends Provider { }; }; } - -/** - * Options for creating an OpenAI provider. - */ -export interface OpenAiProviderOptions { - /** - * ID of the model to use. See the [model endpoint - * compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - * table for details on which models work with the Chat API. - */ - model: OpenAiModel; - - /** - * The OpenAI API key to use for authenticating your request. 
By default we'll - * search for and use the `OPENAI_API_KEY` environment variable. - */ - apiKey?: string; - - /** - * The base URL for the OpenAI API. - * - * @default "https://api.openai.com" - */ - baseURL?: string; -} - -/** - * Create an OpenAI provider using the OpenAI chat format. - * - * By default it targets the `https://api.openai.com` base URL. - */ -export const openai = (options: OpenAiProviderOptions): OpenAiProvider => { - const authKey = options.apiKey || processEnv(envKeys.OpenAiApiKey) || ""; - - const url = new URL( - "/v1/chat/completions", - options.baseURL || "https://api.openai.com" - ); - - return { - url: url.href, - authKey, - format: "openai-chat", - onCall(provider, body) { - body.model ||= options.model; - }, - } as OpenAiProvider; -}; diff --git a/packages/inngest/src/components/ai/index.ts b/packages/inngest/src/components/ai/index.ts index 2a6d306f..0ad47c01 100644 --- a/packages/inngest/src/components/ai/index.ts +++ b/packages/inngest/src/components/ai/index.ts @@ -1,9 +1,8 @@ -export type { - InferFormat, - InferInput, - InferOptions, - InferOutput, - Provider, -} from "./provider.js"; +export type { AiAdapter } from "./adapter.js"; -export * from "./providers/openai.js"; +// Adapters +export * from "./adapters/openai.js"; + +// Models +export * from "./models/gemini.js"; +export * from "./models/openai.js"; diff --git a/packages/inngest/src/components/ai/models/gemini.ts b/packages/inngest/src/components/ai/models/gemini.ts new file mode 100644 index 00000000..0ed781e4 --- /dev/null +++ b/packages/inngest/src/components/ai/models/gemini.ts @@ -0,0 +1,55 @@ +import { envKeys } from "../../../helpers/consts.js"; +import { processEnv } from "../../../helpers/env.js"; +import { type AiAdapter } from "../adapter.js"; +import { type OpenAi, openai } from "./openai.js"; + +/** + * Create a Gemini model using the OpenAI chat format. + * + * By default it targets the `https://generativelanguage.googleapis.com/v1beta/` + * base URL. 
+ */ +export const gemini: AiAdapter.ModelCreator< + [options: Gemini.AiModelOptions], + Gemini.AiModel +> = (options) => { + const apiKey = options.apiKey || processEnv(envKeys.GeminiApiKey); + const baseUrl = + options.baseUrl || "https://generativelanguage.googleapis.com/v1beta/"; + const model = options.model as OpenAi.Model; + + return openai({ + ...options, + apiKey, + baseUrl, + model, + }); +}; + +export namespace Gemini { + /** + * IDs of models to use. + */ + export type Model = + | "gemini-1.5-flash" + | "gemini-1.5-flash-8b" + | "gemini-1.5-pro" + | "gemini-1.0-pro" + | "text-embedding-004" + | "aqa"; + + /** + * Options for creating a Gemini model. + */ + export interface AiModelOptions extends Omit { + /** + * ID of the model to use. + */ + model: Gemini.Model; + } + + /** + * A Gemini model using the OpenAI format for I/O. + */ + export type AiModel = OpenAi.AiModel; +} diff --git a/packages/inngest/src/components/ai/models/openai.ts b/packages/inngest/src/components/ai/models/openai.ts new file mode 100644 index 00000000..189ad6e0 --- /dev/null +++ b/packages/inngest/src/components/ai/models/openai.ts @@ -0,0 +1,78 @@ +import { envKeys } from "../../../helpers/consts.js"; +import { processEnv } from "../../../helpers/env.js"; +import { type AiAdapter } from "../adapter.js"; +import { type OpenAiAiAdapter } from "../adapters/openai.js"; + +/** + * Create an OpenAI model using the OpenAI chat format. + * + * By default it targets the `https://api.openai.com/v1/` base URL. + */ +export const openai: AiAdapter.ModelCreator< + [options: OpenAi.AiModelOptions], + OpenAi.AiModel +> = (options) => { + const authKey = options.apiKey || processEnv(envKeys.OpenAiApiKey) || ""; + + // Ensure we add a trailing slash to our base URL if it doesn't have one, + // otherwise we'll replace the path instead of appending it. 
+ let baseUrl = options.baseUrl || "https://api.openai.com/v1/"; + if (!baseUrl.endsWith("/")) { + baseUrl += "/"; + } + + const url = new URL("chat/completions", baseUrl); + + return { + url: url.href, + authKey, + format: "openai-chat", + onCall(_, body) { + body.model ||= options.model; + }, + } as OpenAi.AiModel; +}; + +export namespace OpenAi { + /** + * IDs of models to use. See the [model endpoint + * compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) + * table for details on which models work with the Chat API. + */ + export type Model = + | "gpt-4o" + | "chatgpt-4o-latest" + | "gpt-4o-mini" + | "gpt-4" + | "gpt-3.5-turbo"; + + /** + * Options for creating an OpenAI model. + */ + export interface AiModelOptions { + /** + * ID of the model to use. See the [model endpoint + * compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) + * table for details on which models work with the Chat API. + */ + model: Model; + + /** + * The OpenAI API key to use for authenticating your request. By default we'll + * search for and use the `OPENAI_API_KEY` environment variable. + */ + apiKey?: string; + + /** + * The base URL for the OpenAI API. + * + * @default "https://api.openai.com/v1/" + */ + baseUrl?: string; + } + + /** + * An OpenAI model using the OpenAI format for I/O. + */ + export type AiModel = OpenAiAiAdapter; +} diff --git a/packages/inngest/src/components/ai/provider.ts b/packages/inngest/src/components/ai/provider.ts deleted file mode 100644 index 0a3b2674..00000000 --- a/packages/inngest/src/components/ai/provider.ts +++ /dev/null @@ -1,117 +0,0 @@ -/** - * A symbol used internally to define the types for a provider whilst keeping - * generics clean. Must not be exported outside of this module. - */ -export declare const types: unique symbol; -export type types = typeof types; - -/** - * Supported I/O formats for AI providers. 
- */ -export type InferFormat = "openai-chat"; // | "anthropic" | "gemini" | "bedrock"; - -/** - * Options for `step.ai.infer()`. - */ -export interface InferOptions { - /** - * The provider to use for the inference. Create a provider by importing from - * `"inngest"` or by using `step.ai.providers.*`. - * - * @example Import `openai()` - * ```ts - * import { openai } from "inngest"; - * - * const provider = openai({ model: "gpt-4" }); - * ``` - * - * @example Use a provider from `step.ai.providers` - * ```ts - * async ({ step }) => { - * const provider = step.ai.providers.openai({ model: "gpt-4" }); - * } - * ``` - */ - provider: TProvider; - - /** - * The input to pass to the provider. - */ - body: InferInput; -} - -/** - * A helper used to infer the input type of a provider. - */ -export type InferInput = TProvider[types]["input"]; - -/** - * A helper used to infer the output type of a provider. - */ -export type InferOutput = - TProvider[types]["output"]; - -/** - * An AI inference provider, defining the I/O format and typing. - * - * Providers should extend this interface to define their own input and output - * types. - */ -export interface Provider { - /** - * The input and output types for the provider. - * - * This is not accessible externally, and is only used internally to define - * the user-facing types for each provider in a way that avoids using - * generics. - */ - [types]: { - /** - * The input typing for the provider. - */ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - input: any; - /** - * The output typing for the provider. - */ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - output: any; - }; - - /** - * The URL to use for the provider. - */ - url?: string; - - /** - * Headers to pass to the provider. - */ - headers?: Record; - - /** - * The authentication key to use for the provider. - */ - authKey: string; - - /** - * The format of the provider. 
- */ - format: InferFormat; - - /** - * Given the provider and a body, mutate them as needed. This is useful for - * addressing any dynamic changes to the provider options or body based on - * each other, such as the target URL changing based on a model. - */ - onCall?: ( - /** - * The provider to use for the inference. - */ - provider: this, - - /** - * The input to pass to the provider. - */ - body: this[types]["input"] - ) => void; -} diff --git a/packages/inngest/src/helpers/consts.ts b/packages/inngest/src/helpers/consts.ts index 2d7c6ae0..22d57509 100644 --- a/packages/inngest/src/helpers/consts.ts +++ b/packages/inngest/src/helpers/consts.ts @@ -113,6 +113,7 @@ export enum envKeys { VercelEnvKey = "VERCEL_ENV", OpenAiApiKey = "OPENAI_API_KEY", + GeminiApiKey = "GEMINI_API_KEY", } /**