Skip to content

Commit

Permalink
Move providers to adapters and models
Browse files Browse the repository at this point in the history
  • Loading branch information
jpwilliams committed Nov 22, 2024
1 parent 14a671e commit 5eb25bd
Show file tree
Hide file tree
Showing 9 changed files with 312 additions and 223 deletions.
24 changes: 12 additions & 12 deletions packages/inngest/src/components/InngestStepTools.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -252,7 +252,7 @@ describe("ai", () => {
test("return Step step op code", async () => {
await expect(
step.ai.infer("step", {
provider: openai({ model: "gpt-3.5-turbo" }),
model: openai({ model: "gpt-3.5-turbo" }),
body: {
messages: [],
},
Expand All @@ -265,7 +265,7 @@ describe("ai", () => {
test("returns `id` as ID", async () => {
await expect(
step.ai.infer("id", {
provider: openai({ model: "gpt-3.5-turbo" }),
model: openai({ model: "gpt-3.5-turbo" }),
body: {
messages: [],
},
Expand All @@ -278,7 +278,7 @@ describe("ai", () => {
test("return ID by default", async () => {
await expect(
step.ai.infer("id", {
provider: openai({ model: "gpt-3.5-turbo" }),
model: openai({ model: "gpt-3.5-turbo" }),
body: {
messages: [],
},
Expand All @@ -293,7 +293,7 @@ describe("ai", () => {
step.ai.infer(
{ id: "id", name: "name" },
{
provider: openai({ model: "gpt-3.5-turbo" }),
model: openai({ model: "gpt-3.5-turbo" }),
body: {
messages: [],
},
Expand All @@ -304,23 +304,23 @@ describe("ai", () => {
});
});

test("requires a provider", () => {
// @ts-expect-error Missing provider
test("requires a model", () => {
// @ts-expect-error Missing model
() => step.ai.infer("id", { body: { messages: [] } });
});

test("requires a body", () => {
() =>
// @ts-expect-error Missing body
step.ai.infer("id", {
provider: openai({ model: "gpt-3.5-turbo" }),
model: openai({ model: "gpt-3.5-turbo" }),
});
});

test("provider requires the correct body", () => {
test("model requires the correct body", () => {
() =>
step.ai.infer("id", {
provider: openai({ model: "gpt-3.5-turbo" }),
model: openai({ model: "gpt-3.5-turbo" }),
// @ts-expect-error Invalid body
body: {},
});
Expand All @@ -329,7 +329,7 @@ describe("ai", () => {
test("accepts the correct body", () => {
() =>
step.ai.infer("id", {
provider: openai({ model: "gpt-3.5-turbo" }),
model: openai({ model: "gpt-3.5-turbo" }),
body: {
messages: [],
},
Expand All @@ -339,7 +339,7 @@ describe("ai", () => {
test("uses default model if none given", async () => {
await expect(
step.ai.infer("id", {
provider: openai({ model: "gpt-3.5-turbo" }),
model: openai({ model: "gpt-3.5-turbo" }),
body: {
messages: [],
},
Expand All @@ -356,7 +356,7 @@ describe("ai", () => {
test("can overwrite model", async () => {
await expect(
step.ai.infer("id", {
provider: openai({ model: "gpt-3.5-turbo" }),
model: openai({ model: "gpt-3.5-turbo" }),
body: {
model: "gpt-3.5-something-else",
messages: [],
Expand Down
73 changes: 53 additions & 20 deletions packages/inngest/src/components/InngestStepTools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,7 @@ import {
} from "./Inngest.js";
import { InngestFunction } from "./InngestFunction.js";
import { InngestFunctionReference } from "./InngestFunctionReference.js";
import {
openai,
type InferOptions,
type InferOutput,
type Provider,
} from "./ai/index.js";
import { gemini, openai, type AiAdapter } from "./ai/index.js";

import { type InngestExecution } from "./execution/InngestExecution.js";

Expand Down Expand Up @@ -355,26 +350,26 @@ export const createStepTools = <TClient extends Inngest.Any>(
* function and it will be displayed and editable in the UI.
*/
infer: createTool<
<TProvider extends Provider>(
<TAdapter extends AiAdapter>(
idOrOptions: StepOptionsOrId,
options: InferOptions<TProvider>
) => Promise<InferOutput<TProvider>>
options: AiInferOpts<TAdapter>
) => Promise<AiAdapter.Output<TAdapter>>
>(({ id, name }, options) => {
const providerCopy = { ...options.provider };
const modelCopy = { ...options.model };

// Allow the provider to mutate options and body for this call
options.provider.onCall?.(providerCopy, options.body);
// Allow the model to mutate options and body for this call
options.model.onCall?.(modelCopy, options.body);

return {
id,
op: StepOpCode.AiGateway,
displayName: name ?? id,
opts: {
type: "step.ai.infer",
url: providerCopy.url,
headers: providerCopy.headers,
auth_key: providerCopy.authKey,
format: providerCopy.format,
url: modelCopy.url,
headers: modelCopy.headers,
auth_key: modelCopy.authKey,
format: modelCopy.format,
body: options.body,
},
};
Expand All @@ -391,15 +386,23 @@ export const createStepTools = <TClient extends Inngest.Any>(
wrap: createStepRun("step.ai.wrap"),

/**
* Providers for AI inference and other AI-related tasks.
* Models for AI inference and other AI-related tasks.
*/
providers: {
models: {
/**
* Create an OpenAI provider using the OpenAI chat format.
* Create an OpenAI model using the OpenAI chat format.
*
* By default it targets the `https://api.openai.com` base URL.
* By default it targets the `https://api.openai.com/v1/` base URL.
*/
openai,

/**
* Create a Gemini model using the OpenAI chat format.
*
* By default it targets the `https://generativelanguage.googleapis.com/v1beta/`
* base URL.
*/
gemini,
},
},

Expand Down Expand Up @@ -666,3 +669,33 @@ type WaitForEventOpts<
"match",
"if"
>;

/**
 * Options accepted by `step.ai.infer()`.
 */
type AiInferOpts<TModel extends AiAdapter> = {
  /**
   * The model to use for the inference. Create a model by importing a creator
   * (such as `openai`) from `"inngest"`, or by using the `step.ai.models.*`
   * helpers.
   *
   * @example Import `openai()`
   * ```ts
   * import { openai } from "inngest";
   *
   * const model = openai({ model: "gpt-4" });
   * ```
   *
   * @example Use a model from `step.ai.models`
   * ```ts
   * async ({ step }) => {
   *   const model = step.ai.models.openai({ model: "gpt-4" });
   * }
   * ```
   */
  model: TModel;

  /**
   * The request body to pass to the model, typed according to the model's
   * adapter input format (`AiAdapter.Input`).
   */
  body: AiAdapter.Input<TModel>;
};
102 changes: 102 additions & 0 deletions packages/inngest/src/components/ai/adapter.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
/**
 * A symbol used internally to define the types for a model whilst keeping
 * generics clean. Must not be exported outside of this module.
 *
 * It is only ever `declare`d (never assigned), so it exists purely at the
 * type level; adapters use it as a computed property key to carry their I/O
 * typings.
 */
export declare const types: unique symbol;
export type types = typeof types;

/**
 * An AI model, defining the I/O format and typing, and how to call the model.
 *
 * Models should extend this interface to define their own input and output
 * types.
 */
export interface AiAdapter {
  /**
   * The I/O format for the adapter.
   */
  format: AiAdapter.Format;

  /**
   * The input and output types for this AI I/O format.
   *
   * This is not accessible externally, and is only used internally to define
   * the user-facing types for each model in a way that avoids using generics.
   */
  [types]: {
    /**
     * The input typing for the format.
     */
    // `any` is deliberate here: concrete adapters narrow this via their own
    // `[types]` declaration, and `AiAdapter.Input` reads it back out.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    input: any;

    /**
     * The output typing for the format.
     */
    // As above — narrowed by concrete adapters, read by `AiAdapter.Output`.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    output: any;
  };

  /**
   * The URL to use for the format.
   */
  url?: string;

  /**
   * Headers to pass to the format.
   */
  headers?: Record<string, string>;

  /**
   * The authentication key to use for the format.
   *
   * Required; model creators are expected to resolve this when the model is
   * constructed (e.g. from an option or an environment variable).
   */
  authKey: string;

  /**
   * Given the model and a body, mutate them as needed. This is useful for
   * addressing any dynamic changes to the model options or body based on each
   * other, such as the target URL changing based on a model.
   */
  onCall?: (
    /**
     * The model to use for the inference.
     */
    model: this,

    /**
     * The input to pass to the model.
     */
    body: this[types]["input"]
  ) => void;
}

/**
 * Companion namespace for the `AiAdapter` interface, providing helper types
 * for working with adapters: inferring their input/output types, the set of
 * supported wire formats, and the shape of model-creator functions.
 */
export namespace AiAdapter {
  /**
   * A helper used to infer the input type of an adapter.
   */
  export type Input<TAdapter extends AiAdapter> = TAdapter[types]["input"];

  /**
   * A helper used to infer the output type of an adapter.
   */
  export type Output<TAdapter extends AiAdapter> = TAdapter[types]["output"];

  /**
   * Supported I/O formats for AI models.
   */
  export type Format = "openai-chat"; // | "anthropic" | "gemini" | "bedrock";

  /**
   * A function that creates a model that adheres to an existing AI adapter
   * interface.
   */
  export type ModelCreator<
    TInput extends unknown[],
    TOutput extends AiAdapter,
  > = (...args: TInput) => TOutput;
}
Original file line number Diff line number Diff line change
@@ -1,25 +1,11 @@
import { envKeys } from "../../../helpers/consts.js";
import { processEnv } from "../../../helpers/env.js";
import { type Provider, type types } from "../provider.js";
import { type AiAdapter, type types } from "../adapter.js";

/**
* IDs of models to use. See the [model endpoint
* compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
* table for details on which models work with the Chat API.
* An OpenAI model using the OpenAI format for I/O.
*/
export type OpenAiModel =
| "gpt-4o"
| "chatgpt-4o-latest"
| "gpt-4o-mini"
| "gpt-4"
| "gpt-3.5-turbo";

/**
* An OpenAI provider for the OpenAI Chat Completions API.
*/
export interface OpenAiProvider extends Provider {
export interface OpenAiAiAdapter extends AiAdapter {
/**
* The format of the I/O for this provider.
* The format of the I/O for this model.
*/
format: "openai-chat";

Expand Down Expand Up @@ -671,51 +657,3 @@ export interface OpenAiProvider extends Provider {
};
};
}

/**
* Options for creating an OpenAI provider.
*/
export interface OpenAiProviderOptions {
/**
* ID of the model to use. See the [model endpoint
* compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
* table for details on which models work with the Chat API.
*/
model: OpenAiModel;

/**
* The OpenAI API key to use for authenticating your request. By default we'll
* search for and use the `OPENAI_API_KEY` environment variable.
*/
apiKey?: string;

/**
* The base URL for the OpenAI API.
*
* @default "https://api.openai.com"
*/
baseURL?: string;
}

/**
* Create an OpenAI provider using the OpenAI chat format.
*
* By default it targets the `https://api.openai.com` base URL.
*/
export const openai = (options: OpenAiProviderOptions): OpenAiProvider => {
const authKey = options.apiKey || processEnv(envKeys.OpenAiApiKey) || "";

const url = new URL(
"/v1/chat/completions",
options.baseURL || "https://api.openai.com"
);

return {
url: url.href,
authKey,
format: "openai-chat",
onCall(provider, body) {
body.model ||= options.model;
},
} as OpenAiProvider;
};
Loading

0 comments on commit 5eb25bd

Please sign in to comment.