openai-experimental[patch]: Adds experimental raw response field to OpenAI chat models #6087

Merged 4 commits on Jul 17, 2024.
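This PR adds an experimental `__includeRawResponse` flag to `ChatOpenAI`: when enabled, the untouched OpenAI SDK payload is attached to the returned message under `additional_kwargs.__raw_response`. A minimal usage sketch, inferred from the diff and the new tests below (the model name and prompt are arbitrary):

```typescript
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";

const chat = new ChatOpenAI({
  modelName: "gpt-3.5-turbo-1106",
  __includeRawResponse: true, // experimental: attach the raw API payload
});

const res = await chat.invoke([new HumanMessage("Hello!")]);
// For invoke(), this is the raw ChatCompletion object from the OpenAI SDK.
console.log(res.additional_kwargs.__raw_response);
```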
42 changes: 30 additions & 12 deletions libs/langchain-openai/src/chat_models.ts
@@ -129,7 +129,8 @@ export function messageToOpenAIRole(message: BaseMessage): OpenAIRoleEnum {
 
 function openAIResponseToChatMessage(
   message: OpenAIClient.Chat.Completions.ChatCompletionMessage,
-  messageId: string
+  rawResponse: OpenAIClient.Chat.Completions.ChatCompletion,
+  includeRawResponse?: boolean
 ): BaseMessage {
   const rawToolCalls: OpenAIToolCall[] | undefined = message.tool_calls as
     | OpenAIToolCall[]
@@ -146,15 +147,19 @@ function openAIResponseToChatMessage(
           invalidToolCalls.push(makeInvalidToolCall(rawToolCall, e.message));
         }
       }
+      const additional_kwargs: Record<string, unknown> = {
+        function_call: message.function_call,
+        tool_calls: rawToolCalls,
+      };
+      if (includeRawResponse !== undefined) {
+        additional_kwargs.__raw_response = rawResponse;
+      }
       return new AIMessage({
         content: message.content || "",
         tool_calls: toolCalls,
         invalid_tool_calls: invalidToolCalls,
-        additional_kwargs: {
-          function_call: message.function_call,
-          tool_calls: rawToolCalls,
-        },
-        id: messageId,
+        additional_kwargs,
+        id: rawResponse.id,
       });
     }
     default:
@@ -165,12 +170,13 @@ function openAIResponseToChatMessage(
 function _convertDeltaToMessageChunk(
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
   delta: Record<string, any>,
-  messageId: string,
-  defaultRole?: OpenAIRoleEnum
+  rawResponse: OpenAIClient.Chat.Completions.ChatCompletionChunk,
+  defaultRole?: OpenAIRoleEnum,
+  includeRawResponse?: boolean
 ) {
   const role = delta.role ?? defaultRole;
   const content = delta.content ?? "";
-  let additional_kwargs;
+  let additional_kwargs: Record<string, unknown>;
   if (delta.function_call) {
     additional_kwargs = {
       function_call: delta.function_call,
@@ -182,6 +188,9 @@ function _convertDeltaToMessageChunk(
   } else {
     additional_kwargs = {};
   }
+  if (includeRawResponse) {
+    additional_kwargs.__raw_response = rawResponse;
+  }
   if (role === "user") {
     return new HumanMessageChunk({ content });
   } else if (role === "assistant") {
@@ -201,7 +210,7 @@ function _convertDeltaToMessageChunk(
       content,
       tool_call_chunks: toolCallChunks,
       additional_kwargs,
-      id: messageId,
+      id: rawResponse.id,
     });
   } else if (role === "system") {
     return new SystemMessageChunk({ content });
@@ -415,6 +424,8 @@ export class ChatOpenAI<
 
   organization?: string;
 
+  __includeRawResponse?: boolean;
+
   protected client: OpenAIClient;
 
   protected clientConfig: ClientOptions;
@@ -485,6 +496,7 @@ export class ChatOpenAI<
     this.stop = fields?.stopSequences ?? fields?.stop;
     this.stopSequences = this?.stop;
     this.user = fields?.user;
+    this.__includeRawResponse = fields?.__includeRawResponse;
 
     if (this.azureOpenAIApiKey || this.azureADTokenProvider) {
       if (!this.azureOpenAIApiInstanceName && !this.azureOpenAIBasePath) {
@@ -648,7 +660,12 @@ export class ChatOpenAI<
       if (!delta) {
         continue;
       }
-      const chunk = _convertDeltaToMessageChunk(delta, data.id, defaultRole);
+      const chunk = _convertDeltaToMessageChunk(
+        delta,
+        data,
+        defaultRole,
+        this.__includeRawResponse
+      );
       defaultRole = delta.role ?? defaultRole;
       const newTokenIndices = {
         prompt: options.promptIndex ?? 0,
@@ -797,7 +814,8 @@ export class ChatOpenAI<
         text,
         message: openAIResponseToChatMessage(
           part.message ?? { role: "assistant" },
-          data.id
+          data,
+          this.__includeRawResponse
         ),
       };
       generation.generationInfo = {
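The streaming path threads the flag through `_convertDeltaToMessageChunk`, so each emitted chunk can carry the raw `ChatCompletionChunk` that produced it. A minimal consumer sketch (assuming default model settings; a final usage-only chunk may carry `usage_metadata` instead of a raw payload, as the new tests below allow):

```typescript
import { ChatOpenAI } from "@langchain/openai";

const chat = new ChatOpenAI({ __includeRawResponse: true });
for await (const chunk of await chat.stream("Hello!")) {
  // The raw ChatCompletionChunk from the wire, when present.
  console.log(chunk.additional_kwargs.__raw_response);
}
```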
18 changes: 18 additions & 0 deletions libs/langchain-openai/src/tests/azure/chat_models.int.test.ts
Review comment: Hey team, I've flagged the added beforeAll block in the PR for review as it explicitly sets environment variables using process.env. This change should be reviewed to ensure it aligns with our environment variable management practices.

@@ -28,6 +28,24 @@ import { AzureChatOpenAI } from "../../azure/chat_models.js";
 // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
 const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
 
+beforeAll(() => {
+  if (!process.env.AZURE_OPENAI_API_KEY) {
+    process.env.AZURE_OPENAI_API_KEY = process.env.TEST_AZURE_OPENAI_API_KEY;
+  }
+  if (!process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME) {
+    process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME =
+      process.env.TEST_AZURE_OPENAI_API_DEPLOYMENT_NAME;
+  }
+  if (!process.env.AZURE_OPENAI_BASE_PATH) {
+    process.env.AZURE_OPENAI_BASE_PATH =
+      process.env.TEST_AZURE_OPENAI_BASE_PATH;
+  }
+  if (!process.env.AZURE_OPENAI_API_VERSION) {
+    process.env.AZURE_OPENAI_API_VERSION =
+      process.env.TEST_AZURE_OPENAI_API_VERSION;
+  }
+});
+
 test("Test Azure ChatOpenAI call method", async () => {
   const chat = new AzureChatOpenAI({
     modelName: "gpt-3.5-turbo",
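The same `beforeAll` fallback block is duplicated verbatim across the four Azure test files in this PR. A hypothetical refactor (not part of this PR; the helper name is invented) could collapse the repetition:

```typescript
// Hypothetical helper, not in the PR: fall back to TEST_-prefixed variables.
function fallbackToTestEnv(name: string): void {
  if (!process.env[name]) {
    process.env[name] = process.env[`TEST_${name}`];
  }
}

beforeAll(() => {
  [
    "AZURE_OPENAI_API_KEY",
    "AZURE_OPENAI_API_DEPLOYMENT_NAME",
    "AZURE_OPENAI_BASE_PATH",
    "AZURE_OPENAI_API_VERSION",
  ].forEach(fallbackToTestEnv);
});
```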
Review comment: Hey team, I've flagged the recent change in the PR for review as it explicitly accesses and sets environment variables using process.env. Please take a look and ensure it aligns with our best practices for handling environment variables. Thanks!

@@ -5,6 +5,24 @@ import { AIMessageChunk } from "@langchain/core/messages";
 import { AzureChatOpenAI } from "../../azure/chat_models.js";
 import { ChatOpenAICallOptions } from "../../chat_models.js";
 
+beforeAll(() => {
+  if (!process.env.AZURE_OPENAI_API_KEY) {
+    process.env.AZURE_OPENAI_API_KEY = process.env.TEST_AZURE_OPENAI_API_KEY;
+  }
+  if (!process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME) {
+    process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME =
+      process.env.TEST_AZURE_OPENAI_API_DEPLOYMENT_NAME;
+  }
+  if (!process.env.AZURE_OPENAI_BASE_PATH) {
+    process.env.AZURE_OPENAI_BASE_PATH =
+      process.env.TEST_AZURE_OPENAI_BASE_PATH;
+  }
+  if (!process.env.AZURE_OPENAI_API_VERSION) {
+    process.env.AZURE_OPENAI_API_VERSION =
+      process.env.TEST_AZURE_OPENAI_API_VERSION;
+  }
+});
+
 class AzureChatOpenAIStandardIntegrationTests extends ChatModelIntegrationTests<
   ChatOpenAICallOptions,
   AIMessageChunk
19 changes: 19 additions & 0 deletions libs/langchain-openai/src/tests/azure/embeddings.int.test.ts
Review comment: Hey there! I noticed that the recent PR includes changes to access and set environment variables using process.env. This comment is to flag the change for maintainers to review the handling of environment variables in the code. Great work on the PR!

@@ -1,6 +1,25 @@
 /* eslint-disable no-process-env */
 import { test, expect } from "@jest/globals";
 import { AzureOpenAIEmbeddings as OpenAIEmbeddings } from "../../azure/embeddings.js";
+
+beforeAll(() => {
+  if (!process.env.AZURE_OPENAI_API_KEY) {
+    process.env.AZURE_OPENAI_API_KEY = process.env.TEST_AZURE_OPENAI_API_KEY;
+  }
+  if (!process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME) {
+    process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME =
+      process.env.TEST_AZURE_OPENAI_API_DEPLOYMENT_NAME;
+  }
+  if (!process.env.AZURE_OPENAI_BASE_PATH) {
+    process.env.AZURE_OPENAI_BASE_PATH =
+      process.env.TEST_AZURE_OPENAI_BASE_PATH;
+  }
+  if (!process.env.AZURE_OPENAI_API_VERSION) {
+    process.env.AZURE_OPENAI_API_VERSION =
+      process.env.TEST_AZURE_OPENAI_API_VERSION;
+  }
+});
+
 test("Test AzureOpenAIEmbeddings.embedQuery", async () => {
   const embeddings = new OpenAIEmbeddings();
   const res = await embeddings.embedQuery("Hello world");
18 changes: 18 additions & 0 deletions libs/langchain-openai/src/tests/azure/llms.int.test.ts
Review comment: Hey team, just a heads up that I've flagged the recent changes in the llms.int.test.ts file for review. The added beforeAll block sets environment variables based on corresponding TEST_ prefixed variables, so it's important to ensure everything is handled correctly. Let me know if you have any questions!

@@ -15,6 +15,24 @@ import { AzureOpenAI } from "../../azure/llms.js";
 // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
 const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
 
+beforeAll(() => {
+  if (!process.env.AZURE_OPENAI_API_KEY) {
+    process.env.AZURE_OPENAI_API_KEY = process.env.TEST_AZURE_OPENAI_API_KEY;
+  }
+  if (!process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME) {
+    process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME =
+      process.env.TEST_AZURE_OPENAI_API_DEPLOYMENT_NAME;
+  }
+  if (!process.env.AZURE_OPENAI_BASE_PATH) {
+    process.env.AZURE_OPENAI_BASE_PATH =
+      process.env.TEST_AZURE_OPENAI_BASE_PATH;
+  }
+  if (!process.env.AZURE_OPENAI_API_VERSION) {
+    process.env.AZURE_OPENAI_API_VERSION =
+      process.env.TEST_AZURE_OPENAI_API_VERSION;
+  }
+});
+
 test("Test Azure OpenAI invoke", async () => {
   const model = new AzureOpenAI({
     maxTokens: 5,
26 changes: 26 additions & 0 deletions libs/langchain-openai/src/tests/chat_models-extended.int.test.ts
@@ -291,3 +291,29 @@ test("Few shotting with tool calls", async () => {
   console.log(res);
   expect(res.content).toContain("24");
 });
+
+test("Test ChatOpenAI with raw response", async () => {
+  const chat = new ChatOpenAI({
+    modelName: "gpt-3.5-turbo-1106",
+    maxTokens: 128,
+    __includeRawResponse: true,
+  });
+  const message = new HumanMessage("Hello!");
+  const res = await chat.invoke([message]);
+  expect(res.additional_kwargs.__raw_response).toBeDefined();
+});
+
+test("Test ChatOpenAI with raw response (streaming)", async () => {
+  const chat = new ChatOpenAI({
+    modelName: "gpt-3.5-turbo-1106",
+    maxTokens: 128,
+    __includeRawResponse: true,
+  });
+  const message = new HumanMessage("Hello!");
+  const stream = await chat.stream([message]);
+  for await (const chunk of stream) {
+    expect(
+      chunk.additional_kwargs.__raw_response || chunk.usage_metadata
+    ).toBeDefined();
+  }
+});
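Because `additional_kwargs` is loosely typed, `__raw_response` comes back as `unknown`; a consumer that wants typed access can cast it back to the SDK type. A hedged sketch, assuming the non-streaming path:

```typescript
import OpenAI from "openai";
import { ChatOpenAI } from "@langchain/openai";

const res = await new ChatOpenAI({ __includeRawResponse: true }).invoke("Hi");
// Narrow the unknown payload back to the OpenAI SDK's ChatCompletion type.
const raw = res.additional_kwargs
  .__raw_response as OpenAI.Chat.Completions.ChatCompletion;
// Raw SDK fields that the AIMessage does not surface directly.
console.log(raw.system_fingerprint, raw.usage?.total_tokens);
```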
8 changes: 7 additions & 1 deletion libs/langchain-openai/src/types.ts
@@ -148,7 +148,13 @@ export interface OpenAIChatInput extends OpenAIBaseInput {
   topLogprobs?: number;
 
   /** ChatGPT messages to pass as a prefix to the prompt */
-  prefixMessages?: OpenAIClient.Chat.CreateChatCompletionRequestMessage[];
+  prefixMessages?: OpenAIClient.Chat.ChatCompletionMessageParam[];
+
+  /**
+   * Whether to include the raw OpenAI response in the output message's "additional_kwargs" field.
+   * Currently in experimental beta.
+   */
+  __includeRawResponse?: boolean;
 }
 
 export declare interface AzureOpenAIInput {