feat(anthropic): add tool call history support in chat messages
Add proper message format transformation for tool calls and results in chat
history when using the Anthropic agent. This lets the Anthropic agent handle
tool history consistently with the OpenAI agent, which already had this
functionality.

Previously, chat history with tool calls would cause API errors with
Anthropic but worked fine with OpenAI. Now both agents handle the same
message format correctly.
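For illustration, here is a minimal sketch of the history shape this commit targets, using the same role names and options.toolCall / options.toolResult fields as the new tests below; the weather strings are invented and the logging is only for inspection:

import { Anthropic, OpenAI, type ChatMessage } from "llamaindex";

// A chat history that already contains a tool call and its result.
const messages: ChatMessage[] = [
  { role: "user", content: "What's the weather in London?" },
  {
    role: "assistant",
    content: "Let me check the weather.",
    options: {
      toolCall: [
        {
          id: "call_123",
          name: "weather",
          input: JSON.stringify({ location: "London" }),
        },
      ],
    },
  },
  {
    role: "tool",
    content: "The weather in London is sunny, +20°C",
    options: { toolResult: { id: "call_123" } },
  },
];

// Both providers now accept the same history: Anthropic maps it to
// tool_use / tool_result content blocks, OpenAI to tool_calls plus a
// role "tool" message.
const anthropic = new Anthropic();
console.log(anthropic.formatMessages(messages));
console.log(OpenAI.toOpenAIMessage(messages));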
erik-balfe committed Dec 12, 2024
1 parent 9e648a0 commit 2ec1502
Showing 5 changed files with 453 additions and 259 deletions.
8 changes: 8 additions & 0 deletions .changeset/pink-days-appear.md
@@ -0,0 +1,8 @@
---
"@llamaindex/anthropic": minor
"@llamaindex/llamaindex-test": minor
"llamaindex": minor
"@llamaindex/core": minor
---

added support for tool calls with results in message history for the Anthropic agent
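As a usage sketch (not part of the changeset file itself): replaying a tool-bearing history through the regular chat call. This assumes the standard LlamaIndexTS LLM.chat({ messages }) interface and an ANTHROPIC_API_KEY in the environment; the tool id and weather strings are invented.

import { Anthropic, type ChatMessage } from "llamaindex";

const history: ChatMessage[] = [
  { role: "user", content: "What's the weather in London?" },
  {
    role: "assistant",
    content: "Let me check the weather.",
    options: {
      toolCall: [{ id: "call_123", name: "weather", input: JSON.stringify({ location: "London" }) }],
    },
  },
  {
    role: "tool",
    content: "The weather in London is sunny, +20°C",
    options: { toolResult: { id: "call_123" } },
  },
];

// Previously this raised an Anthropic API error when the history contained
// tool calls and results; it is now converted to tool_use / tool_result
// blocks before the request is sent.
const llm = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
const response = await llm.chat({ messages: history });
console.log(response.message.content);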
2 changes: 1 addition & 1 deletion packages/core/src/llms/type.ts
@@ -54,7 +54,7 @@ export interface LLM<
): Promise<CompletionResponse>;
}

export type MessageType = "user" | "assistant" | "system" | "memory";
export type MessageType = "user" | "assistant" | "system" | "memory" | "tool";

export type TextChatMessage<AdditionalMessageOptions extends object = object> =
{
1 change: 1 addition & 0 deletions packages/llamaindex/src/llm/gemini/utils.ts
@@ -188,6 +188,7 @@ export class GeminiHelper {
system: "user",
assistant: "model",
memory: "user",
tool: "user",
};

public static readonly ROLES_FROM_GEMINI: Record<
306 changes: 249 additions & 57 deletions packages/llamaindex/tests/llm/index.test.ts
@@ -1,18 +1,26 @@
import type { MessageParam } from "@anthropic-ai/sdk/resources/messages";
import { Anthropic, OpenAI, type ChatMessage } from "llamaindex";
import { describe, expect, test } from "vitest";

describe("Message Formatting", () => {
describe("Basic Message Formatting", () => {
test("OpenAI formats basic messages correctly", () => {
const inputMessages: ChatMessage[] = [
{ content: "Hello", role: "user" },
{ content: "Hi there!", role: "assistant" },
{ content: "Be helpful", role: "system" },
];
const expectedOutput = [
{ role: "user", content: "Hello" },
{ role: "assistant", content: "Hi there!" },
{ role: "system", content: "Be helpful" },
];
expect(OpenAI.toOpenAIMessage(inputMessages)).toEqual(expectedOutput);
});

describe("Anthropic llm", () => {
test("format messages", () => {
const anthropic = new Anthropic();
expect(
anthropic.formatMessages([
test("Anthropic formats basic messages correctly", () => {
const anthropic = new Anthropic();
const inputMessages: ChatMessage[] = [
{
content: "You are a helpful assistant.",
role: "assistant",
},
{
content: "Hello?",
role: "user",
},
];
const expectedOutput: MessageParam[] = [
{
content: "You are a helpful assistant.",
role: "assistant",
},
{
content: "Hello?",
role: "user",
},
];

expect(anthropic.formatMessages(inputMessages)).toEqual(expectedOutput);
});

test("OpenAI handles system messages correctly", () => {
const inputMessages: ChatMessage[] = [
{ content: "You are a coding assistant", role: "system" },
{ content: "Hello", role: "user" },
];
const expectedOutput = [
{ role: "system", content: "You are a coding assistant" },
{ role: "user", content: "Hello" },
];
expect(OpenAI.toOpenAIMessage(inputMessages)).toEqual(expectedOutput);
});

test("Anthropic handles multi-turn conversation correctly", () => {
const anthropic = new Anthropic();
const inputMessages: ChatMessage[] = [
{ content: "Hi", role: "user" },
{ content: "Hello! How can I help?", role: "assistant" },
{ content: "What's the weather?", role: "user" },
];
const expectedOutput: MessageParam[] = [
{ content: "Hi", role: "user" },
{ content: "Hello! How can I help?", role: "assistant" },
{ content: "What's the weather?", role: "user" },
];
expect(anthropic.formatMessages(inputMessages)).toEqual(expectedOutput);
});
});

describe("Advanced Message Formatting", () => {
test("Anthropic handles system messages and merging", () => {
const anthropic = new Anthropic();
const inputMessages: ChatMessage[] = [
{
content: "You are a helpful assistant.",
role: "assistant",
@@ -51,24 +92,24 @@
content: "What is your name?",
role: "user",
},
];
const expectedOutput: MessageParam[] = [
{
content: "You are a helpful assistant.",
role: "assistant",
},
{
content: "Hello?\nWhat is your name?",
role: "user",
},
];

expect(anthropic.formatMessages(inputMessages)).toEqual(expectedOutput);
});

test("Anthropic handles image content", () => {
const anthropic = new Anthropic();
const inputMessages: ChatMessage[] = [
{
content: [
{
@@ -84,29 +125,180 @@
],
role: "user",
},
];
const expectedOutput: MessageParam[] = [
{
role: "user",
content: [
{
type: "text",
text: "What do you see in the image?",
},
{
type: "image",
source: {
type: "base64",
media_type: "image/jpeg",
data: "/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAQDAwQDAwQEAwQFBAQFBgoHBgYGBg0JCggKDw0QEA8NDw4RExgUERIXEg4PFRwVFxkZGxsbEBQdHx0aHxgaGxr/2wBDAQQFBQYFBgwHBwwaEQ8RGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhr/wAARCAAgACADASIAAhEBAxEB/8QAGAABAAMBAAAAAAAAAAAAAAAACAQHCQb/xAAvEAABAgUCBAUDBQEAAAAAAAACAQMEBQYHERIhAAgTYSIxMkJxI2KCFBVBUVKh/8QAGAEAAwEBAAAAAAAAAAAAAAAAAwQFAQL/xAAnEQABBAECAwkAAAAAAAAAAAACAQMEEQAFMiExYRITFCJBcXKBof/aAAwDAQACEQMRAD8Aufmb5mnbWREFRdvIMZ3cWcaBh2NHUGEFwtIKQp63CX0h+S7YQgRzGSq6kgqGAS8NQRc6fmkIMWwSxJEyP+m0bwggQr5iIom6KnnxXty61jK+uJUVUxzxm/M5g5EASr6G9WGwTsIIIp2FOHJfi0kyvzS9Cv0zGwEF+2whOAUY4a6mnm2lREURLPoTggNG5tS6xpmOT4GQptwNUZc6sbexzcZRVSTKTOgudMPEL0j7E2uQNOxIqcaYcqXNaxe2HKnauBiAraDZ6n0k0tTBpPNwE9pptqDP3DtlBC1Q8qNw5K4AwLEunYkWMwcYg6fnqoH/ADPHA2/qeZWquhJJ3pODmEhmg/qGl2XAloebL5HWK/K8dOMOM7xVPfJrMhmQiq0SFXOlyPc+jIq3lwakpeYNq27K491kfvbzls07ECiSdlThhWKvj1LLx0VVLWGqSBuFJ1jc3WBEUb8K4TUieHz3xni7ea3lSZvZDhUVImxAVtBso39VdLUe0nk2a+0030n+K7YUc95/J66tRIp3SVXUpGyUI7wvPxDBoJ/UaLIuIqtuInRwiiqp4z3XbBYr3cGp9P30zJXiSjk1HLsqdIvxvzV1q8ZtB3ppa5bkwZkDz7LsF09Qxgi0Roa6UUU1LnxYH5JP74D1LUjNrkXigabc6kZM5vPFZi3NPi3dVXnFT+EQUM17IvEi1tL1xUkcEHb+lo6duvRUO644wwSDpaPWgG7sAApIKqqqm4jvxo1yvcrjdoTiqtrQ2I+u5nr19ItbUA2a5IAX3GvuP8U2ypMS5pSwFC5peTtM0lnSkMWVVUJb48a+8//Z",
},
},
],
},
];

expect(anthropic.formatMessages(inputMessages)).toEqual(expectedOutput);
});
});

describe("Tool Message Formatting", () => {
const toolCallMessages: ChatMessage[] = [
{
role: "user",
content: "What's the weather in London?",
},
{
content: "You are a helpful assistant.",
role: "assistant",
content: "Let me check the weather.",
options: {
toolCall: [
{
id: "call_123",
name: "weather",
input: JSON.stringify({ location: "London" }),
},
],
},
},
{
role: "tool",
content: "The weather in London is sunny, +20°C",
options: {
toolResult: {
id: "call_123",
},
},
},
];

test("OpenAI formats tool calls correctly", () => {
const expectedOutput = [
{
role: "user",
content: "What's the weather in London?",
},
{
role: "assistant",
content: "Let me check the weather.",
tool_calls: [
{
id: "call_123",
type: "function",
function: {
name: "weather",
arguments: JSON.stringify({ location: "London" }),
},
},
],
},
{
role: "tool",
content: "The weather in London is sunny, +20°C",
tool_call_id: "call_123",
},
];

expect(OpenAI.toOpenAIMessage(toolCallMessages)).toEqual(expectedOutput);
});

test("Anthropic formats tool calls correctly", () => {
const anthropic = new Anthropic();
const expectedOutput: MessageParam[] = [
{
role: "user",
content: "What's the weather in London?",
},
{
role: "assistant",
content: [
{
type: "text",
text: "Let me check the weather.",
},
{
type: "tool_use",
id: "call_123",
name: "weather",
input: {
location: "London",
},
},
],
},
{
role: "user",
content: [
{
type: "tool_result",
tool_use_id: "call_123",
content: "The weather in London is sunny, +20°C",
},
type: "image",
],
},
];

expect(anthropic.formatMessages(toolCallMessages)).toEqual(
expectedOutput,
);
});

test("OpenAI formats multiple tool calls correctly", () => {
const multiToolMessages: ChatMessage[] = [
{
role: "assistant",
content: "Let me check both weather and time.",
options: {
toolCall: [
{
id: "weather_123",
name: "weather",
input: JSON.stringify({ location: "London" }),
},
{
id: "time_456",
name: "time",
input: JSON.stringify({ timezone: "GMT" }),
},
],
},
},
];

const expectedOutput = [
{
role: "assistant",
content: "Let me check both weather and time.",
tool_calls: [
{
id: "weather_123",
type: "function",
function: {
name: "weather",
arguments: JSON.stringify({ location: "London" }),
},
},
{
id: "time_456",
type: "function",
function: {
name: "time",
arguments: JSON.stringify({ timezone: "GMT" }),
},
},
],
},
];

expect(OpenAI.toOpenAIMessage(multiToolMessages)).toEqual(expectedOutput);
});
});
});