From 6e136add1cb5c9e2d74bbd17faac5e11248ad0ea Mon Sep 17 00:00:00 2001 From: Matt Lindsey Date: Tue, 12 Nov 2024 06:43:29 -0500 Subject: [PATCH 1/3] Rename auto_tool_execution to execute_tools --- README.md | 4 +-- examples/assistant_chat.rb | 2 +- lib/langchain/assistant.rb | 22 ++++++------ spec/langchain/assistant/assistant_spec.rb | 40 +++++++++++----------- 4 files changed, 34 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index ec244289..b7db5dae 100644 --- a/README.md +++ b/README.md @@ -512,7 +512,7 @@ assistant.add_message_and_run!( messages = assistant.messages # Run the assistant with automatic tool execution -assistant.run(auto_tool_execution: true) +assistant.run(: true) # If you want to stream the response, you can add a response handler assistant = Langchain::Assistant.new( @@ -524,7 +524,7 @@ assistant = Langchain::Assistant.new( # print(response_chunk.inspect) end assistant.add_message(content: "Hello") -assistant.run(: true) ``` Note that streaming is not currently supported for all LLMs. 
diff --git a/examples/assistant_chat.rb b/examples/assistant_chat.rb index 681c687f..dfce8a4f 100644 --- a/examples/assistant_chat.rb +++ b/examples/assistant_chat.rb @@ -51,7 +51,7 @@ def prompt_for_message break end - assistant.add_message_and_run content: user_message, auto_tool_execution: true + assistant.add_message_and_run content: user_message, execute_tools: true puts assistant.messages.last.content end rescue Interrupt diff --git a/lib/langchain/assistant.rb b/lib/langchain/assistant.rb index 279a246a..f939c369 100644 --- a/lib/langchain/assistant.rb +++ b/lib/langchain/assistant.rb @@ -129,9 +129,9 @@ def add_messages(messages:) # Run the assistant # - # @param auto_tool_execution [Boolean] Whether or not to automatically run tools + # @param execute_tools [Boolean] Whether or not to automatically run tools # @return [Array] The messages - def run(auto_tool_execution: false) + def run(execute_tools: false) if messages.empty? Langchain.logger.warn("#{self.class} - No messages to process") @state = :completed @@ -139,7 +139,7 @@ def run(auto_tool_execution: false) end @state = :in_progress - @state = handle_state until run_finished?(auto_tool_execution) + @state = handle_state until run_finished?(execute_tools) messages end @@ -148,17 +148,17 @@ def run(auto_tool_execution: false) # # @return [Array] The messages def run! 
- run(auto_tool_execution: true) + run(execute_tools: true) end # Add a user message and run the assistant # # @param content [String] The content of the message - # @param auto_tool_execution [Boolean] Whether or not to automatically run tools + # @param execute_tools [Boolean] Whether or not to automatically run tools # @return [Array] The messages - def add_message_and_run(content: nil, image_url: nil, auto_tool_execution: false) + def add_message_and_run(content: nil, image_url: nil, execute_tools: false) add_message(content: content, image_url: image_url, role: "user") - run(auto_tool_execution: auto_tool_execution) + run(execute_tools: execute_tools) end # Add a user message and run the assistant with automatic tool execution @@ -166,7 +166,7 @@ def add_message_and_run(content: nil, image_url: nil, auto_tool_execution: false # @param content [String] The content of the message # @return [Array] The messages def add_message_and_run!(content: nil, image_url: nil) - add_message_and_run(content: content, image_url: image_url, auto_tool_execution: true) + add_message_and_run(content: content, image_url: image_url, execute_tools: true) end # Submit tool output @@ -233,12 +233,12 @@ def validate_tool_choice!(tool_choice) # Check if the run is finished # - # @param auto_tool_execution [Boolean] Whether or not to automatically run tools + # @param execute_tools [Boolean] Whether or not to automatically run tools # @return [Boolean] Whether the run is finished - def run_finished?(auto_tool_execution) + def run_finished?(execute_tools) finished_states = [:completed, :failed] - requires_manual_action = (@state == :requires_action) && !auto_tool_execution + requires_manual_action = (@state == :requires_action) && !execute_tools finished_states.include?(@state) || requires_manual_action end diff --git a/spec/langchain/assistant/assistant_spec.rb b/spec/langchain/assistant/assistant_spec.rb index fe291bd3..26e74c40 100644 --- a/spec/langchain/assistant/assistant_spec.rb +++ 
b/spec/langchain/assistant/assistant_spec.rb @@ -223,7 +223,7 @@ } end - context "when auto_tool_execution is false" do + context "when execute_tools is false" do before do allow(subject.llm).to receive(:chat) .with( @@ -241,14 +241,14 @@ end it "runs the assistant" do - subject.run(auto_tool_execution: false) + subject.run(execute_tools: false) expect(subject.messages.last.role).to eq("assistant") expect(subject.messages.last.tool_calls).to eq([raw_openai_response["choices"][0]["message"]["tool_calls"]][0]) end it "records the used tokens totals" do - subject.run(auto_tool_execution: false) + subject.run(execute_tools: false) expect(subject.total_tokens).to eq(109) expect(subject.total_prompt_tokens).to eq(91) @@ -256,7 +256,7 @@ end end - context "when auto_tool_execution is true" do + context "when execute_tools is true" do let(:raw_openai_response2) do { "id" => "chatcmpl-96P6eEMDDaiwzRIHJZAliYHQ8ov3q", @@ -299,7 +299,7 @@ end it "runs the assistant and automatically executes tool calls" do - subject.run(auto_tool_execution: true) + subject.run(execute_tools: true) expect(subject.messages[-2].role).to eq("tool") expect(subject.messages[-2].content).to eq("4.0") @@ -309,7 +309,7 @@ end it "records the used tokens totals" do - subject.run(auto_tool_execution: true) + subject.run(execute_tools: true) expect(subject.total_tokens).to eq(134) expect(subject.total_prompt_tokens).to eq(121) @@ -590,7 +590,7 @@ } end - context "when auto_tool_execution is false" do + context "when execute_tools is false" do before do allow(subject.llm).to receive(:chat) .with( @@ -607,14 +607,14 @@ end it "runs the assistant" do - subject.run(auto_tool_execution: false) + subject.run(execute_tools: false) expect(subject.messages.last.role).to eq("assistant") expect(subject.messages.last.tool_calls).to eq([raw_mistralai_response["choices"][0]["message"]["tool_calls"]][0]) end it "records the used tokens totals" do - subject.run(auto_tool_execution: false) + subject.run(execute_tools: 
false) expect(subject.total_tokens).to eq(109) expect(subject.total_prompt_tokens).to eq(91) @@ -622,7 +622,7 @@ end end - context "when auto_tool_execution is true" do + context "when execute_tools is true" do let(:raw_mistralai_response2) do { "id" => "chatcmpl-96P6eEMDDaiwzRIHJZAliYHQ8ov3q", @@ -664,7 +664,7 @@ end it "runs the assistant and automatically executes tool calls" do - subject.run(auto_tool_execution: true) + subject.run(execute_tools: true) expect(subject.messages[-2].role).to eq("tool") expect(subject.messages[-2].content).to eq("4.0") @@ -674,7 +674,7 @@ end it "records the used tokens totals" do - subject.run(auto_tool_execution: true) + subject.run(execute_tools: true) expect(subject.total_tokens).to eq(134) expect(subject.total_prompt_tokens).to eq(121) @@ -938,7 +938,7 @@ } end - context "when auto_tool_execution is false" do + context "when execute_tools is false" do before do allow(subject.llm).to receive(:chat) .with( @@ -952,14 +952,14 @@ it "runs the assistant" do subject.add_message(role: "user", content: "Please calculate 2+2") - subject.run(auto_tool_execution: false) + subject.run(execute_tools: false) expect(subject.messages.last.role).to eq("model") expect(subject.messages.last.tool_calls).to eq([raw_google_gemini_response["candidates"][0]["content"]["parts"]][0]) end end - context "when auto_tool_execution is true" do + context "when execute_tools is true" do let(:raw_google_gemini_response2) do { "candidates" => [ @@ -999,7 +999,7 @@ subject.add_message(role: "user", content: "Please calculate 2+2") subject.add_message(role: "model", tool_calls: raw_google_gemini_response["candidates"][0]["content"]["parts"]) - subject.run(auto_tool_execution: true) + subject.run(execute_tools: true) expect(subject.messages[-2].role).to eq("function") expect(subject.messages[-2].content).to eq("4.0") @@ -1146,7 +1146,7 @@ end end - context "when auto_tool_execution is false" do + context "when execute_tools is false" do before do 
allow(subject.llm).to receive(:chat) .with( @@ -1160,7 +1160,7 @@ it "runs the assistant" do subject.add_message(role: "user", content: "Please calculate 2+2") - subject.run(auto_tool_execution: false) + subject.run(execute_tools: false) expect(subject.messages.last.role).to eq("assistant") expect(subject.messages.last.tool_calls).to eq([raw_anthropic_response["content"].last]) @@ -1178,7 +1178,7 @@ end end - context "when auto_tool_execution is true" do + context "when execute_tools is true" do let(:raw_anthropic_response2) do { "role" => "assistant", @@ -1229,7 +1229,7 @@ tool_calls: [raw_anthropic_response["content"].last] ) - subject.run(auto_tool_execution: true) + subject.run(execute_tools: true) expect(subject.messages[-2].role).to eq("tool_result") expect(subject.messages[-2].content).to eq("4.0") From 9cb01edad13c5cb5e83a0f8ac26465d2a440a611 Mon Sep 17 00:00:00 2001 From: Matt Lindsey Date: Tue, 12 Nov 2024 09:54:04 -0500 Subject: [PATCH 2/3] Fix and update README --- README.md | 6 ++++-- lib/langchain/assistant.rb | 8 ++++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b7db5dae..cf0b635f 100644 --- a/README.md +++ b/README.md @@ -512,7 +512,9 @@ assistant.add_message_and_run!( messages = assistant.messages # Run the assistant with automatic tool execution -assistant.run(: true) +assistant.run() +# OR run the assistant without executing tools +assistant.run(execute_tools: false) # If you want to stream the response, you can add a response handler assistant = Langchain::Assistant.new( @@ -524,7 +526,7 @@ assistant = Langchain::Assistant.new( # print(response_chunk.inspect) end assistant.add_message(content: "Hello") -assistant.run(: true) +assistant.run() ``` Note that streaming is not currently supported for all LLMs. 
diff --git a/lib/langchain/assistant.rb b/lib/langchain/assistant.rb index f939c369..73b6a5d5 100644 --- a/lib/langchain/assistant.rb +++ b/lib/langchain/assistant.rb @@ -138,8 +138,12 @@ def run(execute_tools: false) return end - @state = :in_progress - @state = handle_state until run_finished?(execute_tools) + if !execute_tools + @state = :completed + else + @state = :in_progress + @state = handle_state until run_finished?(execute_tools) + end messages end From 0a4eca172e8f5568dee57c289aac334b902d3795 Mon Sep 17 00:00:00 2001 From: Matt Lindsey Date: Tue, 12 Nov 2024 09:57:39 -0500 Subject: [PATCH 3/3] Fix README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index cf0b635f..df9d70a8 100644 --- a/README.md +++ b/README.md @@ -512,7 +512,7 @@ assistant.add_message_and_run!( messages = assistant.messages # Run the assistant with automatic tool execution -assistant.run() +assistant.run(execute_tools: true) # OR run the assistant without executing tools assistant.run(execute_tools: false) @@ -526,7 +526,7 @@ assistant = Langchain::Assistant.new( # print(response_chunk.inspect) end assistant.add_message(content: "Hello") -assistant.run() +assistant.run(execute_tools: true) ``` Note that streaming is not currently supported for all LLMs.