From 10830ccbcd4ef42a647c866852534fab43af07d6 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 08:29:25 +0000 Subject: [PATCH 01/11] fix: properly structure completions in Ollama provider Co-Authored-By: Alex Reibman --- agentops/llms/providers/ollama.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/agentops/llms/providers/ollama.py b/agentops/llms/providers/ollama.py index 480cae8b..00a12645 100644 --- a/agentops/llms/providers/ollama.py +++ b/agentops/llms/providers/ollama.py @@ -23,7 +23,6 @@ def handle_stream_chunk(chunk: dict): message = chunk.get("message", {"role": None, "content": ""}) if chunk.get("done"): - llm_event.completion["content"] += message.get("content") llm_event.end_timestamp = get_ISO_time() llm_event.model = f'ollama/{chunk.get("model")}' llm_event.returns = chunk @@ -33,26 +32,33 @@ def handle_stream_chunk(chunk: dict): self.client.record(llm_event) if llm_event.completion is None: - llm_event.completion = message + llm_event.completion = { + "role": message.get("role"), + "content": message.get("content", ""), + "tool_calls": None, + "function_call": None + } else: - llm_event.completion["content"] += message.get("content") + llm_event.completion["content"] += message.get("content", "") if inspect.isgenerator(response): - def generator(): for chunk in response: handle_stream_chunk(chunk) yield chunk - return generator() llm_event.end_timestamp = get_ISO_time() - llm_event.model = f'ollama/{response["model"]}' llm_event.returns = response llm_event.agent_id = check_call_stack_for_agent_id() llm_event.prompt = kwargs["messages"] - llm_event.completion = response["message"] + llm_event.completion = { + "role": response["message"].get("role"), + "content": response["message"].get("content", ""), + "tool_calls": None, + "function_call": None + } self._safe_record(session, llm_event) return response From 6d17b957ffb8e5b7d57aed46ac61e80bc86b0ae4 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 08:33:15 +0000 Subject: [PATCH 02/11] style: apply ruff-format changes Co-Authored-By: Alex Reibman --- agentops/llms/providers/ollama.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/agentops/llms/providers/ollama.py b/agentops/llms/providers/ollama.py index 00a12645..7c9ff854 100644 --- a/agentops/llms/providers/ollama.py +++ b/agentops/llms/providers/ollama.py @@ -36,16 +36,18 @@ def handle_stream_chunk(chunk: dict): "role": message.get("role"), "content": message.get("content", ""), "tool_calls": None, - "function_call": None + "function_call": None, } else: llm_event.completion["content"] += message.get("content", "") if inspect.isgenerator(response): + def generator(): for chunk in response: handle_stream_chunk(chunk) yield chunk + return generator() llm_event.end_timestamp = get_ISO_time() @@ -57,7 +59,7 @@ def generator(): "role": response["message"].get("role"), "content": response["message"].get("content", ""), "tool_calls": None, - "function_call": None + "function_call": None, } self._safe_record(session, llm_event) From 23c9b134785b763b4b4aed778aaf87c0457520c3 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 08:41:15 +0000 Subject: [PATCH 03/11] style: add trailing commas and proper spacing Co-Authored-By: Alex Reibman --- agentops/llms/providers/ollama.py | 1 + 1 file 
changed, 1 insertion(+) diff --git a/agentops/llms/providers/ollama.py b/agentops/llms/providers/ollama.py index 7c9ff854..96b9b7c2 100644 --- a/agentops/llms/providers/ollama.py +++ b/agentops/llms/providers/ollama.py @@ -62,6 +62,7 @@ def generator(): "function_call": None, } + self._safe_record(session, llm_event) return response From 0351d6530504b9ed335619bb35a2b1d82b1991b6 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 08:44:23 +0000 Subject: [PATCH 04/11] style: remove extra blank lines Co-Authored-By: Alex Reibman --- agentops/llms/providers/ollama.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/agentops/llms/providers/ollama.py b/agentops/llms/providers/ollama.py index 96b9b7c2..ece199fc 100644 --- a/agentops/llms/providers/ollama.py +++ b/agentops/llms/providers/ollama.py @@ -61,8 +61,6 @@ def generator(): "tool_calls": None, "function_call": None, } - - self._safe_record(session, llm_event) return response From a27df1ab63a8d71c3df4dfcdea82825098e009eb Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 08:48:39 +0000 Subject: [PATCH 05/11] docs: add Ollama example script Co-Authored-By: Alex Reibman --- examples/ollama_examples/ollama_examples.py | 124 ++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 examples/ollama_examples/ollama_examples.py diff --git a/examples/ollama_examples/ollama_examples.py b/examples/ollama_examples/ollama_examples.py new file mode 100644 index 00000000..bb69b19c --- /dev/null +++ b/examples/ollama_examples/ollama_examples.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python +# coding: utf-8 + +# # AgentOps Ollama Integration +# +# This example demonstrates how to use AgentOps to monitor your Ollama LLM calls. +# +# First let's install the required packages +# +# > ⚠️ **Important**: Make sure you have Ollama installed and running locally before running this notebook. You can install it from [ollama.ai](https://ollama.com). + +# In[ ]: + + + + +# Then import them + +# In[2]: + + +import ollama +import agentops +import os +from dotenv import load_dotenv + + +# Next, we'll set our API keys. For Ollama, we'll need to make sure Ollama is running locally. +# [Get an AgentOps API key](https://agentops.ai/settings/projects) +# +# 1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or... +# 2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo! + +# In[3]: + + +# Let's load our environment variables +load_dotenv() + +AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY") or "" + + +# In[ ]: + + +# Initialize AgentOps with some default tags +agentops.init(AGENTOPS_API_KEY, default_tags=["ollama-example"]) + + +# Now let's make some basic calls to Ollama. Make sure you have pulled the model first, use the following or replace with whichever model you want to use. 
+ +# In[ ]: + + +ollama.pull("mistral") + + +# In[ ]: + + +# Basic completion, +response = ollama.chat(model='mistral', + messages=[{ + 'role': 'user', + 'content': 'What are the benefits of using AgentOps for monitoring LLMs?', + }] +) +print(response['message']['content']) + + +# Let's try streaming responses as well + +# In[ ]: + + +# Streaming Example +stream = ollama.chat( + model='mistral', + messages=[{ + 'role': 'user', + 'content': 'Write a haiku about monitoring AI agents', + }], + stream=True +) + +for chunk in stream: + print(chunk['message']['content'], end='') + + +# In[ ]: + + +# Conversation Example +messages = [ + { + 'role': 'user', + 'content': 'What is AgentOps?' + }, + { + 'role': 'assistant', + 'content': 'AgentOps is a monitoring and observability platform for LLM applications.' + }, + { + 'role': 'user', + 'content': 'Can you give me 3 key features?' + } +] + +response = ollama.chat( + model='mistral', + messages=messages +) +print(response['message']['content']) + + +# > 💡 **Note**: In production environments, you should add proper error handling around the Ollama calls and use `agentops.end_session("Error")` when exceptions occur. + +# Finally, let's end our AgentOps session + +# In[ ]: + + +agentops.end_session("Success") + From 2bdbb60ddc3838a474f2c3e5e3f4d2fd27945b81 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 08:53:20 +0000 Subject: [PATCH 06/11] style: fix formatting in Ollama example script Co-Authored-By: Alex Reibman --- examples/ollama_examples/ollama_examples.py | 60 +++++++++++---------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/examples/ollama_examples/ollama_examples.py b/examples/ollama_examples/ollama_examples.py index bb69b19c..b3cce802 100644 --- a/examples/ollama_examples/ollama_examples.py +++ b/examples/ollama_examples/ollama_examples.py @@ -2,11 +2,11 @@ # coding: utf-8 # # AgentOps Ollama Integration -# +# # This example demonstrates how to use AgentOps to monitor your Ollama LLM calls. -# +# # First let's install the required packages -# +# # > ⚠️ **Important**: Make sure you have Ollama installed and running locally before running this notebook. You can install it from [ollama.ai](https://ollama.com). # In[ ]: @@ -27,7 +27,7 @@ # Next, we'll set our API keys. For Ollama, we'll need to make sure Ollama is running locally. # [Get an AgentOps API key](https://agentops.ai/settings/projects) -# +# # 1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or... # 2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo! 
@@ -59,13 +59,16 @@ # Basic completion, -response = ollama.chat(model='mistral', - messages=[{ - 'role': 'user', - 'content': 'What are the benefits of using AgentOps for monitoring LLMs?', - }] +response = ollama.chat( + model="mistral", + messages=[ + { + "role": "user", + "content": "What are the benefits of using AgentOps for monitoring LLMs?", + }, + ], ) -print(response['message']['content']) +print(response["message"]["content"]) # Let's try streaming responses as well @@ -75,16 +78,18 @@ # Streaming Example stream = ollama.chat( - model='mistral', - messages=[{ - 'role': 'user', - 'content': 'Write a haiku about monitoring AI agents', - }], - stream=True + model="mistral", + messages=[ + { + "role": "user", + "content": "Write a haiku about monitoring AI agents", + }, + ], + stream=True, ) for chunk in stream: - print(chunk['message']['content'], end='') + print(chunk["message"]["content"], end="") # In[ ]: @@ -93,24 +98,21 @@ # Conversation Example messages = [ { - 'role': 'user', - 'content': 'What is AgentOps?' + "role": "user", + "content": "What is AgentOps?", }, { - 'role': 'assistant', - 'content': 'AgentOps is a monitoring and observability platform for LLM applications.' + "role": "assistant", + "content": "AgentOps is a monitoring and observability platform for LLM applications.", }, { - 'role': 'user', - 'content': 'Can you give me 3 key features?' - } + "role": "user", + "content": "Can you give me 3 key features?", + }, ] -response = ollama.chat( - model='mistral', - messages=messages -) -print(response['message']['content']) +response = ollama.chat(model="mistral", messages=messages) +print(response["message"]["content"]) # > 💡 **Note**: In production environments, you should add proper error handling around the Ollama calls and use `agentops.end_session("Error")` when exceptions occur. From a7c841a05fc7e11cf3f2113ebc4b976a318d9d83 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 08:57:09 +0000 Subject: [PATCH 07/11] style: remove extra blank lines from Ollama example script Co-Authored-By: Alex Reibman --- examples/ollama_examples/ollama_examples.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/examples/ollama_examples/ollama_examples.py b/examples/ollama_examples/ollama_examples.py index b3cce802..a219a7f8 100644 --- a/examples/ollama_examples/ollama_examples.py +++ b/examples/ollama_examples/ollama_examples.py @@ -11,20 +11,15 @@ # In[ ]: - - - # Then import them # In[2]: - import ollama import agentops import os from dotenv import load_dotenv - # Next, we'll set our API keys. For Ollama, we'll need to make sure Ollama is running locally. # [Get an AgentOps API key](https://agentops.ai/settings/projects) # @@ -33,31 +28,22 @@ # In[3]: - # Let's load our environment variables load_dotenv() AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY") or "" - -# In[ ]: - - # Initialize AgentOps with some default tags agentops.init(AGENTOPS_API_KEY, default_tags=["ollama-example"]) - # Now let's make some basic calls to Ollama. Make sure you have pulled the model first, use the following or replace with whichever model you want to use. 
# In[ ]: - ollama.pull("mistral") - # In[ ]: - # Basic completion, response = ollama.chat( model="mistral", @@ -70,12 +56,10 @@ ) print(response["message"]["content"]) - # Let's try streaming responses as well # In[ ]: - # Streaming Example stream = ollama.chat( model="mistral", @@ -91,10 +75,8 @@ for chunk in stream: print(chunk["message"]["content"], end="") - # In[ ]: - # Conversation Example messages = [ { @@ -114,13 +96,11 @@ response = ollama.chat(model="mistral", messages=messages) print(response["message"]["content"]) - # > 💡 **Note**: In production environments, you should add proper error handling around the Ollama calls and use `agentops.end_session("Error")` when exceptions occur. # Finally, let's end our AgentOps session # In[ ]: - agentops.end_session("Success") From 74226fca5f9b2198d81acdf76f115be4170a2075 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 09:02:04 +0000 Subject: [PATCH 08/11] style: remove trailing blank line from Ollama example script Co-Authored-By: Alex Reibman --- examples/ollama_examples/ollama_examples.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/examples/ollama_examples/ollama_examples.py b/examples/ollama_examples/ollama_examples.py index a219a7f8..17394b9a 100644 --- a/examples/ollama_examples/ollama_examples.py +++ b/examples/ollama_examples/ollama_examples.py @@ -10,11 +10,9 @@ # > ⚠️ **Important**: Make sure you have Ollama installed and running locally before running this notebook. You can install it from [ollama.ai](https://ollama.com). # In[ ]: - # Then import them # In[2]: - import ollama import agentops import os @@ -27,7 +25,6 @@ # 2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo! # In[3]: - # Let's load our environment variables load_dotenv() @@ -39,11 +36,9 @@ # Now let's make some basic calls to Ollama. Make sure you have pulled the model first, use the following or replace with whichever model you want to use. 
# In[ ]: - ollama.pull("mistral") # In[ ]: - # Basic completion, response = ollama.chat( model="mistral", @@ -59,7 +54,6 @@ # Let's try streaming responses as well # In[ ]: - # Streaming Example stream = ollama.chat( model="mistral", @@ -76,7 +70,6 @@ print(chunk["message"]["content"], end="") # In[ ]: - # Conversation Example messages = [ { @@ -101,6 +94,5 @@ # Finally, let's end our AgentOps session # In[ ]: - agentops.end_session("Success") From c64d7d2829f10acc081b57fa52bcd9ac3bb8f26c Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 09:06:18 +0000 Subject: [PATCH 09/11] style: apply ruff-format changes to ollama examples Co-Authored-By: Alex Reibman --- examples/ollama_examples/ollama_examples.py | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/ollama_examples/ollama_examples.py b/examples/ollama_examples/ollama_examples.py index 17394b9a..ca9e4674 100644 --- a/examples/ollama_examples/ollama_examples.py +++ b/examples/ollama_examples/ollama_examples.py @@ -95,4 +95,3 @@ # In[ ]: agentops.end_session("Success") - From d9a0a378b53fa1860b525566a202fe1e327777d8 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 09:15:22 +0000 Subject: [PATCH 10/11] fix: ensure proper event tracking in Ollama provider Co-Authored-By: Alex Reibman --- agentops/llms/providers/ollama.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/agentops/llms/providers/ollama.py b/agentops/llms/providers/ollama.py index ece199fc..e944469c 100644 --- a/agentops/llms/providers/ollama.py +++ b/agentops/llms/providers/ollama.py @@ -18,6 +18,8 @@ class OllamaProvider(InstrumentedProvider): def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> dict: llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs) + if session is not None: + llm_event.session_id = session.session_id def handle_stream_chunk(chunk: dict): message = chunk.get("message", {"role": None, "content": ""}) @@ -29,7 +31,7 @@ def handle_stream_chunk(chunk: dict): llm_event.returns["message"] = llm_event.completion llm_event.prompt = kwargs["messages"] llm_event.agent_id = check_call_stack_for_agent_id() - self.client.record(llm_event) + self._safe_record(session, llm_event) if llm_event.completion is None: llm_event.completion = { @@ -103,7 +105,7 @@ def patched_function(*args, **kwargs): # Call the original function with its original arguments init_timestamp = get_ISO_time() result = original_func["ollama.Client.chat"](*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp) + return self.handle_response(result, kwargs, init_timestamp, session=kwargs.get("session", None)) # Override the original method with the patched one Client.chat = patched_function @@ -111,13 +113,14 @@ def patched_function(*args, **kwargs): def _override_chat_async_client(self): from ollama import AsyncClient + original_func = {} original_func["ollama.AsyncClient.chat"] = AsyncClient.chat async def patched_function(*args, **kwargs): # Call the original function with its original arguments init_timestamp = get_ISO_time() result = await original_func["ollama.AsyncClient.chat"](*args, **kwargs) - return self.handle_response(result, kwargs, init_timestamp) + return self.handle_response(result, kwargs, init_timestamp, session=kwargs.get("session", None)) # Override the original method with the patched one AsyncClient.chat = 
patched_function From a8270545ba9f654ea59577965d44782095fca07b Mon Sep 17 00:00:00 2001 From: Alex Reibman Date: Tue, 17 Dec 2024 01:29:49 -0800 Subject: [PATCH 11/11] Delete examples/ollama_examples/ollama_examples.py --- examples/ollama_examples/ollama_examples.py | 97 --------------------- 1 file changed, 97 deletions(-) delete mode 100644 examples/ollama_examples/ollama_examples.py diff --git a/examples/ollama_examples/ollama_examples.py b/examples/ollama_examples/ollama_examples.py deleted file mode 100644 index ca9e4674..00000000 --- a/examples/ollama_examples/ollama_examples.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# # AgentOps Ollama Integration -# -# This example demonstrates how to use AgentOps to monitor your Ollama LLM calls. -# -# First let's install the required packages -# -# > ⚠️ **Important**: Make sure you have Ollama installed and running locally before running this notebook. You can install it from [ollama.ai](https://ollama.com). - -# In[ ]: -# Then import them - -# In[2]: -import ollama -import agentops -import os -from dotenv import load_dotenv - -# Next, we'll set our API keys. For Ollama, we'll need to make sure Ollama is running locally. -# [Get an AgentOps API key](https://agentops.ai/settings/projects) -# -# 1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or... -# 2. Replace `` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo! - -# In[3]: -# Let's load our environment variables -load_dotenv() - -AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY") or "" - -# Initialize AgentOps with some default tags -agentops.init(AGENTOPS_API_KEY, default_tags=["ollama-example"]) - -# Now let's make some basic calls to Ollama. Make sure you have pulled the model first, use the following or replace with whichever model you want to use. - -# In[ ]: -ollama.pull("mistral") - -# In[ ]: -# Basic completion, -response = ollama.chat( - model="mistral", - messages=[ - { - "role": "user", - "content": "What are the benefits of using AgentOps for monitoring LLMs?", - }, - ], -) -print(response["message"]["content"]) - -# Let's try streaming responses as well - -# In[ ]: -# Streaming Example -stream = ollama.chat( - model="mistral", - messages=[ - { - "role": "user", - "content": "Write a haiku about monitoring AI agents", - }, - ], - stream=True, -) - -for chunk in stream: - print(chunk["message"]["content"], end="") - -# In[ ]: -# Conversation Example -messages = [ - { - "role": "user", - "content": "What is AgentOps?", - }, - { - "role": "assistant", - "content": "AgentOps is a monitoring and observability platform for LLM applications.", - }, - { - "role": "user", - "content": "Can you give me 3 key features?", - }, -] - -response = ollama.chat(model="mistral", messages=messages) -print(response["message"]["content"]) - -# > 💡 **Note**: In production environments, you should add proper error handling around the Ollama calls and use `agentops.end_session("Error")` when exceptions occur. - -# Finally, let's end our AgentOps session - -# In[ ]: -agentops.end_session("Success")
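The example script removed in the final patch carried a production note: wrap the Ollama calls in error handling and call agentops.end_session("Error") when an exception occurs. A minimal sketch of that pattern, assembled from the deleted example (patch 05) and hedged accordingly — it assumes agentops, ollama, and python-dotenv are installed, an Ollama server is running locally, and the mistral model has already been pulled; the prompt text is taken from the example and is purely illustrative:

import os

import agentops
import ollama
from dotenv import load_dotenv

# Load AGENTOPS_API_KEY from a .env file; agentops.init() reads it from the
# environment by default.
load_dotenv()
agentops.init(os.getenv("AGENTOPS_API_KEY"), default_tags=["ollama-example"])

try:
    # With the Ollama provider instrumented, this chat call is recorded as an
    # LLM event against the active AgentOps session.
    response = ollama.chat(
        model="mistral",
        messages=[{"role": "user", "content": "Write a haiku about monitoring AI agents"}],
    )
    print(response["message"]["content"])
except Exception:
    # Close the session with an error status when the Ollama call fails.
    agentops.end_session("Error")
    raise
else:
    agentops.end_session("Success")

The try/except/else shape ensures the session is ended exactly once, with "Success" on the happy path and "Error" on failure, which is the behavior the example's production note describes.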