Commit db86f6e: guide from doc v1
constantinidan committed Aug 20, 2024
Showing 5 changed files with 466 additions and 0 deletions.
Binary file added img/attachments.png
python/create-a-dataset/README.md (52 additions)
---
title: Create and Populate a Dataset
---

In this Python example, you will learn how to create a Dataset and populate it with example items. We will create items from a list of values. If you want to build a Dataset from existing Runs, Steps or Generations captured in production, check the API reference ([Python](/python-client/api-reference), [TypeScript](/typescript-client/api-reference/dataset#add-a-step-to-a-dataset)); a minimal sketch is also shown at the end of this guide.

Let's create a dataset of question-and-answer pairs: each item pairs a short movie description with the expected movie title.

# 1. Connect to the client

```python Python
from literalai import LiteralClient
import os

literal_client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))
```

# 2. Create a Dataset

Before we can add items to a Dataset, we need to create one.

```python Python
dataset = literal_client.api.create_dataset(
    name="movie_titles",
    description="Gold standard dataset of movie title Q&A",
    type="key_value",
)
```

# 3. Populate the Dataset

Next, we add the example items to this dataset, referencing the `id` of the dataset we just created:

```python Python
# Example items
items = [
    {"input": "A movie about love", "expected_output": "Love Actually"},
    {"input": "A movie about space travel", "expected_output": "Interstellar"},
    {"input": "A movie about science fiction", "expected_output": "Dune"},
    {"input": "A movie about superheroes", "expected_output": "The Avengers"},
    {"input": "A movie about adventure", "expected_output": "The Lord of the Rings"},
    {"input": "A movie about vikings", "expected_output": "Vikings"},
]

# Upload each item to the dataset in Literal AI
for item in items:
    literal_client.api.create_dataset_item(
        dataset_id=dataset.id,
        input={"content": item["input"]},
        expected_output={"content": item["expected_output"]},
    )
```
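
If you would rather populate the dataset from production data, you can attach existing steps instead of local values. Below is a minimal sketch, assuming the `add_step_to_dataset` method described in the API reference and a hypothetical step ID copied from an existing Run in Literal AI.

```python Python
# Hypothetical step ID taken from an existing production Run
production_step_id = "step-id-from-an-existing-run"

# Attach the production step to the dataset
# (assumes the add_step_to_dataset method from the API reference)
literal_client.api.add_step_to_dataset(
    dataset_id=dataset.id,
    step_id=production_step_id,
)
```
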
python/distributed-tracing/README.md (159 additions)
---
title: Distributed Tracing
---

Distributed tracing is a method for monitoring and visualizing the flow of requests through a distributed system. In this example, it **tracks** a request as it travels from a Python agent to an external TypeScript weather service and back through the AI model's processing. With **distributed tracing**, developers can gain insight into the **performance** of their system, **debug** issues, and understand the behavior of complex, interconnected services.

## TypeScript

In the TypeScript example, a `LiteralClient` creates a step that logs a particular action, in this case fetching weather data. The step is attached to the `parentStepId` received in the request body, which links it to the caller's trace and allows the request's journey to be **traced** across services.

```typescript server.ts
import * as http from "http";
import { LiteralClient } from "@literalai/client";
const literalClient = new LiteralClient();

const server = http.createServer(async (req, res) => {
if (req.method === 'POST' && req.url === '/get-weather') {
let body = '';
req.on('data', (chunk) => {
body += chunk.toString(); // convert Buffer to string
});
req.on('end', async () => {
const { location, unit, parentStepId } = JSON.parse(body);
console.log(location, unit, parentStepId);

// Log the step with the received parent_step_id
      const weatherData = await literalClient
.step({
type: 'tool',
name: 'get_weather',
input: { location, unit },
parentId: parentStepId
})
.wrap(() => {
// Mock API call to the weather service
const weatherData = mockWeatherApiCall(location, unit);

return weatherData;
});

res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify(weatherData));
});
} else {
res.writeHead(404, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: 'Not found' }));
}
});

const PORT = process.env.PORT || 3000;

// Mock function to simulate weather API response
function mockWeatherApiCall(location: string, unit = 'celsius') {
// This is a mock function, replace with actual API call if needed
const weatherApiResponse = {
location: location,
temperature: unit === 'celsius' ? '15°C' : '59°F',
condition: 'Partly Cloudy',
humidity: '68%',
windSpeed: '10 km/h'
};
return weatherApiResponse;
}

server.listen(PORT, () => {
console.log(`Server running on port ${PORT}`);
});
```
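
The only contract between the two services is the JSON body: the caller sends a `location`, a `unit`, and the `parentStepId` of its current step, and the server attaches its `get_weather` tool step to that parent. A minimal sketch of such a call from Python (assuming the server above is running locally on port 3000, with a placeholder parent step ID) looks like this:

```python
import requests

# In a real agent, this ID would come from literal_client.get_current_step().id
parent_step_id = "parent-step-id-from-the-caller"

weather = requests.post(
    "http://localhost:3000/get-weather",
    json={"location": "Paris", "unit": "celsius", "parentStepId": parent_step_id},
).json()
print(weather)
```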

## Python

In the Python code, the `LiteralClient` instruments the OpenAI client, and the `@literal_client.step` decorator marks the `run()` function as a traceable step. The current step's `id` is forwarded to the TypeScript server in the request body, so the remote `get_weather` tool step is attached to the same trace.

```python app.py
from openai import OpenAI
import json
import requests
from literalai import LiteralClient

literal_client = LiteralClient()
literal_client.instrument_openai()
client = OpenAI()

# The weather tool itself lives in the TypeScript service above; this agent
# calls it over HTTP and forwards the current step id for distributed tracing.

@literal_client.step(type="run", name="weather_agent")
def run():
# Step 1: send the conversation and available functions to the model
step = literal_client.get_current_step()
messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
tools = [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
},
}
]
    response = client.chat.completions.create(
model="gpt-3.5-turbo-0125",
messages=messages,
tools=tools,
tool_choice="auto", # auto is default, but we'll be explicit
)
response_message = response.choices[0].message
tool_calls = response_message.tool_calls
# Step 2: check if the model wanted to call a function
if tool_calls:
# Step 3: call the function
# Note: the JSON response may not always be valid; be sure to handle errors

messages.append(response_message) # extend conversation with assistant's reply
# Step 4: send the info for each function call and function response to the model
for tool_call in tool_calls:
function_name = tool_call.function.name
function_args = json.loads(tool_call.function.arguments)

            function_response = requests.post(
                "http://localhost:3000/get-weather",
                headers={"Content-Type": "application/json"},
                json={
                    "location": function_args.get("location"),
                    "unit": function_args.get("unit"),
                    # Key must match what server.ts destructures from the request body
                    "parentStepId": step.id,
                },
            ).json()

messages.append(
{
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": json.dumps(function_response),
}
) # extend conversation with function response
print(messages)
        second_response = client.chat.completions.create(
model="gpt-3.5-turbo-0125",
messages=messages,
) # get a new response from the model where it can see the function response
return second_response

print(run())

literal_client.flush_and_stop()
```
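
To try the example end to end, start the TypeScript server first (for instance with `npx ts-node server.ts`, assuming a standard TypeScript setup), then run `python app.py`. In Literal AI, the `get_weather` tool step logged by the Node service appears nested under the `weather_agent` run created by the Python process, because the two are linked through the forwarded step ID.
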
python/monitor-conversational-ai-agent/README.md (181 additions)
---
title: Conversational Agent Monitoring - FastAPI
---

This example integrates an asynchronous OpenAI client with a Literal AI client to build a conversational agent.
It uses Literal AI's step decorators for structured logging and tool orchestration within the conversational flow.
The agent processes user messages, decides whether to call tools, and generates responses, with a predefined set of tools and a maximum iteration limit to prevent infinite loops.

**This example demonstrates thread-based monitoring, allowing for detailed tracking and analysis of conversational threads.**
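
In Literal AI, a Thread groups all the messages and steps of one conversation, so every call to the agent below is logged under the conversation's `thread_id`.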

```bash .env
LITERAL_API_KEY=
OPENAI_API_KEY=
```


```bash
pip install uvicorn fastapi openai literalai python-dotenv requests
```

```python server.py
import json
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletion, ChatCompletionMessage
from literalai import LiteralClient
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

from dotenv import load_dotenv
load_dotenv()


client = AsyncOpenAI()
lc = LiteralClient()
lc.instrument_openai()

MAX_ITER = 5


# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
@lc.step(type="tool", name="get_current_weather")
def get_current_weather(location, unit=None):
"""Get the current weather in a given location"""
    unit = unit or "Fahrenheit"
weather_info = {
"location": location,
"temperature": "72",
"unit": unit,
"forecast": ["sunny", "windy"],
}

return json.dumps(weather_info)


tools = [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
},
}
]


@lc.step(type="run")
async def run(message_history):

tool_called = True
cur_iter = 0
    while tool_called and cur_iter < MAX_ITER:
settings = {
"model": "gpt-4-turbo-preview",
"tools": tools,
"tool_choice": "auto",
}
# https://github.com/openai/openai-python/issues/777
for message in message_history:
if "function_call" in message and message["function_call"] is None:
del message["function_call"]
if "tool_calls" in message and message["tool_calls"] is None:
del message["tool_calls"]

        response: ChatCompletion = await client.chat.completions.create(
messages=message_history, **settings
)

message: ChatCompletionMessage = response.choices[0].message

message_history.append(message)
if not message.tool_calls:
tool_called = False

for tool_call in message.tool_calls or []:
if tool_call.type == "function":
# print(globals().keys())
                func = globals()[tool_call.function.name]
                # Parse the JSON arguments string before calling the tool
                res = func(**json.loads(tool_call.function.arguments))
message_history.append({
"role": "tool",
"name": tool_call.function.name,
"content": res,
"tool_call_id": tool_call.id,
})

cur_iter += 1

return message_history


# --------------------------------------------------------------
# --------------------------- SERVER ---------------------------
# --------------------------------------------------------------

app = FastAPI()

class ProcessInput(BaseModel):
message_history: list
thread_id: str

@app.post("/process/")
async def process_conversation(input: ProcessInput):

with lc.thread(thread_id=input.thread_id, name="demo_thread") as thread:
message_history = input.message_history
lc.message(content=message_history[-1]["content"], name="user", type="user_message")
message_history = await run(message_history)
lc.message(content=message_history[-1].content, name="assistant", type="assistant_message")

return {"message_history": message_history}

```


```bash
uvicorn server:app --reload
```

Then, you can send requests from a client:

```python client.py
import uuid

import requests

url = 'http://127.0.0.1:8000/process/'

thread_id = str(uuid.uuid4())

# First query
data1 = {
"message_history": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "what's the weather in sf"}],
"thread_id": thread_id
}

response1 = requests.post(url, json=data1)

# Second query
data2 = {
"message_history": response1.json()["message_history"] + [{"role": "user", "content": "what's the weather in paris"}],
"thread_id": thread_id
}

response2 = requests.post(url, json=data2)
if response2.status_code == 200:
print(response2.json())
else:
print(f"Error: {response2.status_code}, {response2.text}")
```
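
Because both requests reuse the same `thread_id`, the two turns are grouped under a single `demo_thread` in Literal AI, with each user message, run, tool step, and assistant reply logged in order.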