Update documentation

allenanie committed Dec 10, 2024
1 parent 9735c27 commit f21de5f

Showing 4 changed files with 122 additions and 58 deletions.
24 changes: 23 additions & 1 deletion _modules/opto/utils/llm.html
Source code for opto.utils.llm (excerpt; unchanged code elided):

```python
from typing import List, Tuple, Dict, Any, Callable, Union
import os
import time
import json
import autogen  # We import autogen here to avoid the need of installing autogen

# ... (unchanged code elided) ...

    def __init__(self, config_list: List = None, filter_dict: Dict = None, reset_freq: Union[int, None] = None) -> None:
        if config_list is None:
            try:
                config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")
            except Exception:
                # Fall back to constructing a config list from API keys in the environment
                config_list = auto_construct_oai_config_list_from_env()
                os.environ.update({"OAI_CONFIG_LIST": json.dumps(config_list)})
                config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")
        if filter_dict is not None:
            config_list = autogen.filter_config_list(config_list, filter_dict)

    # ... (unchanged code elided) ...

        return self._model.create(**config)


def auto_construct_oai_config_list_from_env() -> List:
    """
    Collect various API keys saved in the environment and return a format like:
    [{"model": "gpt-4o", "api_key": xxx}, {"model": "claude-3-5-sonnet-latest", "api_key": xxx}]

    Note this is a lazy function that defaults to gpt-4o and claude-3-5-sonnet-latest.
    If you want to specify your own model, please provide an OAI_CONFIG_LIST in the environment or as a file.
    """
    config_list = []
    if os.environ.get("OPENAI_API_KEY") is not None:
        config_list.append({"model": "gpt-4o", "api_key": os.environ.get("OPENAI_API_KEY")})
    if os.environ.get("ANTHROPIC_API_KEY") is not None:
        config_list.append({"model": "claude-3-5-sonnet-latest", "api_key": os.environ.get("ANTHROPIC_API_KEY")})
    return config_list
```
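As a quick check of the new fallback path, here is a minimal usage sketch; the key value is hypothetical, and we assume only `OPENAI_API_KEY` is present in the environment:

```python
import json
import os

os.environ["OPENAI_API_KEY"] = "sk-..."    # hypothetical key, for illustration only
os.environ.pop("ANTHROPIC_API_KEY", None)  # assume no Anthropic key is set

from opto.utils.llm import auto_construct_oai_config_list_from_env

config_list = auto_construct_oai_config_list_from_env()
print(json.dumps(config_list))
# [{"model": "gpt-4o", "api_key": "sk-..."}]
```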

75 changes: 51 additions & 24 deletions _sources/intro.md
**Trace is a Python library for tracing and optimizing workflows end-to-end by using LLM-powered generative optimizers.**
**It can record *traces* of operations on arbitrary Python objects and functions, and automatically construct an execution graph that is useful when LLMs are used as optimizers.**


<a href="https://colab.research.google.com/github/microsoft/Trace/blob/experimental/docs/examples/basic/greeting.ipynb" rel="nofollow" target="_blank"><img src="https://camo.githubusercontent.com/96889048f8a9014fdeba2a891f97150c6aac6e723f5190236b10215a97ed41f3/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667" alt="Open In Colab" data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" style="width: 120px;"></a>

Our implementation is minimal and purely based on Python. It does not involve any API calls or library-specific dependencies, so it is composable with other libraries and tools.
Trace features an API design inspired by PyTorch Autograd's gradient-tape mechanism, which we adopted to make Trace easier to learn.
These features make Trace an intuitive and flexible framework for building self-adapting AI agents.

```{image} images/agent_workflow.png
:alt: overview
:class: bg-primary mb-1
```

This step is the **declare** phase, where a user chooses how to represent the agent in code.
After the user has declared the inputs and operations, Trace captures the execution flow of the program as a graph. This step is the **forward** phase.
Finally, the user can use Trace to optimize the entire program, for example by updating the LLM instructions. This step is the **optimize** phase.

```python
@trace.model
class Agent:

    def __init__(self, system_prompt):
        self.system_prompt = system_prompt
        self.instruct1 = trace.node("Decide the language", trainable=True)
        self.instruct2 = trace.node("Extract name", trainable=True)

    def __call__(self, user_query):
        # First LLM
        response = call_llm(self.system_prompt, self.instruct1, user_query)
        en_or_es = self.decide_lang(response)
        # Second LLM
        user_name = call_llm(self.system_prompt, self.instruct2, user_query)
        greeting = self.greet(en_or_es, user_name)
        return greeting

    @trace.bundle(trainable=True)
    def decide_lang(self, response):
        """Map the language into a variable"""
        return

    @trace.bundle(trainable=True)
    def greet(self, lang, user_name):
        """Produce a greeting based on the language"""
        greeting = "Hola"
        return f"{greeting}, {user_name}!"
```
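The example leaves `call_llm` undefined. A minimal sketch of one possible implementation follows, using the OpenAI Python client; the client setup, model choice, and `@trace.bundle()` decoration here are illustrative assumptions, not the library's canonical helper:

```python
import openai
from opto import trace

client = openai.OpenAI()  # assumes OPENAI_API_KEY is set in the environment

@trace.bundle()  # record the LLM call as one operator in the trace graph
def call_llm(system_prompt, instruction, user_query):
    """Send the system prompt, an instruction, and the user query to the LLM."""
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": f"{system_prompt} {instruction}"},
            {"role": "user", "content": user_query},
        ],
    )
    return response.choices[0].message.content
```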

::::{grid}
:class-container: text-center
:gutter: 3

:::{grid-item-card} Native Python Support

Write Python programs as usual and use Trace to capture the execution flow of the program as a graph.
:::

:::{grid-item-card} Trace Graph as Protocol

The Trace graph represents the execution flow of the program and serves as a universal representation protocol for AI systems.
:::

:::{grid-item-card} End-to-End Optimization

Optimize the entire AI system end-to-end with Trace-graph-compatible optimizers.
:::

::::

Each application of Trace is defined by an **agent**, a source of **feedback**, and an **optimizer**.
Enabling traces of operations on Python objects allows us to capture the execution flow of an agent, including AI systems that involve LLMs.
In the example below, we show how Trace can optimize an entire AI system end-to-end.

```python
agent = Agent("You are a sales assistant.")
optimizer = OptoPrime(agent.parameters())

try:
    greeting = agent("Hola, soy Juan.")
    feedback = feedback_fn(greeting.data, 'es')
    # feedback = "Correct" or "Incorrect"
except ExecutionError as e:
    greeting = e.exception_node
    feedback = greeting.data

optimizer.zero_feedback()
optimizer.backward(greeting, feedback)
optimizer.step()
```
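The `feedback_fn` above is left undefined; any function that grades the output works. A minimal hypothetical sketch consistent with the comment in the snippet:

```python
def feedback_fn(greeting: str, target_lang: str) -> str:
    """Return "Correct" or "Incorrect" depending on whether the greeting
    matches the target language ('en' or 'es')."""
    # Hypothetical checker for this tutorial: a Spanish query should be
    # answered with a Spanish greeting ("Hola, ...").
    expected = "Hola" if target_lang == "es" else "Hello"
    return "Correct" if greeting.startswith(expected) else "Incorrect"
```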

----
