Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
65 changes: 21 additions & 44 deletions docs/getting_started.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -1635,18 +1635,14 @@
"source": [
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"from termcolor import cprint\n",
"\n",
"agent_config = AgentConfig(\n",
"agent = Agent(\n",
" client, \n",
" model=model_id,\n",
" instructions=\"You are a helpful assistant\",\n",
" toolgroups=[\"builtin::websearch\"],\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
" tools=[\"builtin::websearch\"],\n",
")\n",
"agent = Agent(client, agent_config)\n",
"user_prompts = [\n",
" \"Hello\",\n",
" \"Which teams played in the NBA western conference finals of 2024\",\n",
Expand Down Expand Up @@ -1815,7 +1811,6 @@
"import uuid\n",
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"from termcolor import cprint\n",
"from llama_stack_client.types import Document\n",
"\n",
Expand All @@ -1841,11 +1836,11 @@
" vector_db_id=vector_db_id,\n",
" chunk_size_in_tokens=512,\n",
")\n",
"agent_config = AgentConfig(\n",
"rag_agent = Agent(\n",
" client, \n",
" model=model_id,\n",
" instructions=\"You are a helpful assistant\",\n",
" enable_session_persistence=False,\n",
" toolgroups = [\n",
" tools = [\n",
" {\n",
" \"name\": \"builtin::rag/knowledge_search\",\n",
" \"args\" : {\n",
Expand All @@ -1854,7 +1849,6 @@
" }\n",
" ],\n",
")\n",
"rag_agent = Agent(client, agent_config)\n",
"session_id = rag_agent.create_session(\"test-session\")\n",
"user_prompts = [\n",
" \"What are the top 5 topics that were explained? Only list succinct bullet points.\",\n",
Expand Down Expand Up @@ -1978,23 +1972,19 @@
"source": [
"from llama_stack_client.types.agents.turn_create_params import Document\n",
"\n",
"agent_config = AgentConfig(\n",
" sampling_params = {\n",
" \"max_tokens\" : 4096,\n",
" \"temperature\": 0.0\n",
" },\n",
"codex_agent = Agent(\n",
" client, \n",
" model=\"meta-llama/Llama-3.1-8B-Instruct\",\n",
" instructions=\"You are a helpful assistant\",\n",
" toolgroups=[\n",
" tools=[\n",
" \"builtin::code_interpreter\",\n",
" \"builtin::websearch\"\n",
" ],\n",
" tool_choice=\"auto\",\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
" sampling_params = {\n",
" \"max_tokens\" : 4096,\n",
" \"temperature\": 0.0\n",
" },\n",
")\n",
"codex_agent = Agent(client, agent_config)\n",
"session_id = codex_agent.create_session(\"test-session\")\n",
"\n",
"\n",
Expand Down Expand Up @@ -2904,18 +2894,14 @@
"# NBVAL_SKIP\n",
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"from termcolor import cprint\n",
"\n",
"agent_config = AgentConfig(\n",
"agent = Agent(\n",
" client, \n",
" model=model_id,\n",
" instructions=\"You are a helpful assistant\",\n",
" toolgroups=[\"mcp::filesystem\"],\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
" tools=[\"mcp::filesystem\"],\n",
")\n",
"agent = Agent(client, agent_config)\n",
"user_prompts = [\n",
" \"Hello\",\n",
" \"list all the files /content\",\n",
Expand Down Expand Up @@ -3010,17 +2996,13 @@
"source": [
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"\n",
"agent_config = AgentConfig(\n",
"agent = Agent(\n",
" client, \n",
" model=\"meta-llama/Llama-3.3-70B-Instruct\",\n",
" instructions=\"You are a helpful assistant. Use search tool to answer the questions. \",\n",
" toolgroups=[\"builtin::websearch\"],\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
" tools=[\"builtin::websearch\"],\n",
")\n",
"agent = Agent(client, agent_config)\n",
"user_prompts = [\n",
" \"Which teams played in the NBA western conference finals of 2024. Search the web for the answer.\",\n",
" \"In which episode and season of South Park does Bill Cosby (BSM-471) first appear? Give me the number and title. Search the web for the answer.\",\n",
Expand Down Expand Up @@ -4346,16 +4328,11 @@
}
],
"source": [
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"\n",
"agent_config = AgentConfig(\n",
"agent = Agent(\n",
" client, \n",
" model=vision_model_id,\n",
" instructions=\"You are a helpful assistant\",\n",
" enable_session_persistence=False,\n",
" toolgroups=[],\n",
")\n",
"\n",
"agent = Agent(client, agent_config)\n",
"session_id = agent.create_session(\"test-session\")\n",
"\n",
"response = agent.create_turn(\n",
Expand Down
77 changes: 34 additions & 43 deletions docs/notebooks/Llama_Stack_Agent_Workflows.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,6 @@
"source": [
"from llama_stack_client import LlamaStackClient\n",
"from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"from llama_stack_client.lib.agents.agent import Agent\n",
"from rich.pretty import pprint\n",
"import json\n",
Expand All @@ -71,20 +70,12 @@
"\n",
"MODEL_ID = \"meta-llama/Llama-3.3-70B-Instruct\"\n",
"\n",
"base_agent_config = AgentConfig(\n",
"base_agent_config = dict(\n",
" model=MODEL_ID,\n",
" instructions=\"You are a helpful assistant.\",\n",
" sampling_params={\n",
" \"strategy\": {\"type\": \"top_p\", \"temperature\": 1.0, \"top_p\": 0.9},\n",
" },\n",
" toolgroups=[],\n",
" tool_config={\n",
" \"tool_choice\": \"auto\",\n",
" \"tool_prompt_format\": \"python_list\",\n",
" },\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
")"
]
},
Expand Down Expand Up @@ -172,7 +163,7 @@
}
],
"source": [
"vanilla_agent_config = AgentConfig({\n",
"vanilla_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": \"\"\"\n",
" You are a helpful assistant capable of structuring data extraction and formatting. \n",
Expand All @@ -189,9 +180,9 @@
" Employee satisfaction is at 87 points.\n",
" Operating margin improved to 34%.\n",
" \"\"\",\n",
"})\n",
"}\n",
"\n",
"vanilla_agent = Agent(client, vanilla_agent_config)\n",
"vanilla_agent = Agent(client, **vanilla_agent_config)\n",
"prompt_chaining_session_id = vanilla_agent.create_session(session_name=f\"vanilla_agent_{uuid.uuid4()}\")\n",
"\n",
"prompts = [\n",
Expand Down Expand Up @@ -778,7 +769,7 @@
],
"source": [
"# 1. Define a couple of specialized agents\n",
"billing_agent_config = AgentConfig({\n",
"billing_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": \"\"\"You are a billing support specialist. Follow these guidelines:\n",
" 1. Always start with \"Billing Support Response:\"\n",
Expand All @@ -789,9 +780,9 @@
" \n",
" Keep responses professional but friendly.\n",
" \"\"\",\n",
"})\n",
"}\n",
"\n",
"technical_agent_config = AgentConfig({\n",
"technical_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": \"\"\"You are a technical support engineer. Follow these guidelines:\n",
" 1. Always start with \"Technical Support Response:\"\n",
Expand All @@ -802,9 +793,9 @@
" \n",
" Use clear, numbered steps and technical details.\n",
" \"\"\",\n",
"})\n",
"}\n",
"\n",
"account_agent_config = AgentConfig({\n",
"account_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": \"\"\"You are an account security specialist. Follow these guidelines:\n",
" 1. Always start with \"Account Support Response:\"\n",
Expand All @@ -815,9 +806,9 @@
" \n",
" Maintain a serious, security-focused tone.\n",
" \"\"\",\n",
"})\n",
"}\n",
"\n",
"product_agent_config = AgentConfig({\n",
"product_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": \"\"\"You are a product specialist. Follow these guidelines:\n",
" 1. Always start with \"Product Support Response:\"\n",
Expand All @@ -828,21 +819,21 @@
" \n",
" Be educational and encouraging in tone.\n",
" \"\"\",\n",
"})\n",
"}\n",
"\n",
"specialized_agents = {\n",
" \"billing\": Agent(client, billing_agent_config),\n",
" \"technical\": Agent(client, technical_agent_config),\n",
" \"account\": Agent(client, account_agent_config),\n",
" \"product\": Agent(client, product_agent_config),\n",
" \"billing\": Agent(client, **billing_agent_config),\n",
" \"technical\": Agent(client, **technical_agent_config),\n",
" \"account\": Agent(client, **account_agent_config),\n",
" \"product\": Agent(client, **product_agent_config),\n",
"}\n",
"\n",
"# 2. Define a routing agent\n",
"class OutputSchema(BaseModel):\n",
" reasoning: str\n",
" support_team: str\n",
"\n",
"routing_agent_config = AgentConfig({\n",
"routing_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": f\"\"\"You are a routing agent. Analyze the user's input and select the most appropriate support team from these options: \n",
"\n",
Expand All @@ -862,9 +853,9 @@
" \"type\": \"json_schema\",\n",
" \"json_schema\": OutputSchema.model_json_schema()\n",
" }\n",
"})\n",
"}\n",
"\n",
"routing_agent = Agent(client, routing_agent_config)\n",
"routing_agent = Agent(client, **routing_agent_config)\n",
"\n",
"# 3. Create a session for all agents\n",
"routing_agent_session_id = routing_agent.create_session(session_name=f\"routing_agent_{uuid.uuid4()}\")\n",
Expand Down Expand Up @@ -1725,17 +1716,17 @@
"from concurrent.futures import ThreadPoolExecutor\n",
"from typing import List\n",
"\n",
"worker_agent_config = AgentConfig({\n",
"worker_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": \"\"\"You are a helpful assistant that can analyze the impact of market changes on stakeholders.\n",
" Analyze how market changes will impact this stakeholder group.\n",
" Provide specific impacts and recommended actions.\n",
" Format with clear sections and priorities.\n",
" \"\"\",\n",
"})\n",
"}\n",
"\n",
"def create_worker_task(task: str):\n",
" worker_agent = Agent(client, worker_agent_config)\n",
" worker_agent = Agent(client, **worker_agent_config)\n",
" worker_session_id = worker_agent.create_session(session_name=f\"worker_agent_{uuid.uuid4()}\")\n",
" task_response = worker_agent.create_turn(\n",
" messages=[{\"role\": \"user\", \"content\": task}],\n",
Expand Down Expand Up @@ -2248,7 +2239,7 @@
" thoughts: str\n",
" response: str\n",
"\n",
"generator_agent_config = AgentConfig({\n",
"generator_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": \"\"\"Your goal is to complete the task based on <user input>. If there are feedback \n",
" from your previous generations, you should reflect on them to improve your solution\n",
Expand All @@ -2263,13 +2254,13 @@
" \"type\": \"json_schema\",\n",
" \"json_schema\": GeneratorOutputSchema.model_json_schema()\n",
" }\n",
"})\n",
"}\n",
"\n",
"class EvaluatorOutputSchema(BaseModel):\n",
" evaluation: str\n",
" feedback: str\n",
"\n",
"evaluator_agent_config = AgentConfig({\n",
"evaluator_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": \"\"\"Evaluate this following code implementation for:\n",
" 1. code correctness\n",
Expand All @@ -2293,10 +2284,10 @@
" \"type\": \"json_schema\",\n",
" \"json_schema\": EvaluatorOutputSchema.model_json_schema()\n",
" }\n",
"})\n",
"}\n",
"\n",
"generator_agent = Agent(client, generator_agent_config)\n",
"evaluator_agent = Agent(client, evaluator_agent_config)\n",
"generator_agent = Agent(client, **generator_agent_config)\n",
"evaluator_agent = Agent(client, **evaluator_agent_config)\n",
"generator_session_id = generator_agent.create_session(session_name=f\"generator_agent_{uuid.uuid4()}\")\n",
"evaluator_session_id = evaluator_agent.create_session(session_name=f\"evaluator_agent_{uuid.uuid4()}\")\n",
"\n",
Expand Down Expand Up @@ -2628,7 +2619,7 @@
" analysis: str\n",
" tasks: List[Dict[str, str]]\n",
"\n",
"orchestrator_agent_config = AgentConfig({\n",
"orchestrator_agent_config = {\n",
" **base_agent_config,\n",
"    \"instructions\": \"\"\"Your job is to analyze the task provided by the user and break it down into 2-3 distinct approaches:\n",
"\n",
Expand All @@ -2651,9 +2642,9 @@
" \"type\": \"json_schema\",\n",
" \"json_schema\": OrchestratorOutputSchema.model_json_schema()\n",
" }\n",
"})\n",
"}\n",
"\n",
"worker_agent_config = AgentConfig({\n",
"worker_agent_config = {\n",
" **base_agent_config,\n",
" \"instructions\": \"\"\"You will be given a task guideline. Generate content based on the provided\n",
" task, following the style and guideline descriptions. \n",
Expand All @@ -2662,7 +2653,7 @@
"\n",
" Response: Your content here, maintaining the specified style and fully addressing requirements.\n",
" \"\"\",\n",
"})\n"
"}\n"
]
},
{
Expand All @@ -2673,7 +2664,7 @@
"source": [
"def orchestrator_worker_workflow(task, context):\n",
" # single orchestrator agent\n",
" orchestrator_agent = Agent(client, orchestrator_agent_config)\n",
" orchestrator_agent = Agent(client, **orchestrator_agent_config)\n",
" orchestrator_session_id = orchestrator_agent.create_session(session_name=f\"orchestrator_agent_{uuid.uuid4()}\")\n",
"\n",
" orchestrator_response = orchestrator_agent.create_turn(\n",
Expand All @@ -2689,7 +2680,7 @@
" workers = {}\n",
" # spawn multiple worker agents\n",
" for task in orchestrator_result[\"tasks\"]:\n",
" worker_agent = Agent(client, worker_agent_config)\n",
" worker_agent = Agent(client, **worker_agent_config)\n",
" worker_session_id = worker_agent.create_session(session_name=f\"worker_agent_{uuid.uuid4()}\")\n",
" workers[task[\"type\"]] = worker_agent\n",
" \n",
Expand Down
Loading