
Fix comments to end with periods #5

Merged 2 commits on May 19, 2025
78 changes: 39 additions & 39 deletions src/agents/_run_impl.py
@@ -134,7 +134,7 @@ class ProcessedResponse:
tools_used: list[str] # Names of all tools used, including hosted tools

def has_tools_to_run(self) -> bool:
# Handoffs, functions and computer actions need local processing
# Handoffs, functions, and computer actions need local processing.
# Hosted tools have already run, so there's nothing to do.
return any(
[
@@ -202,9 +202,9 @@ async def execute_tools_and_side_effects(
cls,
*,
agent: Agent[TContext],
# The original input to the Runner
# The original input to the Runner.
original_input: str | list[TResponseInputItem],
# Everything generated by Runner since the original input, but before the current step
# Everything generated by Runner since the original input, but before the current step.
pre_step_items: list[RunItem],
new_response: ModelResponse,
processed_response: ProcessedResponse,
@@ -213,13 +213,13 @@ async def execute_tools_and_side_effects(
context_wrapper: RunContextWrapper[TContext],
run_config: RunConfig,
) -> SingleStepResult:
# Make a copy of the generated items
# Make a copy of the generated items.
pre_step_items = list(pre_step_items)

new_step_items: list[RunItem] = []
new_step_items.extend(processed_response.new_items)

# First, lets run the tool calls - function tools and computer actions
# First, let us run the tool calls: function tools and computer actions.
function_results, computer_results = await asyncio.gather(
cls.execute_function_tool_calls(
agent=agent,
@@ -239,7 +239,7 @@ async def execute_tools_and_side_effects(
new_step_items.extend([result.run_item for result in function_results])
new_step_items.extend(computer_results)

# Second, check if there are any handoffs
# Second, check whether there are any handoffs.
if run_handoffs := processed_response.handoffs:
return await cls.execute_handoffs(
agent=agent,
@@ -253,7 +253,7 @@ async def execute_tools_and_side_effects(
run_config=run_config,
)

# Third, we'll check if the tool use should result in a final output
# Third, we check whether the tool use should result in a final output.
check_tool_use = await cls._check_for_final_output_from_tools(
agent=agent,
tool_results=function_results,
@@ -262,7 +262,7 @@ async def execute_tools_and_side_effects(
)

if check_tool_use.is_final_output:
# If the output type is str, then let's just stringify it
# If the output type is str, then just stringify it.
if not agent.output_type or agent.output_type is str:
check_tool_use.final_output = str(check_tool_use.final_output)

@@ -283,17 +283,17 @@ async def execute_tools_and_side_effects(
context_wrapper=context_wrapper,
)

# Now we can check if the model also produced a final output
# Now we can check whether the model also produced a final output.
message_items = [item for item in new_step_items if isinstance(item, MessageOutputItem)]

# We'll use the last content output as the final output
# We use the last content output as the final output.
potential_final_output_text = (
ItemHelpers.extract_last_text(message_items[-1].raw_item) if message_items else None
)

# There are two possibilities that lead to a final output:
# 1. Structured output schema => always leads to a final output
# 2. Plain text output schema => only leads to a final output if there are no tool calls
# There are two possibilities that lead to a final output.
# 1. Structured output schema always leads to a final output.
# 2. Plain text output schema leads to a final output only if there are no tool calls.
if output_schema and not output_schema.is_plain_text() and potential_final_output_text:
final_output = output_schema.validate_json(potential_final_output_text)
return await cls.execute_final_output(
@@ -320,7 +320,7 @@ async def execute_tools_and_side_effects(
context_wrapper=context_wrapper,
)
else:
# If there's no final output, we can just run again
# If there is no final output, we can run again.
return SingleStepResult(
original_input=original_input,
model_response=new_response,
@@ -392,21 +392,21 @@ def process_model_response(
logger.warning(f"Unexpected output type, ignoring: {type(output)}")
continue

# At this point we know it's a function tool call
# At this point we know it is a function tool call.
if not isinstance(output, ResponseFunctionToolCall):
continue

tools_used.append(output.name)

# Handoffs
# Handoffs.
if output.name in handoff_map:
items.append(HandoffCallItem(raw_item=output, agent=agent))
handoff = ToolRunHandoff(
tool_call=output,
handoff=handoff_map[output.name],
)
run_handoffs.append(handoff)
# Regular function tool call
# Regular function tool call.
else:
if output.name not in function_map:
_error_tracing.attach_error_to_current_span(
@@ -513,7 +513,7 @@ async def execute_computer_actions(
config: RunConfig,
) -> list[RunItem]:
results: list[RunItem] = []
# Need to run these serially, because each action can affect the computer state
# These must run serially because each action can affect the computer state.
for action in actions:
results.append(
await ComputerAction.execute(
@@ -541,7 +541,7 @@ async def execute_handoffs(
context_wrapper: RunContextWrapper[TContext],
run_config: RunConfig,
) -> SingleStepResult:
# If there is more than one handoff, add tool responses that reject those handoffs
# If there is more than one handoff, add tool responses that reject those handoffs.
multiple_handoffs = len(run_handoffs) > 1
if multiple_handoffs:
output_message = "Multiple handoffs detected, ignoring this one."
@@ -576,7 +576,7 @@ async def execute_handoffs(
)
)

# Append a tool output item for the handoff
# Append a tool output item for the handoff.
new_step_items.append(
HandoffOutputItem(
agent=agent,
@@ -589,7 +589,7 @@ async def execute_handoffs(
)
)

# Execute handoff hooks
# Execute handoff hooks.
await asyncio.gather(
hooks.on_handoff(
context=context_wrapper,
@@ -607,7 +607,7 @@ async def execute_handoffs(
),
)

# If there's an input filter, filter the input for the next agent
# If there is an input filter, filter the input for the next agent.
input_filter = handoff.input_filter or (
run_config.handoff_input_filter if run_config else None
)
@@ -669,7 +669,7 @@ async def execute_final_output(
hooks: RunHooks[TContext],
context_wrapper: RunContextWrapper[TContext],
) -> SingleStepResult:
# Run the on_end hooks
# Run the on_end hooks.
await cls.run_final_output_hooks(agent, hooks, context_wrapper, final_output)

return SingleStepResult(
@@ -862,22 +862,22 @@ async def execute(
),
)

# Cache screenshots to avoid resending duplicate images.
image_id, is_new = _cache_screenshot(output)
if is_new:
image_url = f"data:image/png;base64,{output}"
raw_output = {
"type": "computer_screenshot",
"image_url": image_url,
"image_id": image_id,
}
final_output = image_url
else:
raw_output = {
"type": "computer_screenshot_ref",
"image_id": image_id,
}
final_output = image_id
# TODO: Cache screenshots; avoid resending duplicate images.
image_id, is_new = _cache_screenshot(output)
if is_new:
image_url = f"data:image/png;base64,{output}"
raw_output = {
"type": "computer_screenshot",
"image_url": image_url,
"image_id": image_id,
}
final_output = image_url
else:
raw_output = {
"type": "computer_screenshot_ref",
"image_id": image_id,
}
final_output = image_id
return ToolCallOutputItem(
agent=agent,
output=final_output,
7 changes: 3 additions & 4 deletions src/agents/agent_output.py
@@ -92,8 +92,7 @@ def __init__(self, output_type: type[Any], strict_json_schema: bool = True):
self._output_schema = self._type_adapter.json_schema()
return

# We should wrap for things that are not plain text, and for things that would definitely
# not be a JSON Schema object.
# We should wrap for things that are not plain text and for things that would definitely not be a JSON Schema object.
self._is_wrapped = not _is_subclass_of_base_model_or_dict(output_type)

if self._is_wrapped:
@@ -172,11 +171,11 @@ def _is_subclass_of_base_model_or_dict(t: Any) -> bool:
if not isinstance(t, type):
return False

# If it's a generic alias, 'origin' will be the actual type, e.g. 'list'
# If it is a generic alias, 'origin' will be the actual type, e.g. 'list'.
origin = get_origin(t)

allowed_types = (BaseModel, dict)
# If it's a generic alias e.g. list[str], then we should check the origin type i.e. list
# If it is a generic alias, e.g. list[str], then we should check the origin type, i.e. list.
return issubclass(origin or t, allowed_types)


4 changes: 2 additions & 2 deletions src/agents/extensions/handoff_prompt.py
@@ -1,5 +1,5 @@
# A recommended prompt prefix for agents that use handoffs. We recommend including this or
# similar instructions in any agents that use handoffs.
# A recommended prompt prefix for agents that use handoffs.
# We recommend including this or similar instructions in any agents that use handoffs.
RECOMMENDED_PROMPT_PREFIX = (
"# System context\n"
"You are part of a multi-agent system called the Agents SDK, designed to make agent "
4 changes: 2 additions & 2 deletions src/agents/extensions/visualization.py
@@ -43,14 +43,14 @@ def get_all_nodes(agent: Agent, parent: Optional[Agent] = None) -> str:
"""
parts = []

# Start and end the graph
# Start and end the graph.
parts.append(
'"__start__" [label="__start__", shape=ellipse, style=filled, '
"fillcolor=lightblue, width=0.5, height=0.3];"
'"__end__" [label="__end__", shape=ellipse, style=filled, '
"fillcolor=lightblue, width=0.5, height=0.3];"
)
# Ensure parent agent node is colored
# Ensure the parent agent node is colored.
if not parent:
parts.append(
f'"{agent.name}" [label="{agent.name}", shape=box, style=filled, '