Merge pull request #555 from lion-agi/update-react2

Various Updates: check pr

ohdearquant authored Jan 29, 2025
2 parents 837d3c7 + 43dacc3 commit d2d0995
Showing 14 changed files with 4,272 additions and 5,966 deletions.
36 changes: 35 additions & 1 deletion lionagi/operations/ReAct/ReAct.py
@@ -47,6 +47,8 @@ async def ReAct(
analysis_model: iModel | None = None,
verbose_analysis: bool = False,
verbose_length: int = None,
include_token_usage_to_model: bool = True,
continue_after_failed_response: bool = False,
**kwargs,
):
outs = []
@@ -73,6 +75,8 @@ async def ReAct(
verbose_analysis=verbose_analysis,
display_as=display_as,
verbose_length=verbose_length,
include_token_usage_to_model=include_token_usage_to_model,
continue_after_failed_response=continue_after_failed_response,
**kwargs,
):
analysis, str_ = i
@@ -101,6 +105,8 @@ async def ReAct(
analysis_model=analysis_model,
display_as=display_as,
verbose_length=verbose_length,
include_token_usage_to_model=include_token_usage_to_model,
continue_after_failed_response=continue_after_failed_response,
**kwargs,
):
outs.append(i)
@@ -131,6 +137,8 @@ async def ReActStream(
verbose_analysis: bool = False,
display_as: Literal["json", "yaml"] = "yaml",
verbose_length: int = None,
include_token_usage_to_model: bool = True,
continue_after_failed_response: bool = False,
**kwargs,
) -> AsyncGenerator:
irfm: FieldModel | None = None
@@ -213,6 +221,9 @@ async def ReActStream(
kwargs_for_operate = copy(kwargs)
kwargs_for_operate["actions"] = True
kwargs_for_operate["reason"] = True
kwargs_for_operate["include_token_usage_to_model"] = (
include_token_usage_to_model
)

# Step 1: Generate initial ReAct analysis
analysis: ReActAnalysis = await branch.operate(
@@ -255,7 +266,7 @@
if isinstance(analysis, dict)
else False
)
- and (extensions if max_extensions else 0) > 0
+ and (extensions - 1 if max_extensions else 0) > 0
):
new_instruction = None
if extensions == max_extensions:
@@ -272,6 +283,9 @@
operate_kwargs["reason"] = True
operate_kwargs["response_format"] = ReActAnalysis
operate_kwargs["action_strategy"] = analysis.action_strategy
operate_kwargs["include_token_usage_to_model"] = (
include_token_usage_to_model
)
if analysis.action_batch_size:
operate_kwargs["action_batch_size"] = analysis.action_batch_size
if irfm:
@@ -289,6 +303,7 @@
operate_kwargs["guidance"] = guide + operate_kwargs.get(
"guidance", ""
)
operate_kwargs["reasoning_effort"] = reasoning_effort

analysis = await branch.operate(
instruction=new_instruction,
@@ -298,6 +313,16 @@
)
round_count += 1

if isinstance(analysis, dict) and all(
i is None for i in analysis.values()
):
if not continue_after_failed_response:
raise ValueError(
"All values in the response are None. "
"This might be due to a failed response. "
"Set `continue_after_failed_response=True` to ignore this error."
)

# If verbose, show round analysis
if verbose_analysis:
str_ = f"\n### ReAct Round No.{round_count} Analysis:\n"
@@ -329,6 +354,15 @@
response_format=response_format,
**(response_kwargs or {}),
)
if isinstance(analysis, dict) and all(
i is None for i in analysis.values()
):
if not continue_after_failed_response:
raise ValueError(
"All values in the response are None. "
"This might be due to a failed response. "
"Set `continue_after_failed_response=True` to ignore this error."
)
except Exception:
out = branch.msgs.last_response.response

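Taken together, these hunks thread two new knobs through ReAct and ReActStream: include_token_usage_to_model (defaulting to True here) has the outgoing instruction annotated with an estimated token count, and continue_after_failed_response downgrades the new all-values-None check from a hard ValueError to a continuation. A minimal usage sketch follows; the flag names and defaults come from this diff, while the Branch construction and instruct shape are assumptions for illustration.

import asyncio

from lionagi import Branch


async def main():
    # Assumes a default chat model is configured for the Branch.
    branch = Branch()

    result = await branch.ReAct(
        instruct={"instruction": "Audit the release checklist."},  # assumed shape
        max_extensions=3,
        include_token_usage_to_model=True,    # new; defaults to True for ReAct
        continue_after_failed_response=True,  # forwarded via **kwargs; tolerates an all-None response
    )
    print(result)


asyncio.run(main())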
7 changes: 5 additions & 2 deletions lionagi/operations/ReAct/utils.py
@@ -30,6 +30,8 @@ class ReActAnalysis(BaseModel):
2) A list of planned actions to perform before finalizing,
3) Indication whether more expansions/rounds are needed,
4) Additional tuning knobs: how to handle validation, how to execute actions, etc.
Remember do not repeat yourself, and aim to use the most efficient way to achieve
the goal to user's satisfaction.
"""

# Standard ReAct strings for controlling expansions:
@@ -38,11 +40,12 @@
"If you are not ready to finalize, set extension_needed to True. "
"hint: you should set extension_needed to True if the overall goal"
"is not yet achieved. Do not set it to False, if you are just providing"
"an interim answer. You have up to {extensions} expansions. Please continue."
"an interim answer. You have up to {extensions} expansions. Please "
"strategize accordingly and continue."
)
CONTINUE_EXT_PROMPT: ClassVar[str] = (
"Another round is available. You may do multiple actions if needed. "
"You have up to {extensions} expansions. Please continue."
"You have up to {extensions} expansions. Please strategize accordingly and continue."
)
ANSWER_PROMPT: ClassVar[str] = (
"Given your reasoning and actions, please now provide the final answer "
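The reworded prompts keep their {extensions} placeholder, which is filled with the remaining expansion budget each round. A quick sketch of the interpolation; the .format call is an assumption inferred from the placeholder, while the import path matches the file shown above.

from lionagi.operations.ReAct.utils import ReActAnalysis

# The ClassVar templates carry an {extensions} placeholder that is filled
# with the remaining expansion budget before being sent to the model.
prompt = ReActAnalysis.CONTINUE_EXT_PROMPT.format(extensions=2)
print(prompt)
# Another round is available. You may do multiple actions if needed.
# You have up to 2 expansions. Please strategize accordingly and continue.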
12 changes: 7 additions & 5 deletions lionagi/operations/chat/chat.py
@@ -36,6 +36,7 @@ async def chat(
image_detail: Literal["low", "high", "auto"] = None,
plain_content: str = None,
return_ins_res_message: bool = False,
include_token_usage_to_model: bool = False,
**kwargs,
) -> tuple[Instruction, AssistantResponse]:
ins: Instruction = branch.msgs.create_instruction(
@@ -151,11 +152,12 @@
kwargs["messages"] = [i.chat_msg for i in messages]
imodel = imodel or branch.chat_model

- meth = (
-     imodel.invoke
-     if ("stream" not in kwargs or not kwargs["stream"])
-     else imodel.stream
- )
+ meth = imodel.invoke
+ if "stream" not in kwargs or not kwargs["stream"]:
+     kwargs["include_token_usage_to_model"] = include_token_usage_to_model
+ else:
+     meth = imodel.stream

api_call = await meth(**kwargs)
branch._log_manager.log(Log.create(api_call))

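The rewritten dispatch is the subtle part: only the non-streaming path copies the flag into the call kwargs, so it reaches imodel.invoke via the payload builder, while the streaming path switches methods without injecting it (imodel.stream takes the flag as an explicit parameter instead, per the imodel.py hunk below). Restated in isolation, with only the wrapper function added:

def pick_method(imodel, kwargs: dict, include_token_usage_to_model: bool):
    meth = imodel.invoke
    if "stream" not in kwargs or not kwargs["stream"]:
        # Non-streaming: the flag rides along inside the call kwargs.
        kwargs["include_token_usage_to_model"] = include_token_usage_to_model
    else:
        # Streaming: switch methods; the flag is not injected here.
        meth = imodel.stream
    return meth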
2 changes: 2 additions & 0 deletions lionagi/operations/communicate/communicate.py
@@ -35,6 +35,7 @@ async def communicate(
fuzzy_match_kwargs=None,
clear_messages=False,
operative_model=None,
include_token_usage_to_model: bool = False,
**kwargs,
):
if operative_model:
@@ -80,6 +81,7 @@
image_detail=image_detail,
plain_content=plain_content,
return_ins_res_message=True,
include_token_usage_to_model=include_token_usage_to_model,
**kwargs,
)
branch.msgs.add_message(instruction=ins)
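communicate() gains the same passthrough but with a False default, so plain conversational calls opt in explicitly; operate() below gets the identical treatment. A hedged sketch, assuming `branch` is a lionagi Branch with a configured chat model and that the instruction can be passed positionally:

async def ask_with_usage(branch):
    return await branch.communicate(
        "What changed in v0.9.5?",          # assumed positional instruction
        include_token_usage_to_model=True,  # default is False for communicate()
    )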
3 changes: 2 additions & 1 deletion lionagi/operations/interpret/interpret.py
@@ -20,7 +20,8 @@ async def interpret(
instruction = (
"You are given a user's raw instruction or question. Your task is to rewrite it into a clearer,"
"more structured prompt for an LLM or system, making any implicit or missing details explicit. "
"Return only the re-written prompt."
"Return only the re-written prompt. Do not assume any details not mentioned in the input, nor "
"give additional instruction than what is explicitly stated."
)
guidance = (
f"Domain hint: {domain or 'general'}. "
2 changes: 2 additions & 0 deletions lionagi/operations/operate/operate.py
@@ -63,6 +63,7 @@ async def operate(
] = "return_value",
operative_model: type[BaseModel] = None,
request_model: type[BaseModel] = None,
include_token_usage_to_model: bool = False,
**kwargs,
) -> list | BaseModel | None | dict | str:
if operative_model:
@@ -138,6 +139,7 @@
image_detail=image_detail,
tool_schemas=tool_schemas,
return_ins_res_message=True,
include_token_usage_to_model=include_token_usage_to_model,
**kwargs,
)
branch.msgs.add_message(instruction=ins)
26 changes: 26 additions & 0 deletions lionagi/service/endpoints/base.py
@@ -349,11 +349,37 @@ class APICalling(Event):
endpoint: EndPoint = Field(exclude=True)
is_cached: bool = Field(default=False, exclude=True)
should_invoke_endpoint: bool = Field(default=True, exclude=True)
include_token_usage_to_model: bool = Field(
default=False,
exclude=True,
description="Whether to include token usage information into instruction messages",
)

@model_validator(mode="after")
def _validate_streaming(self) -> Self:
if self.payload.get("stream") is True:
self.streaming = True

if self.include_token_usage_to_model:
if isinstance(self.payload["messages"][-1], dict):
required_tokens = self.required_tokens
self.payload["messages"][-1][
"content"
] += f"\n\nEstimated Current Token Usage: {required_tokens}"
if "model" in self.payload:
if (
self.payload["model"].startswith("gpt-4")
or "o1mini" in self.payload["model"]
or "o1-preview" in self.payload["model"]
):
self.payload["messages"][-1]["content"] += "/128_000"
elif "o1" in self.payload["model"]:
self.payload["messages"][-1]["content"] += "/200_000"
elif "sonnet" in self.payload["model"]:
self.payload["messages"][-1]["content"] += "/200_000"
elif "haiku" in self.payload["model"]:
self.payload["messages"][-1]["content"] += "/200_000"

return self

@property
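The validator appends a usage estimate to the last message and, when it recognizes the model family, a hard-coded context-window denominator. Restated as a standalone helper; the helper name and OpenAI-style message dicts are assumptions, while the substring checks (including the unhyphenated "o1mini", as committed) and window sizes mirror the hunk.

def annotate_token_usage(
    messages: list[dict], required_tokens: int, model: str
) -> None:
    # Append the running usage estimate to the last message's content.
    note = f"\n\nEstimated Current Token Usage: {required_tokens}"
    if model.startswith("gpt-4") or "o1mini" in model or "o1-preview" in model:
        note += "/128_000"  # 128k-window family
    elif "o1" in model or "sonnet" in model or "haiku" in model:
        note += "/200_000"  # 200k-window families, collapsed from separate elif arms
    messages[-1]["content"] += note


msgs = [{"role": "user", "content": "Summarize the design."}]
annotate_token_usage(msgs, required_tokens=1234, model="gpt-4o")
assert msgs[-1]["content"].endswith("Estimated Current Token Usage: 1234/128_000")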
17 changes: 14 additions & 3 deletions lionagi/service/imodel.py
@@ -162,7 +162,9 @@ def __init__(
else:
self.streaming_process_func = streaming_process_func

def create_api_calling(self, **kwargs) -> APICalling:
def create_api_calling(
self, include_token_usage_to_model: bool = False, **kwargs
) -> APICalling:
"""Constructs an `APICalling` object from endpoint-specific payload.
Args:
@@ -183,6 +185,7 @@ def create_api_calling(self, **kwargs) -> APICalling:
endpoint=self.endpoint,
is_cached=payload.get("is_cached", False),
should_invoke_endpoint=self.should_invoke_endpoint,
include_token_usage_to_model=include_token_usage_to_model,
)

async def process_chunk(self, chunk) -> None:
@@ -200,7 +203,12 @@ async def process_chunk(self, chunk) -> None:
return await self.streaming_process_func(chunk)
return self.streaming_process_func(chunk)

async def stream(self, api_call=None, **kwargs) -> AsyncGenerator:
async def stream(
self,
api_call=None,
include_token_usage_to_model: bool = False,
**kwargs,
) -> AsyncGenerator:
"""Performs a streaming API call with the given arguments.
Args:
@@ -214,7 +222,10 @@ async def stream(self, api_call=None, **kwargs) -> AsyncGenerator:
"""
if api_call is None:
kwargs["stream"] = True
api_call = self.create_api_calling(**kwargs)
api_call = self.create_api_calling(
include_token_usage_to_model=include_token_usage_to_model,
**kwargs,
)
await self.executor.append(api_call)

if (
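Both iModel entry points now take the flag explicitly and forward it into the APICalling event via create_api_calling(). A hedged sketch of the streaming path; only the stream() signature comes from this diff, and the messages kwarg is an assumed payload field:

async def stream_with_usage(imodel, messages: list[dict]):
    # stream=True is set internally when api_call is None, so only the
    # flag and payload fields need to be supplied here.
    async for chunk in imodel.stream(
        include_token_usage_to_model=True,
        messages=messages,
    ):
        yield chunk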
2 changes: 2 additions & 0 deletions lionagi/service/providers/openai_/chat_completions.py
@@ -89,6 +89,8 @@ def create_payload(self, **kwargs) -> dict:
payload.pop("top_p", None)
if payload["messages"][0].get("role") == "system":
payload["messages"][0]["role"] = "developer"
else:
payload.pop("reasoning_effort", None)

return {
"payload": payload,
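The new else branch makes reasoning_effort survive only for reasoning models; the top_p removal and system-to-developer role rename are the pre-existing cleanup. Restated in isolation, with the guard name standing in for the condition elided above this hunk:

def adjust_openai_payload(payload: dict, is_reasoning_model: bool) -> dict:
    # `is_reasoning_model` is an assumed stand-in for the elided condition;
    # the body mirrors the diff.
    if is_reasoning_model:
        payload.pop("top_p", None)  # reasoning models reject top_p
        if payload["messages"][0].get("role") == "system":
            payload["messages"][0]["role"] = "developer"
    else:
        # New in this PR: drop reasoning_effort for non-reasoning models.
        payload.pop("reasoning_effort", None)
    return payload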
16 changes: 16 additions & 0 deletions lionagi/session/branch.py
@@ -941,6 +941,7 @@ async def operate(
] = "return_value",
operative_model: type[BaseModel] = None,
request_model: type[BaseModel] = None,
include_token_usage_to_model: bool = False,
**kwargs,
) -> list | BaseModel | None | dict | str:
"""
@@ -1028,6 +1029,8 @@
Alias for `response_format`.
request_model (type[BaseModel], optional):
Another alias for `response_format`.
include_token_usage_to_model:
If `True`, includes token usage in the model messages.
**kwargs:
Additional keyword arguments passed to the LLM via `branch.chat()`.
@@ -1080,6 +1083,7 @@
operative_model=operative_model,
request_model=request_model,
imodel=imodel,
include_token_usage_to_model=include_token_usage_to_model,
**kwargs,
)

@@ -1106,6 +1110,7 @@ async def communicate(
fuzzy_match_kwargs: dict = None,
clear_messages: bool = False,
operative_model: type[BaseModel] = None,
include_token_usage_to_model: bool = False,
**kwargs,
):
"""
@@ -1190,6 +1195,7 @@
fuzzy_match_kwargs=fuzzy_match_kwargs,
clear_messages=clear_messages,
operative_model=operative_model,
include_token_usage_to_model=include_token_usage_to_model,
**kwargs,
)

@@ -1639,6 +1645,7 @@ async def ReAct(
analysis_model: iModel | None = None,
verbose: bool = False,
verbose_length: int = None,
include_token_usage_to_model: bool = True,
**kwargs,
):
"""
@@ -1688,6 +1695,12 @@
analysis_model (iModel | None, optional):
A custom LLM model for generating the ReAct analysis steps. If `None`,
uses the branch's default `chat_model`.
include_token_usage_to_model:
If `True`, includes token usage in the model messages.
verbose (bool):
If `True`, logs detailed information about the process.
verbose_length (int):
If `verbose=True`, limits the length of logged strings to this value.
**kwargs:
Additional keyword arguments passed into the initial `branch.operate()` call.
@@ -1733,6 +1746,7 @@
intermediate_listable=intermediate_listable,
reasoning_effort=reasoning_effort,
display_as=display_as,
include_token_usage_to_model=include_token_usage_to_model,
**kwargs,
)

@@ -1758,6 +1772,7 @@ async def ReActStream(
verbose: bool = False,
display_as: Literal["json", "yaml"] = "yaml",
verbose_length: int = None,
include_token_usage_to_model: bool = True,
**kwargs,
) -> AsyncGenerator:
from lionagi.operations.ReAct.ReAct import ReActStream
@@ -1784,6 +1799,7 @@
verbose_analysis=True,
display_as=display_as,
verbose_length=verbose_length,
include_token_usage_to_model=include_token_usage_to_model,
**kwargs,
):
analysis, str_ = result
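branch.ReActStream always drives the inner generator with verbose_analysis=True and unpacks (analysis, str_) pairs, yielding analyses as they arrive. A hedged consumption sketch, reusing a `branch` configured as in the ReAct example earlier and assuming the same instruct shape:

async def run_react_stream(branch):
    async for analysis in branch.ReActStream(
        instruct={"instruction": "Plan the migration."},  # assumed shape
        include_token_usage_to_model=True,
    ):
        print(analysis)  # one analysis (or the final answer) per round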
2 changes: 1 addition & 1 deletion lionagi/version.py
@@ -1 +1 @@
__version__ = "0.9.4"
__version__ = "0.9.5"