diff --git a/lionagi/core/direct/__init__.py b/lionagi/core/direct/__init__.py
index 8015eb30b..d663dcd50 100644
--- a/lionagi/core/direct/__init__.py
+++ b/lionagi/core/direct/__init__.py
@@ -1,6 +1,7 @@
 from .predict import predict
 from .select import select
 from .score import score
+from .react import react
 from .vote import vote
 
-__all__ = ["predict", "select", "score", "vote"]
+__all__ = ["predict", "select", "score", "vote", "react"]
diff --git a/lionagi/core/direct/predict.py b/lionagi/core/direct/predict.py
index 706dd1b54..ad5c9cfcc 100644
--- a/lionagi/core/direct/predict.py
+++ b/lionagi/core/direct/predict.py
@@ -6,9 +6,10 @@
 confidence score, and reason for the prediction.
 """
 
-from pydantic import Field
 from lionagi.libs import func_call
-from ..prompt.prompt_template import ScoredTemplate
+from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
+
+from ..prompt.scored_template import ScoredTemplate
 from ..branch import Branch
diff --git a/lionagi/core/direct/react.py b/lionagi/core/direct/react.py
new file mode 100644
index 000000000..3e46bf8ef
--- /dev/null
+++ b/lionagi/core/direct/react.py
@@ -0,0 +1,167 @@
+from lionagi.libs import func_call, convert, AsyncUtil
+
+from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
+from ..prompt.action_template import ActionedTemplate
+from ..branch import Branch
+
+
+class ReactTemplate(ActionedTemplate):
+    template_name: str = "default_react"
+    sentence: str | list | dict = Field(
+        default_factory=str,
+        description="the given sentence(s) to reason and take actions on",
+    )
+
+    def __init__(
+        self,
+        sentence=None,
+        instruction=None,
+        confidence_score=False,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.sentence = sentence
+        self.task = f"Think step by step. Perform reasoning and prepare actions with given tools only. Instruction: {instruction}. Absolutely DO NOT MAKE UP FUNCTIONS !!!"
+
+        if confidence_score:
+            self.output_fields.append("confidence_score")
+
+
+async def _react(
+    sentence,
+    *,
+    instruction=None,
+    branch=None,
+    confidence_score=False,
+    retries=2,
+    delay=0.5,
+    backoff_factor=2,
+    default_value=None,
+    timeout=None,
+    branch_name=None,
+    system=None,
+    messages=None,
+    service=None,
+    sender=None,
+    llmconfig=None,
+    tools=None,
+    datalogger=None,
+    persist_path=None,
+    tool_manager=None,
+    return_branch=False,
+    **kwargs,
+):
+
+    if "temperature" not in kwargs:
+        kwargs["temperature"] = 0.1
+
+    instruction = instruction or ""
+
+    branch = branch or Branch(
+        name=branch_name,
+        system=system,
+        messages=messages,
+        service=service,
+        sender=sender,
+        llmconfig=llmconfig,
+        tools=tools,
+        datalogger=datalogger,
+        persist_path=persist_path,
+        tool_manager=tool_manager,
+    )
+
+    _template = ReactTemplate(
+        sentence=sentence,
+        instruction=instruction,
+        confidence_score=confidence_score,
+    )
+
+    await func_call.rcall(
+        branch.chat,
+        prompt_template=_template,
+        retries=retries,
+        delay=delay,
+        backoff_factor=backoff_factor,
+        default=default_value,
+        timeout=timeout,
+        **kwargs,
+    )
+
+    if _template.action_needed:
+        actions = _template.actions
+        tasks = [branch.tool_manager.invoke(i.values()) for i in actions]
+        results = await AsyncUtil.execute_tasks(*tasks)
+
+        a = []
+        for idx, item in enumerate(actions):
+            res = {
+                "function": item["function"],
+                "arguments": item["arguments"],
+                "output": results[idx],
+            }
+            branch.add_message(response=res)
+            a.append(res)
+
+        _template.__setattr__("action_response", a)
+
+    return (_template, branch) if return_branch else _template
+
+
+async def react(
+    sentence,
+    *,
+    instruction=None,
+    num_instances=1,
+    branch=None,
+    confidence_score=False,
+    retries=2,
+    delay=0.5,
+    backoff_factor=2,
+    default_value=None,
+    timeout=None,
+    branch_name=None,
+    system=None,
+    messages=None,
+    service=None,
+    sender=None,
+    llmconfig=None,
+    tools=None,
+    datalogger=None,
+    persist_path=None,
+    tool_manager=None,
+    return_branch=False,
+    **kwargs,
+):
+
+    async def _inner(i=0):
+        # note: num_instances is deliberately not forwarded here; _react has no
+        # such parameter, and it would leak into branch.chat via **kwargs
+        return await _react(
+            sentence=sentence,
+            instruction=instruction,
+            branch=branch,
+            confidence_score=confidence_score,
+            retries=retries,
+            delay=delay,
+            backoff_factor=backoff_factor,
+            default_value=default_value,
+            timeout=timeout,
+            branch_name=branch_name,
+            system=system,
+            messages=messages,
+            service=service,
+            sender=sender,
+            llmconfig=llmconfig,
+            tools=tools,
+            datalogger=datalogger,
+            persist_path=persist_path,
+            tool_manager=tool_manager,
+            return_branch=return_branch,
+            **kwargs,
+        )
+
+    if num_instances == 1:
+        return await _inner()
+
+    elif num_instances > 1:
+        return await func_call.alcall(range(num_instances), _inner)
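
For quick reference, a minimal sketch of driving the new `react` entry point end to end. It mirrors the notebook added at the bottom of this diff; the `add` tool, the question, and the `asyncio.run` wrapper are illustrative assumptions, not part of the patch.

```python
# Sketch only: `add` and the question are invented; Branch, Services, and
# react are the APIs this patch touches. Assumes an OpenAI key is configured.
import asyncio

from lionagi import Branch, Services
from lionagi.core.direct.react import react


def add(number1: float, number2: float):
    """Add two numbers and return their sum."""
    return number1 + number2


async def main():
    branch = Branch(
        system="you are asked to perform as a function picker and parameter provider",
        tools=add,  # a bare callable; ToolManager now wraps it via func_to_tool
        service=Services.OpenAI(),
    )
    out_ = await react(
        "What is 17 plus 25?",
        branch=branch,
        instruction="Provide parameters (number1, number2) for function calling",
        model="gpt-4-turbo-preview",
    )
    print(out_.reason)
    print(out_.action_response)  # populated only when action_needed was true


asyncio.run(main())
```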
diff --git a/lionagi/core/direct/score.py b/lionagi/core/direct/score.py
index 160c140c4..2a90e5ace 100644
--- a/lionagi/core/direct/score.py
+++ b/lionagi/core/direct/score.py
@@ -12,7 +12,7 @@
 from pydantic import Field
 import numpy as np
 from lionagi.libs import func_call, convert
-from ..prompt.prompt_template import ScoredTemplate
+from ..prompt.scored_template import ScoredTemplate
 from ..branch import Branch
 
@@ -183,6 +183,7 @@
 async def score(
     sentence,
+    *,
     num_instances=1,
     instruction=None,
     score_range=(1, 10),
diff --git a/lionagi/core/direct/select.py b/lionagi/core/direct/select.py
index 207605d44..1d051e3ea 100644
--- a/lionagi/core/direct/select.py
+++ b/lionagi/core/direct/select.py
@@ -13,7 +13,7 @@
 from pydantic import Field
 from lionagi.libs import func_call, StringMatch
-from ..prompt.prompt_template import ScoredTemplate
+from ..prompt.scored_template import ScoredTemplate
 from ..branch import Branch
 
@@ -39,6 +39,9 @@ class SelectTemplate(ScoredTemplate):
     answer: Enum | str = Field(
         default_factory=str, description="selection from given choices"
     )
+    choices: list = Field(
+        default_factory=list, description="the given choices"
+    )
 
     signature: str = "sentence -> answer"
diff --git a/lionagi/core/messages/schema.py b/lionagi/core/messages/schema.py
index d16e3fc6e..40dd56cc1 100644
--- a/lionagi/core/messages/schema.py
+++ b/lionagi/core/messages/schema.py
@@ -173,7 +173,7 @@ def __init__(
 
         if output_fields:
             format_ = f"""
-                Follow the following response format.
+                MUST EXACTLY follow the response format below. NO ADDITIONAL COMMENTS ALLOWED!
                 ```json
                 {output_fields}
                 ```
diff --git a/lionagi/core/prompt/action_template.py b/lionagi/core/prompt/action_template.py
new file mode 100644
index 000000000..dcf0a1178
--- /dev/null
+++ b/lionagi/core/prompt/action_template.py
@@ -0,0 +1,26 @@
+from typing import Any
+from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
+
+from .scored_template import ScoredTemplate
+
+
+class ActionRequest: ...
+
+
+class ActionedTemplate(ScoredTemplate):
+
+    action_needed: bool | None = Field(
+        False, description="true if actions are needed else false"
+    )
+
+    actions: list[dict | ActionRequest | Any] | None = Field(
+        default_factory=list,
+        description="""the list of action(s) to take, each formatted as {"function": function_name, "arguments": {param1: ..., param2: ..., ...}}; leave blank if no further actions are needed. You must use the provided parameters for each action, DO NOT MAKE UP KWARG NAMES!!!""",
+    )
+
+    answer: str | dict | Any | None = Field(
+        default_factory=str,
+        description="the answer to the question asked if no further actions are needed; leave blank if an accurate answer cannot be provided from context during this step",
+    )
+
+    signature: str = "sentence -> reason, action_needed, actions, answer"
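
Because `signature` advertises `sentence -> reason, action_needed, actions, answer`, the model is expected to fill exactly these output fields. A hand-written example of a conforming payload (values invented):

```python
# Illustration of a response that satisfies ActionedTemplate's output fields;
# "multiply" and the numbers are invented. A "confidence_score" key is also
# expected when the template was built with confidence_score=True.
conforming_response = {
    "reason": "Multiply students per classroom by the number of classrooms.",
    "action_needed": True,
    "actions": [
        {"function": "multiply", "arguments": {"number1": 25, "number2": 8}}
    ],
    "answer": "",  # left blank: the answer depends on the pending action result
}
```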
""" -from lionagi.libs import convert, StringMatch +from lionagi.libs import convert, StringMatch, ParseUtil + + +def _has_action_keys(dict_): + return list(dict_.keys()) >= ["function", "arguments"] + + +def check_action_field(x, fix_=True, **kwargs): + if ( + isinstance(x, list) + and convert.is_same_dtype(x, dict) + and all(_has_action_keys(y) for y in x) + ): + return x + try: + x = _fix_action_field(x, fix_) + return x + except Exception as e: + raise ValueError("Invalid action field type.") from e + + +def _fix_action_field(x, discard_=True): + corrected = [] + if isinstance(x, str): + x = ParseUtil.fuzzy_parse_json(x) + + try: + x = convert.to_list(x) + + for i in x: + i = convert.to_dict(i) + if _has_action_keys(i): + corrected.append(i) + elif not discard_: + raise ValueError(f"Invalid action field: {i}") + except Exception as e: + raise ValueError(f"Invalid action field: {e}") from e + + return corrected def check_number_field(x, fix_=True, **kwargs): @@ -236,4 +274,5 @@ def _fix_enum_field(x, choices, **kwargs): "bool": check_bool_field, "str": check_str_field, "enum": check_enum_field, + "action": check_action_field, } diff --git a/lionagi/core/prompt/prompt_template.py b/lionagi/core/prompt/prompt_template.py index 6e5f8362e..d078bcb13 100644 --- a/lionagi/core/prompt/prompt_template.py +++ b/lionagi/core/prompt/prompt_template.py @@ -207,6 +207,10 @@ def _validate_field(self, k, v, choices=None, fix_=False, **kwargs): setattr(self, k, v_) return True + if "lionagi.core.prompt.action_template.actionrequest" in str_: + self.__setattr__(k, validation_funcs["action"](v)) + return True + elif "bool" in str_: self.__setattr__(k, validation_funcs["bool"](v, fix_=fix_, **kwargs)) return True @@ -227,48 +231,50 @@ def _process_input(self, fix_=False): if k not in kwargs: kwargs = {k: {}} - try: - if ( - self.model_fields[k].json_schema_extra["choices"] is not None - and "choices" in self.model_fields[k].json_schema_extra + if self._field_has_choices(k): + self.choices[k] = self.model_fields[k].json_schema_extra["choices"] + if self._validate_field( + k, v, choices=self.choices[k], fix_=fix_, **kwargs[k] ): - self.choices[k] = self.model_fields[k].json_schema_extra["choices"] - if self._validate_field( - k, v, choices=self.choices[k], fix_=fix_, **kwargs[k] - ): - continue - else: - raise ValueError(f"{k} has no choices") - - except Exception as e: - if self._validate_field(k, v, fix_=fix_, **kwargs[k]): continue else: - raise ValueError(f"failed to validate field {k}") from e + raise ValueError(f"{k} has no choices") + + elif self._validate_field(k, v, fix_=fix_, **kwargs[k]): + continue + else: + raise ValueError(f"failed to validate field {k}") + + def _field_has_choices(self, k): + try: + a = ( + self.model_fields[k].json_schema_extra["choices"] is not None + and "choices" in self.model_fields[k].json_schema_extra + ) + return a if isinstance(a, bool) else False + except Exception: + return False def _process_response(self, out_, fix_=True): kwargs = self.out_validation_kwargs.copy() for k, v in out_.items(): if k not in kwargs: kwargs = {k: {}} - try: - if ( - self.model_fields[k].json_schema_extra["choices"] is not None - and "choices" in self.model_fields[k].json_schema_extra + + if self._field_has_choices(k): + self.choices[k] = self.model_fields[k].json_schema_extra["choices"] + if self._validate_field( + k, v, choices=self.choices[k], fix_=fix_, **kwargs[k] ): - self.choices[k] = self.model_fields[k].json_schema_extra["choices"] - if self._validate_field( - k, v, 
diff --git a/lionagi/core/prompt/prompt_template.py b/lionagi/core/prompt/prompt_template.py
index 6e5f8362e..d078bcb13 100644
--- a/lionagi/core/prompt/prompt_template.py
+++ b/lionagi/core/prompt/prompt_template.py
@@ -207,6 +207,10 @@ def _validate_field(self, k, v, choices=None, fix_=False, **kwargs):
             setattr(self, k, v_)
             return True
 
+        if "lionagi.core.prompt.action_template.actionrequest" in str_:
+            self.__setattr__(k, validation_funcs["action"](v))
+            return True
+
         elif "bool" in str_:
             self.__setattr__(k, validation_funcs["bool"](v, fix_=fix_, **kwargs))
             return True
@@ -227,48 +231,50 @@ def _process_input(self, fix_=False):
             if k not in kwargs:
                 kwargs = {k: {}}
 
-            try:
-                if (
-                    self.model_fields[k].json_schema_extra["choices"] is not None
-                    and "choices" in self.model_fields[k].json_schema_extra
-                ):
-                    self.choices[k] = self.model_fields[k].json_schema_extra["choices"]
-                    if self._validate_field(
-                        k, v, choices=self.choices[k], fix_=fix_, **kwargs[k]
-                    ):
-                        continue
-                    else:
-                        raise ValueError(f"{k} has no choices")
-
-            except Exception as e:
-                if self._validate_field(k, v, fix_=fix_, **kwargs[k]):
-                    continue
-                else:
-                    raise ValueError(f"failed to validate field {k}") from e
+            if self._field_has_choices(k):
+                self.choices[k] = self.model_fields[k].json_schema_extra["choices"]
+                if self._validate_field(
+                    k, v, choices=self.choices[k], fix_=fix_, **kwargs[k]
+                ):
+                    continue
+                else:
+                    raise ValueError(f"{k} has no choices")
+
+            elif self._validate_field(k, v, fix_=fix_, **kwargs[k]):
+                continue
+            else:
+                raise ValueError(f"failed to validate field {k}")
+
+    def _field_has_choices(self, k):
+        try:
+            a = (
+                "choices" in self.model_fields[k].json_schema_extra
+                and self.model_fields[k].json_schema_extra["choices"] is not None
+            )
+            return a if isinstance(a, bool) else False
+        except Exception:
+            return False
 
     def _process_response(self, out_, fix_=True):
         kwargs = self.out_validation_kwargs.copy()
         for k, v in out_.items():
             if k not in kwargs:
                 kwargs = {k: {}}
-            try:
-                if (
-                    self.model_fields[k].json_schema_extra["choices"] is not None
-                    and "choices" in self.model_fields[k].json_schema_extra
-                ):
-                    self.choices[k] = self.model_fields[k].json_schema_extra["choices"]
-                    if self._validate_field(
-                        k, v, choices=self.choices[k], fix_=fix_, **kwargs[k]
-                    ):
-                        continue
-                    else:
-                        raise ValueError(f"{k} has no choices")
-
-            except Exception as e:
-                if self._validate_field(k, v, fix_=fix_, **kwargs[k]):
-                    continue
-                else:
-                    raise ValueError(f"failed to validate field {k}") from e
+
+            if self._field_has_choices(k):
+                self.choices[k] = self.model_fields[k].json_schema_extra["choices"]
+                if self._validate_field(
+                    k, v, choices=self.choices[k], fix_=fix_, **kwargs[k]
+                ):
+                    continue
+                else:
+                    raise ValueError(f"{k} has no choices")
+
+            elif self._validate_field(k, v, fix_=fix_, **kwargs[k]):
+                continue
+
+            else:
+                raise ValueError(f"failed to validate field {k} with value {v}")
 
     @property
     def in_(self):
@@ -288,16 +294,6 @@ def process(self, in_=None, out_=None):
         return self
 
 
-class ScoredTemplate(PromptTemplate):
-    confidence_score: float | None = Field(
-        -1,
-        description="a numeric score between 0 to 1 formatted in num:0.2f",
-    )
-    reason: str | None = Field(
-        default_factory=str, description="brief reason for the given output"
-    )
-
-
 # class Weather(PromptTemplate):
 #     sunny: bool = Field(True, description="true if the weather is sunny outside else false")
 #     rainy: bool = Field(False, description="true if it is raining outside else false")
diff --git a/lionagi/core/prompt/scored_template.py b/lionagi/core/prompt/scored_template.py
new file mode 100644
index 000000000..ab05bcd9e
--- /dev/null
+++ b/lionagi/core/prompt/scored_template.py
@@ -0,0 +1,13 @@
+from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
+
+from .prompt_template import PromptTemplate
+
+
+class ScoredTemplate(PromptTemplate):
+    confidence_score: float | None = Field(
+        -1,
+        description="a numeric score between 0 and 1, formatted as num:0.2f",
+    )
+    reason: str | None = Field(
+        default_factory=str, description="brief reason for the given output"
+    )
diff --git a/lionagi/core/tool/manual.py b/lionagi/core/tool/manual.py
index e69de29bb..05c218845 100644
--- a/lionagi/core/tool/manual.py
+++ b/lionagi/core/tool/manual.py
@@ -0,0 +1 @@
+# TODO: tool manual, instruction on how to use the tool for LLM
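
With `ScoredTemplate` moved to its own module above, defining new scored templates works as before; only the import path changes. A minimal sketch following the `SelectTemplate` pattern (the `SentimentTemplate` name and its fields are invented):

```python
from lionagi.core.prompt.scored_template import ScoredTemplate
from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field


class SentimentTemplate(ScoredTemplate):
    # invented example fields; inherits confidence_score and reason
    template_name: str = "sentiment"
    sentence: str = Field(default_factory=str, description="the sentence to classify")
    answer: str = Field(
        default_factory=str, description="one of: positive, negative, neutral"
    )
    signature: str = "sentence -> answer"
```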
""" + if isinstance(tool, Callable): + tool = func_to_tool(tool)[0] if not isinstance(tool, Tool): raise TypeError("Please register a Tool object.") name = tool.schema_["function"]["name"] diff --git a/lionagi/integrations/bridge/pydantic_/base_model.py b/lionagi/integrations/bridge/pydantic_/base_model.py deleted file mode 100644 index bbd72e958..000000000 --- a/lionagi/integrations/bridge/pydantic_/base_model.py +++ /dev/null @@ -1,7 +0,0 @@ -from pydantic import BaseModel, Field, ValidationError, AliasChoices, field_serializer - -ln_BaseModel = BaseModel -ln_Field = Field -ln_field_serializer = field_serializer -ln_AliasChoices = AliasChoices -ln_ValidationError = ValidationError diff --git a/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py b/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py index e69de29bb..6162d765e 100644 --- a/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +++ b/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py @@ -0,0 +1 @@ +from pydantic import BaseModel, Field, ValidationError, AliasChoices, field_serializer diff --git a/lionagi/integrations/provider/ollama.py b/lionagi/integrations/provider/ollama.py index 043cc4255..97a5181ca 100644 --- a/lionagi/integrations/provider/ollama.py +++ b/lionagi/integrations/provider/ollama.py @@ -11,7 +11,7 @@ "seed", "stop", "stream", - "temperature", + # "temperature", "top_p", "tools", "tool_choice", diff --git a/lionagi/version.py b/lionagi/version.py index 40551518a..78e94cde8 100644 --- a/lionagi/version.py +++ b/lionagi/version.py @@ -1 +1 @@ -__version__ = "0.0.313" +__version__ = "0.0.314" diff --git a/notebooks/direct_react.ipynb b/notebooks/direct_react.ipynb new file mode 100644 index 000000000..c7bebf037 --- /dev/null +++ b/notebooks/direct_react.ipynb @@ -0,0 +1,144 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "reason-action with a single LLM call" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "def multiply(number1: float, number2: float):\n", + " \"\"\"\n", + " Perform multiplication on two numbers.\n", + "\n", + " Args:\n", + " number1: First number to multiply.\n", + " number2: Second number to multiply.\n", + "\n", + " Returns:\n", + " The product of number1 and number2.\n", + " \n", + " \"\"\"\n", + " return number1 * number2\n", + "\n", + "sys_mul = \"you are asked to perform as a function picker and parameter provider\"\n", + "instruction = \"Think step by step, understand the following basic math question and provide parameters (number1, number2) for function calling\"\n", + "question1 = \"A school is ordering laptops for its students. 
diff --git a/lionagi/integrations/bridge/pydantic_/base_model.py b/lionagi/integrations/bridge/pydantic_/base_model.py
deleted file mode 100644
index bbd72e958..000000000
--- a/lionagi/integrations/bridge/pydantic_/base_model.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from pydantic import BaseModel, Field, ValidationError, AliasChoices, field_serializer
-
-ln_BaseModel = BaseModel
-ln_Field = Field
-ln_field_serializer = field_serializer
-ln_AliasChoices = AliasChoices
-ln_ValidationError = ValidationError
diff --git a/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py b/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py
index e69de29bb..6162d765e 100644
--- a/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py
+++ b/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py
@@ -0,0 +1 @@
+from pydantic import BaseModel, Field, ValidationError, AliasChoices, field_serializer
diff --git a/lionagi/integrations/provider/ollama.py b/lionagi/integrations/provider/ollama.py
index 043cc4255..97a5181ca 100644
--- a/lionagi/integrations/provider/ollama.py
+++ b/lionagi/integrations/provider/ollama.py
@@ -11,7 +11,7 @@
     "seed",
     "stop",
     "stream",
-    "temperature",
+    # "temperature",
     "top_p",
     "tools",
     "tool_choice",
diff --git a/lionagi/version.py b/lionagi/version.py
index 40551518a..78e94cde8 100644
--- a/lionagi/version.py
+++ b/lionagi/version.py
@@ -1 +1 @@
-__version__ = "0.0.313"
+__version__ = "0.0.314"
diff --git a/notebooks/direct_react.ipynb b/notebooks/direct_react.ipynb
new file mode 100644
index 000000000..c7bebf037
--- /dev/null
+++ b/notebooks/direct_react.ipynb
@@ -0,0 +1,144 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "reason-action with a single LLM call"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def multiply(number1: float, number2: float):\n",
+    "    \"\"\"\n",
+    "    Perform multiplication on two numbers.\n",
+    "\n",
+    "    Args:\n",
+    "        number1: First number to multiply.\n",
+    "        number2: Second number to multiply.\n",
+    "\n",
+    "    Returns:\n",
+    "        The product of number1 and number2.\n",
+    "    \"\"\"\n",
+    "    return number1 * number2\n",
+    "\n",
+    "sys_mul = \"you are asked to perform as a function picker and parameter provider\"\n",
+    "instruction = \"Think step by step, understand the following basic math question and provide parameters (number1, number2) for function calling\"\n",
+    "question1 = \"A school is ordering laptops for its students. If each classroom has 25 students and the school wants to provide a laptop for each student in its 8 classrooms, how many laptops in total does the school need to order?\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from lionagi import Branch, Services\n",
+    "from lionagi.core.direct.react import react\n",
+    "\n",
+    "service = Services.OpenAI()\n",
+    "branch = Branch(system=sys_mul, tools=multiply, service=service)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Reason : To find the total number of laptops needed, multiply the number of students per classroom by the number of classrooms.\n",
+      "Action Responses: [{'function': 'multiply', 'arguments': {'number1': 25, 'number2': 8}, 'output': 200}]\n"
+     ]
+    }
+   ],
+   "source": [
+    "out_ = await react(question1, branch=branch, instruction=instruction, model=\"gpt-4-turbo-preview\")\n",
+    "\n",
+    "print(\"Reason :\", out_.reason)\n",
+    "print(\"Action Responses: \", out_.action_response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "service = Services.OpenRouter()\n",
+    "branch = Branch(system=sys_mul, tools=multiply, service=service)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Reason : To calculate the total number of laptops needed, we need to multiply the number of students per classroom by the number of classrooms.\n",
+      "Action Responses: [{'function': 'multiply', 'arguments': {'number1': 25, 'number2': 8}, 'output': 200}]\n"
+     ]
+    }
+   ],
+   "source": [
+    "out_ = await react(question1, branch=branch, instruction=instruction, model=\"anthropic/claude-3-opus\")\n",
+    "\n",
+    "print(\"Reason :\", out_.reason)\n",
+    "print(\"Action Responses: \", out_.action_response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Reason : To find the total number of laptops the school needs to order, we need to multiply the number of students per classroom by the number of classrooms.\n",
+      "Action Responses: [{'function': 'multiply', 'arguments': {'number1': 25, 'number2': 8}, 'output': 200}]\n"
+     ]
+    }
+   ],
+   "source": [
+    "branch.clear_messages()\n",
+    "out_ = await react(question1, branch=branch, instruction=instruction, model=\"anthropic/claude-3-haiku\")\n",
+    "\n",
+    "print(\"Reason :\", out_.reason)\n",
+    "print(\"Action Responses: \", out_.action_response)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": ".venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}