added additional keys parameter to _prompt_ #149

Merged
merged 6 commits on May 9, 2024
Changes from all commits
17 changes: 15 additions & 2 deletions agential/cog/functional/expel.py
@@ -178,6 +178,7 @@ def _build_compare_prompt(
success_trial: str,
failed_trial: str,
is_full: bool,
additional_keys: Dict[str, str] = {},
) -> str:
"""Constructs a comparison prompt for an AI by combining system instructions, task details, and a list of existing insights.

@@ -189,6 +190,7 @@ def _build_compare_prompt(
success_trial (str): A description or example of a successful trial for the task.
failed_trial (str): A description or example of a failed trial for the task.
is_full (bool): A flag indicating whether the prompt should be in its full form or not. This affects the suffix of the critique summary.
additional_keys (Dict[str, str]): Additional keys to format the prompt. Defaults to {}.

Returns:
str: A fully constructed prompt ready to be presented to the AI. The prompt includes a prefixed system instruction, task details formatted according to the human critique template,
@@ -214,6 +216,7 @@ def _build_compare_prompt(
if insights
else ""
),
**additional_keys,
}

human_critique_summary_message = PromptTemplate.from_template(
@@ -232,6 +235,7 @@ def _build_all_success_prompt(
insights: List[Dict[str, Any]],
success_trajs_str: str,
is_full: bool,
additional_keys: Dict[str, str] = {},
) -> str:
"""Constructs a prompt focused on critiquing and enhancing existing insights based on successful task trials.

@@ -241,6 +245,7 @@ def _build_all_success_prompt(
insights (List[Dict[str, Any]]): A list of dictionaries where each dictionary represents an existing insight with a score. If the list is empty, it is treated as if there are no existing insights.
success_trajs_str (str): A string containing descriptions of successful trials related to the task. These descriptions are meant to provide context for the AI's critique of the existing insights.
is_full (bool): A boolean flag that determines the verbosity of the critique summary's suffix. If `True`, a more comprehensive suffix is used.
additional_keys (Dict[str, str]): Additional keys to format the prompt. Defaults to {}.

Returns:
str: A string that combines the system's instruction, the task context with successful trials, and the existing insights into a coherent prompt.
@@ -267,7 +272,7 @@ def _build_all_success_prompt(

human_critique_summary_message = PromptTemplate.from_template(
HUMAN_CRITIQUE_EXISTING_INSIGHTS_ALL_SUCCESS_TEMPLATE
).format(**human_format_dict)
).format(**human_format_dict, **additional_keys)
critique_summary_suffix = (
CRITIQUE_SUMMARY_SUFFIX_FULL if is_full else CRITIQUE_SUMMARY_SUFFIX_NOT_FULL
)
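The hunk above folds the caller-supplied additional_keys into the keyword arguments used to format the critique template. A minimal sketch of that merge, using a made-up template and key names rather than the module's actual HUMAN_CRITIQUE_EXISTING_INSIGHTS_ALL_SUCCESS_TEMPLATE:

```python
# Minimal sketch of merging base format keys with additional_keys.
# The template and key names are illustrative, not the module's constants.
from langchain.prompts import PromptTemplate

template = (
    "Existing insights:\n{insights}\n\n"
    "Successful trials:\n{success_trajs_str}\n\n"
    "Benchmark: {benchmark}"
)

base_keys = {
    "insights": "0. Verify dates before answering.",
    "success_trajs_str": "Question: ...\nThought: ...\nAction: Finish[...]",
}
additional_keys = {"benchmark": "HotpotQA"}  # extra placeholder supplied by the caller

# Every placeholder in the template must be covered by the merged keys; the merge
# lets callers fill placeholders that the base dictionary does not know about.
prompt = PromptTemplate.from_template(template).format(**base_keys, **additional_keys)
print(prompt)
```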
@@ -285,6 +290,7 @@ def _prompt_compare_critique(
failed_trial: str,
is_full: bool,
replace_newline: bool = False,
additional_keys: Dict[str, str] = {},
) -> str:
"""Generates a critique from an LLM based on a comparison between successful and failed task trials, within the context of existing insights.

@@ -298,6 +304,7 @@ def _prompt_compare_critique(
failed_trial (str): A description of a failed trial for the task.
is_full (bool): A flag indicating if the full version of the critique summary should be used.
replace_newline (bool, optional): If `True`, newline characters in the LLM's output will be replaced with empty strings, defaulting to `False`.
additional_keys (Dict[str, str]): Additional keys to format the prompt. Defaults to {}.

Returns:
str: The critique generated by the LLM, potentially with newline characters removed, based on the `replace_newline` parameter.
@@ -308,6 +315,7 @@ def _prompt_compare_critique(
success_trial=success_trial,
failed_trial=failed_trial,
is_full=is_full,
additional_keys=additional_keys,
)
out = llm(
[
@@ -329,6 +337,7 @@ def _prompt_all_success_critique(
success_trajs_str: str,
is_full: bool,
replace_newline: bool = False,
additional_keys: Dict[str, str] = {},
) -> str:
"""Generates a critique from an LLM based on a compilation of successful task trials in the context of existing insights.

@@ -340,12 +349,16 @@ def _prompt_all_success_critique(
success_trajs_str (str): A string concatenating descriptions of successful trials related to the task.
is_full (bool): Indicates whether the full critique summary is to be used in the prompt.
replace_newline (bool, optional): If set to `True`, newline characters in the LLM output will be replaced with empty strings. The default is `False`.
additional_keys (Dict[str, str]): Additional keys to format the prompt. Defaults to {}.

Returns:
str: The generated critique from the LLM, optionally with newline characters removed depending on the `replace_newline` parameter.
"""
prompt = _build_all_success_prompt(
insights=insights, success_trajs_str=success_trajs_str, is_full=is_full
insights=insights,
success_trajs_str=success_trajs_str,
is_full=is_full,
additional_keys=additional_keys,
)
out = llm(
[
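On the caller side, the new parameter simply threads through _prompt_compare_critique and _prompt_all_success_critique into the builders above. A hedged usage sketch; the fake chat model, insight, and trial strings are placeholders, and the exact signature should be checked against the module:

```python
# Hedged usage sketch; FakeListChatModel and the strings below are illustrative.
# Keys passed via additional_keys should correspond to extra placeholders that
# the critique template actually declares, so an empty dict is the safe default.
from langchain_community.chat_models import FakeListChatModel

from agential.cog.functional.expel import _prompt_all_success_critique

llm = FakeListChatModel(responses=["Keep insights that generalize across questions."])

critique = _prompt_all_success_critique(
    llm=llm,
    insights=[{"insight": "Verify dates before answering.", "score": 2}],
    success_trajs_str="Question: ...\nThought: ...\nAction: Finish[...]",
    is_full=False,
    additional_keys={},  # pass extra template keys here when a template needs them
)
print(critique)
```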
8 changes: 8 additions & 0 deletions agential/cog/functional/react.py
@@ -1,5 +1,7 @@
"""Functional module for ReAct."""

from typing import Dict, List, Optional, Tuple

from langchain.prompts import PromptTemplate
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages.human import HumanMessage
@@ -16,6 +18,7 @@ def _build_agent_prompt(
scratchpad: str,
examples: str,
max_steps: int,
additional_keys: Dict[str, str] = {},
prompt: str = REACT_INSTRUCTION_HOTPOTQA,
) -> str:
"""Constructs a prompt template for the agent.
@@ -28,6 +31,7 @@ def _build_agent_prompt(
scratchpad (str): Additional scratchpad information to be included.
examples (str): Fewshot examples.
max_steps (int): Max number of steps.
additional_keys (Dict[str, str]): Additional keys to format the prompt. Defaults to {}.
prompt (str, optional): Prompt template string. Defaults to REACT_INSTRUCTION_HOTPOTQA. Must include question,
scratchpad, examples, and max_steps.

@@ -39,6 +43,7 @@ def _build_agent_prompt(
scratchpad=scratchpad,
examples=examples,
max_steps=max_steps,
**additional_keys,
)
return prompt

@@ -49,6 +54,7 @@ def _prompt_agent(
scratchpad: str,
examples: str,
max_steps: int,
additional_keys: Dict[str, str] = {},
prompt: str = REACT_INSTRUCTION_HOTPOTQA,
) -> str:
"""Generates a response from the LLM based on a given question and scratchpad.
@@ -62,6 +68,7 @@ def _prompt_agent(
scratchpad (str): Additional context or information for the language model.
examples (str): Fewshot examples.
max_steps (int): Maximum number of steps.
additional_keys (Dict[str, str]): Additional keys to format the prompt. Defaults to {}.
prompt (str, optional): Prompt template string. Defaults to REACT_INSTRUCTION_HOTPOTQA. Must include question,
scratchpad, examples, and max_steps.

@@ -73,6 +80,7 @@ def _prompt_agent(
scratchpad=scratchpad,
examples=examples,
max_steps=max_steps,
additional_keys=additional_keys,
prompt=prompt,
)
out = llm(
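For the ReAct helpers, additional_keys pairs naturally with a custom prompt template that declares extra placeholders. A hedged sketch, assuming a fake chat model and a made-up instruction template (neither is part of this PR, and the exact signature should be checked against the module):

```python
# Hedged sketch of the new parameter on _prompt_agent; the fake model and the
# custom instruction template are illustrative assumptions, not library code.
from langchain_community.chat_models import FakeListChatModel

from agential.cog.functional.react import _prompt_agent

# A custom template must still include question, scratchpad, examples, and
# max_steps; the extra {topic} placeholder is what additional_keys fills in.
CUSTOM_INSTRUCTION = (
    "You are answering questions about {topic}.\n"
    "Examples:\n{examples}\n"
    "Question: {question}\n"
    "You have at most {max_steps} steps.\n"
    "{scratchpad}"
)

llm = FakeListChatModel(responses=["Thought: I should search for the author."])

out = _prompt_agent(
    llm=llm,
    question="Who introduced the ReAct prompting framework?",
    scratchpad="",
    examples="Question: ...\nThought: ...\nAction: Search[...]",
    max_steps=6,
    additional_keys={"topic": "language agents"},  # fills the extra placeholder
    prompt=CUSTOM_INSTRUCTION,
)
print(out)
```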