Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add prompt to tools response #123

Merged
merged 2 commits
Oct 26, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
657 changes: 331 additions & 326 deletions poetry.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ python = ">=3.10,<3.13"
open-autonomy = "==0.13.1"
openai = "==0.27.2"
requests = "==2.28.2"
mech-client = "==0.2.5"
mech-client = "==0.2.7"
py-multibase = "==1.0.3"
py-multicodec = "==0.2.1"
grpcio = "==1.53.0"
Expand Down
6 changes: 3 additions & 3 deletions tools/native_transfer_request.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ def make_request_openai_request(
def native_transfer(
prompt: str,
api_key: str,
) -> Tuple[str, Optional[Dict[str, Any]]]:
) -> Tuple[str, Optional[str], Optional[Dict[str, Any]]]:
"""Perform native transfer."""
tool_prompt = NATIVE_TRANSFER_PROMPT.format(user_prompt=prompt)
response = make_request_openai_request(prompt=tool_prompt, api_key=api_key)
Expand All @@ -102,15 +102,15 @@ def native_transfer(
"value": int(parsed_txs["wei_value"]),
}

return response, transaction
return response, prompt, transaction


AVAILABLE_TOOLS = {
"native": native_transfer,
}


def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]]]:
"""Run the task"""
prompt = kwargs["prompt"]
api_key = kwargs["api_keys"]["openai"]
Expand Down
4 changes: 2 additions & 2 deletions tools/openai_request.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
ALLOWED_TOOLS = [PREFIX + value for values in ENGINES.values() for value in values]


def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]]]:
"""Run the task"""
openai.api_key = kwargs["api_keys"]["openai"]
max_tokens = kwargs.get("max_tokens", DEFAULT_OPENAI_SETTINGS["max_tokens"])
Expand Down Expand Up @@ -75,4 +75,4 @@ def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
timeout=120,
presence_penalty=0,
)
return response.choices[0].text, None
return response.choices[0].text, prompt, None
4 changes: 2 additions & 2 deletions tools/optimization_by_prompting.py
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,7 @@ def fetch_additional_information(
return "\n".join(["- " + text for text in texts])


def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]]]:
"""Run the task"""
tool = kwargs["tool"]
prompt = kwargs["prompt"]
Expand Down Expand Up @@ -385,4 +385,4 @@ def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
request_timeout=150,
stop=None,
)
return response.choices[0].message.content, None
return response.choices[0].message.content, prediction_prompt, None
4 changes: 2 additions & 2 deletions tools/prediction_request.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,7 +229,7 @@ def fetch_additional_information(
return "\n".join(["- " + text for text in texts])


def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]]]:
"""Run the task"""
tool = kwargs["tool"]
prompt = kwargs["prompt"]
Expand Down Expand Up @@ -273,4 +273,4 @@ def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
request_timeout=150,
stop=None,
)
return response.choices[0].message.content, None
return response.choices[0].message.content, prediction_prompt, None
4 changes: 2 additions & 2 deletions tools/prediction_request_claude.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ def fetch_additional_information(
return "\n".join(["- " + text for text in texts])


def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]]]:
"""Run the task"""
tool = kwargs["tool"]
prompt = kwargs["prompt"]
Expand Down Expand Up @@ -256,4 +256,4 @@ def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
max_tokens_to_sample=300,
prompt=prediction_prompt,
)
return completion.completion, None
return completion.completion, prediction_prompt, None
4 changes: 2 additions & 2 deletions tools/prediction_request_sme.py
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,7 @@ def get_sme_role(engine, temperature, max_tokens, prompt) -> Tuple[str, str]:
return sme["sme"], sme["sme_introduction"]


def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]]]:
"""Run the task"""
tool = kwargs["tool"]
prompt = kwargs["prompt"]
Expand Down Expand Up @@ -347,4 +347,4 @@ def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
request_timeout=150,
stop=None,
)
return response.choices[0].message.content, None
return response.choices[0].message.content, prediction_prompt, None
4 changes: 2 additions & 2 deletions tools/sme_generation_request.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@
task question: "{question}"
"""

def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]]]:
"""Generate SME roles for a given market question

Raises:
Expand Down Expand Up @@ -102,4 +102,4 @@ def run(**kwargs) -> Tuple[str, Optional[Dict[str, Any]]]:
generated_sme_roles = json.loads(generated_sme_roles)
except json.decoder.JSONDecodeError as e:
return f"Failed to generate SME roles due to {e}", None
return response.choices[0].message.content, None
return response.choices[0].message.content, json.dumps(messages), None
6 changes: 3 additions & 3 deletions tools/stabilityai_request.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ class FinishReason(Enum):
ERROR = 2


def run(**kwargs: Any) -> Tuple[str, Optional[Dict[str, Any]]]:
def run(**kwargs: Any) -> Tuple[str, Optional[str], Optional[Dict[str, Any]]]:
"""Run the task"""

api_key = kwargs["api_keys"]["stabilityai"]
Expand Down Expand Up @@ -112,5 +112,5 @@ def run(**kwargs: Any) -> Tuple[str, Optional[Dict[str, Any]]]:
json=json_params,
)
if response.status_code == 200:
return json.dumps(response.json()), None
return (f"Error: Non-200 response ({response.status_code}): {response.text}",)
return json.dumps(response.json()), None, None
return (f"Error: Non-200 response ({response.status_code}): {response.text}", None, None)
2 changes: 1 addition & 1 deletion tox.ini
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ deps =
open-autonomy==0.13.1
openai==0.27.2
requests==2.28.2
; mech-client==0.2.5
mech-client==0.2.7
py-multibase==1.0.3
py-multicodec==0.2.1
grpcio==1.53.0
Expand Down
Loading