Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: print out LLM message history and tools when there's an InvalidOutputError #53

Merged
merged 2 commits into from
Oct 3, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 29 additions & 5 deletions docetl/operations/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -449,16 +449,34 @@ class InvalidOutputError(Exception):
Attributes:
message (str): Explanation of the error.
output (str): The invalid output that caused the exception.
expected_schema (Dict[str, Any]): The expected schema for the output.
messages (List[Dict[str, str]]): The messages sent to the LLM.
tools (Optional[List[Dict[str, str]]]): The tool definitions provided to the LLM, if any.
"""

def __init__(self, message: str, output: str, expected_schema: Dict[str, Any]):
def __init__(
    self,
    message: str,
    output: str,
    expected_schema: Dict[str, Any],
    messages: List[Dict[str, str]],
    tools: Optional[List[Dict[str, str]]] = None,
):
    """Capture the full LLM-call context for an invalid-output failure.

    Args:
        message: Short explanation of the error.
        output: The invalid output that caused the exception.
        expected_schema: The schema the output was expected to match.
        messages: The messages that were sent to the LLM.
        tools: The tools value recorded for debugging, if any.
            NOTE(review): the class docstring calls these "tool calls
            generated by the LLM", but call sites appear to pass the tool
            definitions — confirm which is intended.
    """
    self.message = message
    self.output = output
    self.expected_schema = expected_schema
    self.messages = messages
    self.tools = tools
    # Only the short message goes to Exception; __str__ appends the rest.
    super().__init__(self.message)

def __str__(self):
return f"{self.message}\nInvalid output: {self.output}\nExpected schema: {self.expected_schema}"
return (
f"{self.message}\n"
f"Invalid output: {self.output}\n"
f"Expected schema: {self.expected_schema}\n"
f"Messages sent to LLM: {self.messages}\n"
f"Tool calls generated by LLM: {self.tools}"
)


def timeout(seconds):
Expand Down Expand Up @@ -707,7 +725,7 @@ def call_llm_with_gleaning(
cost = 0.0

# Parse the response
parsed_response = parse_llm_response(response, output_schema)
parsed_response = parse_llm_response(response, output_schema, messages=messages)
output = parsed_response[0]

messages = (
Expand Down Expand Up @@ -865,7 +883,7 @@ def parse_llm_response_helper(
InvalidOutputError: If the response is not valid.
"""
if not response:
raise InvalidOutputError("No response from LLM", [{}], schema)
raise InvalidOutputError("No response from LLM", [{}], schema, [], [])

# Parse the response based on the provided tools
if tools:
Expand Down Expand Up @@ -894,7 +912,9 @@ def parse_llm_response_helper(
tool_calls = response.choices[0].message.tool_calls

if not tool_calls:
raise InvalidOutputError("No tool calls in LLM response", [{}], schema)
raise InvalidOutputError(
"No tool calls in LLM response", [{}], schema, response.choices, []
)

outputs = []
for tool_call in tool_calls:
Expand All @@ -920,12 +940,16 @@ def parse_llm_response_helper(
"Could not decode LLM JSON response",
[tool_call.function.arguments],
schema,
response.choices,
tools,
)
except Exception as e:
raise InvalidOutputError(
f"Error parsing LLM response: {e}",
[tool_call.function.arguments],
schema,
response.choices,
tools,
)
return outputs

Expand Down
Loading