Initial LCEL Support #568

Merged · 4 commits · Feb 7, 2024
2 changes: 1 addition & 1 deletion docs/api_reference/response_structures.md
@@ -6,4 +6,4 @@
- "ValidationResult"
- "PassResult"
- "FailResult"
- "ValidatorError"
- "ValidationError"
10 changes: 5 additions & 5 deletions docs/examples/input_validation.ipynb
@@ -47,7 +47,7 @@
"source": [
"When `fix` is specified as the on-fail handler, the prompt will automatically be amended before calling the LLM.\n",
"\n",
"In any other case (for example, `exception`), a `ValidatorError` will be returned in the outcome."
"In any other case (for example, `exception`), a `ValidationError` will be returned in the outcome."
]
},
{
@@ -67,13 +67,13 @@
],
"source": [
"import openai\n",
"from guardrails.errors import ValidatorError\n",
"from guardrails.errors import ValidationError\n",
"\n",
"try:\n",
" guard(\n",
" openai.chat.completions.create,\n",
" )\n",
"except ValidatorError as e:\n",
"except ValidationError as e:\n",
" print(e)"
]
},
@@ -115,7 +115,7 @@
" openai.chat.completions.create,\n",
" prompt=\"This is not two words\",\n",
" )\n",
"except ValidatorError as e:\n",
"except ValidationError as e:\n",
" print(e)"
]
}
@@ -136,7 +136,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
"version": "3.11.7"
}
},
"nbformat": 4,
10 changes: 5 additions & 5 deletions docs/examples/response_is_on_topic.ipynb
@@ -137,7 +137,7 @@
"source": [
"import guardrails as gd\n",
"from guardrails.validators import OnTopic\n",
"from guardrails.errors import ValidatorError\n",
"from guardrails.errors import ValidationError\n",
"\n",
"# Create the Guard with the OnTopic Validator\n",
"guard = gd.Guard.from_string(\n",
@@ -159,7 +159,7 @@
" guard.parse(\n",
" llm_output=text,\n",
" )\n",
"except ValidatorError as e:\n",
"except ValidationError as e:\n",
" print(e)\n"
]
},
@@ -205,7 +205,7 @@
" guard.parse(\n",
" llm_output=text,\n",
" )\n",
"except ValidatorError as e:\n",
"except ValidationError as e:\n",
" print(e)"
]
},
@@ -258,7 +258,7 @@
" guard.parse(\n",
" llm_output=text,\n",
" )\n",
"except ValidatorError as e:\n",
"except ValidationError as e:\n",
" print(e)"
]
}
@@ -279,7 +279,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
"version": "3.11.7"
}
},
"nbformat": 4,
19 changes: 17 additions & 2 deletions guardrails/errors/__init__.py
@@ -1,3 +1,18 @@
from guardrails.validator_base import ValidatorError
# Never actually used in any validators so the description is misleading.
# The naming is confusing so we're updating it.
class ValidatorError(Exception):
    """
    deprecated: 0.3.3
    Use :class:`ValidationError` instead.

    Base class for all validator errors.
    """


# Open to naming this something more generic like GuardrailsError or something,
# let's just decide in this PR
class ValidationError(Exception):
    """Top level validation error."""


__all__ = ["ValidatorError"]
__all__ = ["ValidatorError", "ValidationError"]
3 changes: 3 additions & 0 deletions guardrails/functional/chain/__init__.py
@@ -0,0 +1,3 @@
from guardrails.functional.chain.guard import Guard

__all__ = ["Guard"]
43 changes: 43 additions & 0 deletions guardrails/functional/chain/guard.py
@@ -0,0 +1,43 @@
import json
from copy import deepcopy
from typing import Dict, Optional, TypeVar, cast

from langchain_core.messages import BaseMessage
from langchain_core.runnables import Runnable, RunnableConfig

from guardrails.errors import ValidationError
from guardrails.functional.guard import Guard as FGuard

T = TypeVar("T", str, BaseMessage)


class Guard(FGuard, Runnable):
    def invoke(self, input: T, config: Optional[RunnableConfig] = None) -> T:
        output = BaseMessage(content="", type="")
        str_input = None
        input_is_chat_message = False
        if isinstance(input, BaseMessage):
            input_is_chat_message = True
            str_input = str(input.content)
            output = deepcopy(input)
        else:
            str_input = str(input)

        response = self.validate(str_input)

        validated_output = response.validated_output
        if not validated_output:
            raise ValidationError(
                (
                    "The response from the LLM failed validation!"
                    "See `guard.history` for more details."
                )
            )

        if isinstance(validated_output, Dict):
            validated_output = json.dumps(validated_output)

        if input_is_chat_message:
            output.content = validated_output
            return cast(T, output)
        return cast(T, validated_output)
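To show what "LCEL support" buys, here is a hedged usage sketch, not part of this PR's diff, of composing the new `Runnable` Guard into a LangChain expression. The import path `guardrails.functional.chain` comes from the files added above; the `from_string` construction and the `TwoWords` validator are assumptions and may differ from the final API.

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

from guardrails.functional.chain import Guard  # added in this PR
from guardrails.validators import TwoWords  # illustrative validator choice

# Assumed construction; the functional Guard's factory may differ.
guard = Guard.from_string(validators=[TwoWords(on_fail="exception")])

prompt = ChatPromptTemplate.from_template("Give a two-word name for a {animal}.")
model = ChatOpenAI(model="gpt-3.5-turbo")

# Guard.invoke accepts a str or a BaseMessage, so it can be piped directly
# after the model: the AIMessage content is validated and a copy is returned.
chain = prompt | model | guard

result = chain.invoke({"animal": "fluffy puppy"})
print(result.content)

If validation fails, `invoke` raises the `ValidationError` defined earlier in this PR, and the failed attempt remains inspectable via `guard.history`.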
26 changes: 13 additions & 13 deletions guardrails/run.py
@@ -7,6 +7,7 @@

from guardrails.classes.history import Call, Inputs, Iteration, Outputs
from guardrails.datatypes import verify_metadata_requirements
from guardrails.errors import ValidationError
from guardrails.llm_providers import (
AsyncPromptCallableBase,
OpenAICallable,
@@ -25,7 +26,6 @@
SkeletonReAsk,
reasks_to_dict,
)
from guardrails.validator_base import ValidatorError

add_destinations(logger.debug)

@@ -332,11 +332,11 @@ def validate_msg_history(
)
iteration.outputs.validation_output = validated_msg_history
if isinstance(validated_msg_history, ReAsk):
raise ValidatorError(
raise ValidationError(
f"Message history validation failed: " f"{validated_msg_history}"
)
if validated_msg_history != msg_str:
raise ValidatorError("Message history validation failed")
raise ValidationError("Message history validation failed")

def prepare_msg_history(
self,
@@ -372,9 +372,9 @@ def validate_prompt(
)
iteration.outputs.validation_output = validated_prompt
if validated_prompt is None:
raise ValidatorError("Prompt validation failed")
raise ValidationError("Prompt validation failed")
if isinstance(validated_prompt, ReAsk):
raise ValidatorError(f"Prompt validation failed: {validated_prompt}")
raise ValidationError(f"Prompt validation failed: {validated_prompt}")
return Prompt(validated_prompt)

def validate_instructions(
@@ -393,9 +393,9 @@ def validate_instructions(
)
iteration.outputs.validation_output = validated_instructions
if validated_instructions is None:
raise ValidatorError("Instructions validation failed")
raise ValidationError("Instructions validation failed")
if isinstance(validated_instructions, ReAsk):
raise ValidatorError(
raise ValidationError(
f"Instructions validation failed: {validated_instructions}"
)
return Instructions(validated_instructions)
@@ -1020,12 +1020,12 @@ async def async_prepare(
iteration, msg_str, self.metadata
)
if isinstance(validated_msg_history, ReAsk):
raise ValidatorError(
raise ValidationError(
f"Message history validation failed: "
f"{validated_msg_history}"
)
if validated_msg_history != msg_str:
raise ValidatorError("Message history validation failed")
raise ValidationError("Message history validation failed")
elif prompt is not None:
if isinstance(prompt, str):
prompt = Prompt(prompt)
@@ -1053,9 +1053,9 @@
)
iteration.outputs.validation_output = validated_prompt
if validated_prompt is None:
raise ValidatorError("Prompt validation failed")
raise ValidationError("Prompt validation failed")
if isinstance(validated_prompt, ReAsk):
raise ValidatorError(
raise ValidationError(
f"Prompt validation failed: {validated_prompt}"
)
prompt = Prompt(validated_prompt)
@@ -1072,9 +1072,9 @@
)
iteration.outputs.validation_output = validated_instructions
if validated_instructions is None:
raise ValidatorError("Instructions validation failed")
raise ValidationError("Instructions validation failed")
if isinstance(validated_instructions, ReAsk):
raise ValidatorError(
raise ValidationError(
f"Instructions validation failed: {validated_instructions}"
)
instructions = Instructions(validated_instructions)
4 changes: 0 additions & 4 deletions guardrails/validator_base.py
@@ -6,10 +6,6 @@
from pydantic import BaseModel, Field


class ValidatorError(Exception):
"""Base class for all validator errors."""


class Filter:
pass

12 changes: 3 additions & 9 deletions guardrails/validator_service.py
@@ -6,18 +6,12 @@

from guardrails.classes.history import Iteration
from guardrails.datatypes import FieldValidation
from guardrails.errors import ValidationError
from guardrails.logger import logger
from guardrails.utils.logs_utils import ValidatorLogs
from guardrails.utils.reask_utils import FieldReAsk, ReAsk
from guardrails.utils.safe_get import safe_get
from guardrails.validator_base import (
FailResult,
Filter,
PassResult,
Refrain,
Validator,
ValidatorError,
)
from guardrails.validator_base import FailResult, Filter, PassResult, Refrain, Validator


def key_not_empty(key: str) -> bool:
@@ -57,7 +51,7 @@ def perform_correction(
fail_results=results,
)
if on_fail_descriptor == "exception":
raise ValidatorError(
raise ValidationError(
"Validation failed for field with errors: "
+ ", ".join([result.error_message for result in results])
)