Skip to content

Commit 2dce5a0

Browse files
authored
refactor(llmrails)!: remove deprecated return_context argument (#1147)
The `return_context` argument has been removed from the LLMRails class methods as it was deprecated. Users are encouraged to use `GenerationOptions.output_vars = True` instead. This change simplifies the interface and eliminates deprecated functionality. BREAKING CHANGE: The `return_context` argument is no longer supported.
1 parent d82f0b9 commit 2dce5a0

File tree

1 file changed

+0
-23
lines changed

1 file changed

+0
-23
lines changed

nemoguardrails/rails/llm/llmrails.py

Lines changed: 0 additions & 23 deletions
Original file line number · Diff line number · Diff line change
@@ -579,7 +579,6 @@ async def generate_async(
579579
options: Optional[Union[dict, GenerationOptions]] = None,
580580
state: Optional[Union[dict, State]] = None,
581581
streaming_handler: Optional[StreamingHandler] = None,
582-
return_context: bool = False,
583582
) -> Union[str, dict, GenerationResponse, Tuple[dict, dict]]:
584583
"""Generate a completion or a next message.
585584
@@ -602,7 +601,6 @@ async def generate_async(
602601
state: The state object that should be used as the starting point.
603602
streaming_handler: If specified, and the config supports streaming, the
604603
provided handler will be used for streaming.
605-
return_context: Whether to return the context at the end of the run.
606604
607605
Returns:
608606
The completion (when a prompt is provided) or the next message.
@@ -626,19 +624,6 @@ async def generate_async(
626624
# Save the generation options in the current async context.
627625
generation_options_var.set(options)
628626

629-
if return_context:
630-
warnings.warn(
631-
"The `return_context` argument is deprecated and will be removed in 0.9.0. "
632-
"Use `GenerationOptions.output_vars = True` instead.",
633-
DeprecationWarning,
634-
stacklevel=2,
635-
)
636-
637-
# And we use the generation options mechanism instead.
638-
if options is None:
639-
options = GenerationOptions()
640-
options.output_vars = True
641-
642627
if streaming_handler:
643628
streaming_handler_var.set(streaming_handler)
644629

@@ -866,12 +851,6 @@ async def generate_async(
866851
# Otherwise, we return the full context
867852
res.output_data = context
868853

869-
# If the `return_context` is used, then we return a tuple to keep
870-
# the interface compatible.
871-
# TODO: remove this in 0.10.0.
872-
if return_context:
873-
return new_message, context
874-
875854
_log = compute_generation_log(processing_log)
876855

877856
# Include information about activated rails and LLM calls if requested
@@ -996,7 +975,6 @@ def generate(
996975
self,
997976
prompt: Optional[str] = None,
998977
messages: Optional[List[dict]] = None,
999-
return_context: bool = False,
1000978
options: Optional[Union[dict, GenerationOptions]] = None,
1001979
state: Optional[dict] = None,
1002980
):
@@ -1016,7 +994,6 @@ def generate(
1016994
messages=messages,
1017995
options=options,
1018996
state=state,
1019-
return_context=return_context,
1020997
)
1021998
)
1022999

0 commit comments

Comments (0)