diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py
index 9f30665e590d7d..758484107b76f2 100644
--- a/src/transformers/pipelines/base.py
+++ b/src/transformers/pipelines/base.py
@@ -1048,9 +1048,9 @@ def check_model_type(self, supported_models: Union[List[str], dict]):
     def _sanitize_parameters(self, **pipeline_parameters):
         """
         _sanitize_parameters will be called with any excessive named arguments from either `__init__` or `__call__`
-        methods. It should return 3 dictionnaries of the resolved parameters used by the various `preprocess`,
-        `forward` and `postprocess` methods. Do not fill dictionnaries if the caller didn't specify a kwargs. This
-        let's you keep defaults in function signatures, which is more "natural".
+        methods. It should return 3 dictionaries of the resolved parameters used by the various `preprocess`,
+        `forward` and `postprocess` methods. Do not fill dictionaries if the caller didn't specify a kwarg. This
+        lets you keep defaults in function signatures, which is more "natural".

         It is not meant to be called directly, it will be automatically called and the final parameters resolved by
         `__init__` and `__call__`
diff --git a/src/transformers/pipelines/conversational.py b/src/transformers/pipelines/conversational.py
index ca091074effb51..65afd6d40e0e4f 100644
--- a/src/transformers/pipelines/conversational.py
+++ b/src/transformers/pipelines/conversational.py
@@ -272,7 +272,7 @@ def __call__(self, conversations: Union[List[Dict], Conversation, List[Conversat
                 Conversation to generate responses for. Inputs can also be passed as a list of dictionaries with `role`
                 and `content` keys - in this case, they will be converted to `Conversation` objects automatically.
                 Multiple conversations in either format may be passed as a list.
-            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
                 Whether or not to clean up the potential extra spaces in the text output.
             generate_kwargs:
                 Additional keyword arguments to pass along to the generate method of the model (see the generate method
diff --git a/src/transformers/pipelines/text_generation.py b/src/transformers/pipelines/text_generation.py
index df460a9334b1ca..ef64fb84dddda1 100644
--- a/src/transformers/pipelines/text_generation.py
+++ b/src/transformers/pipelines/text_generation.py
@@ -204,7 +204,7 @@ def __call__(self, text_inputs, **kwargs):
             return_full_text (`bool`, *optional*, defaults to `True`):
                 If set to `False` only added text is returned, otherwise the full text is returned. Only meaningful if
                 *return_text* is set to True.
-            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
                 Whether or not to clean up the potential extra spaces in the text output.
             prefix (`str`, *optional*):
                 Prefix added to prompt.
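
As a minimal sketch (not part of the patch) of the `_sanitize_parameters` contract that the corrected base.py docstring describes: a custom pipeline returns three dicts of kwargs for `preprocess`, `forward` and `postprocess`, and fills a key only when the caller actually passed it, so defaults stay in the downstream method signatures. The class and parameter names below (`MyPipeline`, `add_special_tokens`, `max_answers`) are hypothetical and used only for illustration.

# Illustrative sketch of the _sanitize_parameters contract documented in
# base.py above; not taken from the patch itself.
class MyPipeline:
    def _sanitize_parameters(self, **pipeline_parameters):
        preprocess_kwargs = {}
        forward_kwargs = {}
        postprocess_kwargs = {}
        # Forward only the values the caller explicitly set; untouched keys
        # fall back to the defaults declared in preprocess/postprocess below.
        if "add_special_tokens" in pipeline_parameters:
            preprocess_kwargs["add_special_tokens"] = pipeline_parameters["add_special_tokens"]
        if "max_answers" in pipeline_parameters:
            postprocess_kwargs["max_answers"] = pipeline_parameters["max_answers"]
        return preprocess_kwargs, forward_kwargs, postprocess_kwargs

    def preprocess(self, inputs, add_special_tokens=True):
        # The default stays in the signature and is only overridden when the
        # caller passed add_special_tokens at __init__ or __call__ time.
        return {"text": inputs, "add_special_tokens": add_special_tokens}

    def postprocess(self, model_outputs, max_answers=1):
        # Same idea: the default of 1 lives here, not in _sanitize_parameters.
        return model_outputs[:max_answers]

The two `clean_up_tokenization_spaces` edits above touch only the documented default; per the same docstrings, the parameter remains an optional `__call__` argument, so callers can still pass `clean_up_tokenization_spaces=False` (or `True`) explicitly on a given call.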