diff --git a/README.md b/README.md
index dc5c1cecaaeb..ac2efecd793c 100644
--- a/README.md
+++ b/README.md
@@ -52,7 +52,7 @@ pip install --upgrade paddlenlp
 
 ### Transformer API: 强大的预训练模型生态底座
 
-覆盖**15**个网络结构和**67**个预训练模型参数,既包括百度自研的预训练模型如ERNIE系列, PLATO, SKEP等,也涵盖业界主流的中文预训练模型。也欢迎开发者进预训练模贡献!🤗
+覆盖**15**个网络结构和**67**个预训练模型参数,既包括百度自研的预训练模型如ERNIE系列, PLATO, SKEP等,也涵盖业界主流的中文预训练模型。也欢迎开发者贡献更多预训练模型!🤗
 
 ```python
 from paddlenlp.transformers import *
diff --git a/paddlenlp/transformers/albert/tokenizer.py b/paddlenlp/transformers/albert/tokenizer.py
index e79a5c76d9cf..163a16bdaf4b 100644
--- a/paddlenlp/transformers/albert/tokenizer.py
+++ b/paddlenlp/transformers/albert/tokenizer.py
@@ -151,12 +151,12 @@ def __init__(
 
         if vocab_file is not None:
             self.tokenizer = AlbertChineseTokenizer(
-                vocab_file,
+                vocab_file=vocab_file,
                 do_lower_case=False,
             )
         elif sentencepiece_model_file is not None:
             self.tokenizer = AlbertEnglishTokenizer(
-                sentencepiece_model_file,
+                sentencepiece_model_file=sentencepiece_model_file,
                 do_lower_case=True,
             )
         else: