diff --git a/examples/language-modeling/run_clm.py b/examples/language-modeling/run_clm.py
index 505f8f68c4ca83..3bc3fe40ff5da7 100755
--- a/examples/language-modeling/run_clm.py
+++ b/examples/language-modeling/run_clm.py
@@ -230,17 +230,19 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
         if "validation" not in datasets.keys():
             datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
             )
             datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
             )
     else:
         data_files = {}
@@ -255,7 +257,7 @@ def main():
         )
         if extension == "txt":
             extension = "text"
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/language-modeling/run_mlm.py b/examples/language-modeling/run_mlm.py
index 2934fb0c23e813..1cb9fcc6cdd482 100755
--- a/examples/language-modeling/run_mlm.py
+++ b/examples/language-modeling/run_mlm.py
@@ -239,17 +239,19 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
         if "validation" not in datasets.keys():
             datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
             )
             datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
             )
     else:
         data_files = {}
@@ -260,7 +262,7 @@ def main():
         extension = data_args.train_file.split(".")[-1]
         if extension == "txt":
             extension = "text"
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/language-modeling/run_mlm_flax.py b/examples/language-modeling/run_mlm_flax.py
index 6ab6764931899a..8b395be539bab4 100755
--- a/examples/language-modeling/run_mlm_flax.py
+++ b/examples/language-modeling/run_mlm_flax.py
@@ -475,17 +475,19 @@ def generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndar
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
         if "validation" not in datasets.keys():
             datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
             )
             datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
             )
     else:
         data_files = {}
@@ -496,7 +498,7 @@ def generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndar
         extension = data_args.train_file.split(".")[-1]
         if extension == "txt":
             extension = "text"
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/language-modeling/run_plm.py b/examples/language-modeling/run_plm.py
index f5c9c47b72241b..b5734815781579 100755
--- a/examples/language-modeling/run_plm.py
+++ b/examples/language-modeling/run_plm.py
@@ -236,17 +236,19 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
         if "validation" not in datasets.keys():
             datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
             )
             datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
             )
     else:
         data_files = {}
@@ -257,7 +259,7 @@ def main():
         extension = data_args.train_file.split(".")[-1]
         if extension == "txt":
             extension = "text"
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/multiple-choice/run_swag.py b/examples/multiple-choice/run_swag.py
index 04ad05affd8915..de012171938c68 100755
--- a/examples/multiple-choice/run_swag.py
+++ b/examples/multiple-choice/run_swag.py
@@ -268,10 +268,10 @@ def main():
         if data_args.validation_file is not None:
             data_files["validation"] = data_args.validation_file
         extension = data_args.train_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     else:
         # Downloading and loading the swag dataset from the hub.
-        datasets = load_dataset("swag", "regular")
+        datasets = load_dataset("swag", "regular", cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/question-answering/run_qa.py b/examples/question-answering/run_qa.py
index fa76110b5139d0..d275a2992212c9 100755
--- a/examples/question-answering/run_qa.py
+++ b/examples/question-answering/run_qa.py
@@ -256,7 +256,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:
@@ -269,7 +269,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files, field="data")
+        datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/question-answering/run_qa_beam_search.py b/examples/question-answering/run_qa_beam_search.py
index 7a6d0b5bb43372..490ecae86eb79b 100755
--- a/examples/question-answering/run_qa_beam_search.py
+++ b/examples/question-answering/run_qa_beam_search.py
@@ -255,7 +255,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:
@@ -267,7 +267,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files, field="data")
+        datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/seq2seq/run_summarization.py b/examples/seq2seq/run_summarization.py
index 811c5a524215ff..0856d6dce5cb22 100755
--- a/examples/seq2seq/run_summarization.py
+++ b/examples/seq2seq/run_summarization.py
@@ -310,7 +310,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:
@@ -322,7 +322,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/seq2seq/run_translation.py b/examples/seq2seq/run_translation.py
index a41da4e0abbeab..ff9a84bf68a840 100755
--- a/examples/seq2seq/run_translation.py
+++ b/examples/seq2seq/run_translation.py
@@ -294,7 +294,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:
@@ -306,7 +306,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/text-classification/run_glue.py b/examples/text-classification/run_glue.py
index 94b52a4bd0ba54..0af73cf5ddb59f 100755
--- a/examples/text-classification/run_glue.py
+++ b/examples/text-classification/run_glue.py
@@ -239,7 +239,7 @@ def main():
     # download the dataset.
     if data_args.task_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset("glue", data_args.task_name)
+        datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
     else:
         # Loading a dataset from your local files.
         # CSV/JSON training and evaluation files are needed.
@@ -263,10 +263,10 @@ def main():
 
         if data_args.train_file.endswith(".csv"):
             # Loading a dataset from local csv files
-            datasets = load_dataset("csv", data_files=data_files)
+            datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
         else:
             # Loading a dataset from local json files
-            datasets = load_dataset("json", data_files=data_files)
+            datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 
diff --git a/examples/text-classification/run_xnli.py b/examples/text-classification/run_xnli.py
index 1acb29b7e2436c..1c74e6e7bc0f6f 100755
--- a/examples/text-classification/run_xnli.py
+++ b/examples/text-classification/run_xnli.py
@@ -209,17 +209,19 @@ def main():
     # Downloading and loading xnli dataset from the hub.
     if training_args.do_train:
         if model_args.train_language is None:
-            train_dataset = load_dataset("xnli", model_args.language, split="train")
+            train_dataset = load_dataset("xnli", model_args.language, split="train", cache_dir=model_args.cache_dir)
         else:
-            train_dataset = load_dataset("xnli", model_args.train_language, split="train")
+            train_dataset = load_dataset(
+                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir
+            )
         label_list = train_dataset.features["label"].names
 
     if training_args.do_eval:
-        eval_dataset = load_dataset("xnli", model_args.language, split="validation")
+        eval_dataset = load_dataset("xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir)
         label_list = eval_dataset.features["label"].names
 
     if training_args.do_predict:
-        test_dataset = load_dataset("xnli", model_args.language, split="test")
+        test_dataset = load_dataset("xnli", model_args.language, split="test", cache_dir=model_args.cache_dir)
         label_list = test_dataset.features["label"].names
 
     # Labels
diff --git a/examples/token-classification/run_ner.py b/examples/token-classification/run_ner.py
index 0fc08644b801d2..2f31b1f64da55f 100755
--- a/examples/token-classification/run_ner.py
+++ b/examples/token-classification/run_ner.py
@@ -229,7 +229,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:
@@ -239,7 +239,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
         extension = data_args.train_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
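
For context (not part of the patch): every hunk above makes the same change, threading the user-supplied cache directory into each datasets.load_dataset call so downloads and converted Arrow files land where the user chooses instead of the default ~/.cache/huggingface/datasets. Below is a minimal standalone sketch of that pattern; the dataset name, split percentage, and cache path are illustrative placeholders, not values taken from these scripts.

from datasets import load_dataset

# Stand-in for model_args.cache_dir; in the example scripts this is normally
# populated from the --cache_dir command-line flag via HfArgumentParser.
cache_dir = "/tmp/hf_datasets_cache"

# Hub dataset: pass cache_dir alongside the dataset name and config.
datasets = load_dataset("wikitext", "wikitext-2-raw-v1", cache_dir=cache_dir)

# Carving a validation split out of train, as the language-modeling scripts do
# when the dataset ships without one; the 5% figure here is arbitrary.
if "validation" not in datasets.keys():
    datasets["validation"] = load_dataset(
        "wikitext",
        "wikitext-2-raw-v1",
        split="train[:5%]",
        cache_dir=cache_dir,
    )
    datasets["train"] = load_dataset(
        "wikitext",
        "wikitext-2-raw-v1",
        split="train[5%:]",
        cache_dir=cache_dir,
    )

# Local files: a loader name ("json", "csv", "text") replaces the hub name, and
# cache_dir still controls where the resulting Arrow cache is written.
# datasets = load_dataset("json", data_files={"train": "train.json"}, cache_dir=cache_dir)

With this patch applied, running any of the example scripts with --cache_dir should route dataset caching the same way, since that flag fills model_args.cache_dir.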