From 4e3e13559aadd535b612d55f75855ee58c62f664 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Sat, 1 Apr 2023 23:20:43 +0800 Subject: [PATCH 01/18] enc-dec imp --- requirements.txt | 2 + scripts/run_finetune_seq2seq.sh | 39 ++ src/lmflow/args.py | 5 +- src/lmflow/models/encoder_decoder_model.py | 22 + src/lmflow/models/hf_decoder_model.py | 2 + src/lmflow/models/hf_encoder_decoder_model.py | 459 ++++++++++++++++++ src/lmflow/pipeline/finetuner.py | 92 +++- 7 files changed, 608 insertions(+), 13 deletions(-) create mode 100755 scripts/run_finetune_seq2seq.sh create mode 100644 src/lmflow/models/encoder_decoder_model.py create mode 100644 src/lmflow/models/hf_encoder_decoder_model.py diff --git a/requirements.txt b/requirements.txt index 998a3436c..17384dbec 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,4 +6,6 @@ wandb==0.14.0 deepspeed==0.8.3 trl @ git+https://github.com/lvwerra/trl.git#egg=trl-0.4.1 sentencepiece +icetk==0.0.7 +cpm_kernels==1.0.11 transformers @ git+https://github.com/huggingface/transformers@c612628 \ No newline at end of file diff --git a/scripts/run_finetune_seq2seq.sh b/scripts/run_finetune_seq2seq.sh new file mode 100755 index 000000000..1bf001b82 --- /dev/null +++ b/scripts/run_finetune_seq2seq.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# Please run this script under ${project_id} in project directory of +# https://github.com/shizhediao/llm-ft +# COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4 + +deepspeed_args="--master_port=11000" # Default argument +if [ $# -ge 1 ]; then + deepspeed_args="$1" +fi + +exp_id=finetune +project_dir=$(cd "$(dirname $0)"/..; pwd) +output_dir=${project_dir}/output_models/${exp_id} +log_dir=${project_dir}/log/${exp_id} + +dataset_path=${project_dir}/data/alpaca/test + +mkdir -p ${output_dir} ${log_dir} + +deepspeed ${deepspeed_args} \ + examples/finetune.py \ + --model_name_or_path t5-base \ + --dataset_path ${dataset_path} \ + --output_dir ${output_dir} --overwrite_output_dir \ + --num_train_epochs 0.01 \ + --learning_rate 2e-5 \ + --block_size 512 \ + --per_device_train_batch_size 1 \ + --deepspeed configs/ds_config_zero3.json \ + --bf16 \ + --run_name finetune \ + --validation_split_percentage 0 \ + --logging_steps 20 \ + --do_train \ + --ddp_timeout 72000 \ + --save_steps 5000 \ + --dataloader_num_workers 1 \ + | tee ${log_dir}/train.log \ + 2> ${log_dir}/train.err diff --git a/src/lmflow/args.py b/src/lmflow/args.py index 06b54ef05..25f333eac 100644 --- a/src/lmflow/args.py +++ b/src/lmflow/args.py @@ -322,7 +322,10 @@ class FinetunerArguments(TrainingArguments): """ Adapt transformers.TrainingArguments """ - pass + is_seq2seq: bool = field( + default=False, + metadata={"help": "whether use seq2seq model"} + ) @dataclass diff --git a/src/lmflow/models/encoder_decoder_model.py b/src/lmflow/models/encoder_decoder_model.py new file mode 100644 index 000000000..9db0fc4a5 --- /dev/null +++ b/src/lmflow/models/encoder_decoder_model.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +# coding=utf-8 +"""A one-line summary of the module or program, terminated by a period. + +Leave one blank line. The rest of this docstring should contain an +overall description of the module or program. Optionally, it may also +contain a brief desription of exported classes and funcctions and/or usage +examples. 
+ +Typical usage example: + + foo = ClassFoo() + bar = foo.FunctionBar() +""" + +from lmflow.models.base_model import BaseModel + + +class EncoderDecoderModel(BaseModel): + + def __init__(self, *args, **kwargs): + pass diff --git a/src/lmflow/models/hf_decoder_model.py b/src/lmflow/models/hf_decoder_model.py index 8a68443fd..3cb888477 100644 --- a/src/lmflow/models/hf_decoder_model.py +++ b/src/lmflow/models/hf_decoder_model.py @@ -18,6 +18,7 @@ and question answering. """ +import logging from typing import List, Union import deepspeed @@ -48,6 +49,7 @@ from lmflow.models.decoder_model import DecoderModel from lmflow.models.interfaces.tunable import Tunable +logger = logging.getLogger(__name__) class HFDecoderModel(DecoderModel, Tunable): r""" diff --git a/src/lmflow/models/hf_encoder_decoder_model.py b/src/lmflow/models/hf_encoder_decoder_model.py new file mode 100644 index 000000000..320441336 --- /dev/null +++ b/src/lmflow/models/hf_encoder_decoder_model.py @@ -0,0 +1,459 @@ +#!/usr/bin/env python +# coding=utf-8 +"""This is a class called HFEncoderDecoder which is a wrapper around transformers model and +tokenizer classes. It has several methods such as __init__, tokenize, and train that are +used for training and fine-tuning the model. The __init__ method takes in several arguments +such as model_args, tune_strategy, and ds_config, which are used to load the pretrained +model and tokenizer, and initialize the training settings. + +The tokenize method is used to tokenize the input text and return the input IDs and attention +masks that can be fed to the model for training or inference. + +This class supports different tune_strategy options such as 'normal', 'none', 'lora', and +'adapter', which allow for different fine-tuning settings of the model. However, the 'lora' +and 'adapter' strategies are not yet implemented. + +Overall, this class provides a convenient interface for loading and fine-tuning transformer +models and can be used for various NLP tasks such as language modeling, text classification, +and question answering. +""" +import logging +from typing import List, Union + +import deepspeed +from filelock import FileLock +from peft import ( + LoraConfig, + PeftModel, + TaskType, + get_peft_config, + get_peft_model, + prepare_model_for_int8_training, +) +import nltk # Here to have a nice missing dependency error message early on +import torch +import transformers +from transformers.deepspeed import HfDeepSpeedConfig + +from transformers.testing_utils import CaptureLogger + +from transformers import ( + CONFIG_MAPPING, + AutoConfig, + AutoModelForSeq2SeqLM, + AutoTokenizer, + DataCollatorForSeq2Seq, + HfArgumentParser, + MBart50Tokenizer, + MBart50TokenizerFast, + MBartTokenizer, + MBartTokenizerFast, + Seq2SeqTrainer, + Seq2SeqTrainingArguments, + set_seed, +) +from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry +from lmflow.datasets.dataset import Dataset +from lmflow.models.encoder_decoder_model import EncoderDecoderModel +from lmflow.models.interfaces.tunable import Tunable + +logger = logging.getLogger(__name__) + +class HFDecoderModel(EncoderDecoderModel, Tunable): + r""" + Initializes a HFEncoderDecoderModel instance. + + Parameters + ------------ + + model_args : + Model arguments such as model name, path, revision, etc. + + tune_strategy : str or none, default="normal". + A string representing the dataset backend. Defaults to "huggingface". + + ds_config : + Deepspeed configuations. + + args : Optional. + Positional arguments. 
+ + kwargs : Optional. + Keyword arguments. + """ + + def __init__( + self, + model_args, + tune_strategy='normal', + ds_config=None, + *args, + **kwargs + ): + """ + Initializes a HFEncoderDecoderModel instance. + :param model_args: dictionary with model arguments such as model name, path, revision, etc. + :param tune_strategy: tuning strategy: normal, none, lora or adapter + :param ds_config: deepspeed configuration for distributed training + """ + + # See more about loading any type of standard or custom dataset (from + # files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Load pretrained model and tokenizer + # + # Distributed training: The .from_pretrained methods guarantee that + # only one local process can concurrently download model & vocab. + + + if tune_strategy == 'normal': + try: + nltk.data.find("tokenizers/punkt") + except (LookupError, OSError): + if is_offline_mode(): + raise LookupError( + "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" + ) + with FileLock(".lock") as lock: + nltk.download("punkt", quiet=True) + + # A list of all multilingual tokenizer which require lang attribute. + MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast] + + config_kwargs = { + "cache_dir": model_args.cache_dir, + "revision": model_args.model_revision, + "use_auth_token": True if model_args.use_auth_token else None, + } + if model_args.config_name: + config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) + elif model_args.model_name_or_path: + config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) + else: + config = CONFIG_MAPPING[model_args.model_type]() + logger.warning("You are instantiating a new config instance from scratch.") + if model_args.config_overrides is not None: + logger.info(f"Overriding config: {model_args.config_overrides}") + config.update_from_string(model_args.config_overrides) + logger.info(f"New config: {config}") + + tokenizer_kwargs = { + "cache_dir": model_args.cache_dir, + "use_fast": model_args.use_fast_tokenizer, + "revision": model_args.model_revision, + "use_auth_token": True if model_args.use_auth_token else None, + } + if model_args.tokenizer_name: + tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) + elif model_args.model_name_or_path: + tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) + else: + raise ValueError( + "You are instantiating a new tokenizer from scratch. This is" + " not supported by this script. You can do it from another" + " script, save it, and load it from here, using" + " --tokenizer_name." 
+ ) + + if model_args.model_name_or_path: + torch_dtype = ( + model_args.torch_dtype + if model_args.torch_dtype in ["auto", None] + else getattr(torch, model_args.torch_dtype) + ) + model = AutoModelForSeq2SeqLM.from_pretrained( + model_args.model_name_or_path, + from_tf=bool(".ckpt" in model_args.model_name_or_path), + config=config, + cache_dir=model_args.cache_dir, + revision=model_args.model_revision, + use_auth_token=True if model_args.use_auth_token else None, + torch_dtype=torch_dtype, + ) + else: + model = AutoModelForSeq2SeqLM.from_config(config) + n_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) + logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") + + if model_args.use_lora: + peft_config = LoraConfig( + task_type=TaskType.SEQ_2_SEQ_LM, + inference_mode=False, + r=model_args.lora_r, + target_modules=["q_proj","v_proj"], + lora_alpha=model_args.lora_alpha, + lora_dropout=model_args.lora_dropout + ) + model = get_peft_model(model, peft_config) + model.print_trainable_parameters() + + + # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch + # on a small vocab and want a smaller embedding size, remove this test. + embedding_size = model.get_input_embeddings().weight.shape[0] + if len(tokenizer) > embedding_size: + model.resize_token_embeddings(len(tokenizer)) + + if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): + if isinstance(tokenizer, MBartTokenizer): + model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.lang] + else: + model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.lang) + + if model.config.decoder_start_token_id is None: + raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") + + if ( + hasattr(model.config, "max_position_embeddings") + and model.config.max_position_embeddings < data_args.max_source_length + ): + if model_args.resize_position_embeddings is None: + logger.warning( + "Increasing the model's number of position embedding vectors from" + f" {model.config.max_position_embeddings} to {data_args.max_source_length}." + ) + model.resize_position_embeddings(data_args.max_source_length) + elif model_args.resize_position_embeddings: + model.resize_position_embeddings(data_args.max_source_length) + else: + raise ValueError( + f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has" + f" {model.config.max_position_embeddings} position encodings. Consider either reducing" + f" `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the" + " model's position encodings by passing `--resize_position_embeddings`." + ) + prefix = data_args.source_prefix if data_args.source_prefix is not None else "" + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. 
+ if training_args.do_train: + if "train" not in raw_datasets: + raise ValueError("--do_train requires a train dataset") + column_names = raw_datasets["train"].column_names + elif training_args.do_eval: + if "validation" not in raw_datasets: + raise ValueError("--do_eval requires a validation dataset") + column_names = raw_datasets["validation"].column_names + elif training_args.do_predict: + if "test" not in raw_datasets: + raise ValueError("--do_predict requires a test dataset") + column_names = raw_datasets["test"].column_names + else: + logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") + return + + self.model_args = model_args + self.config = config + self.backend_model = model + self.tokenizer = tokenizer + self.tune_strategy = tune_strategy + + elif tune_strategy == 'none': + dschf = HfDeepSpeedConfig(ds_config) + self.backend_model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path) + self.tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) + peft_model_id = model_args.lora_model_path + if peft_model_id is not None: + self.backend_model = PeftModel.from_pretrained( + self.backend_model, peft_model_id + ) + + deepspeed.init_distributed() + self.ds_engine = deepspeed.initialize(model=self.backend_model, config_params=ds_config)[0] + self.ds_engine.module.eval() + + elif tune_strategy == 'adapter': + raise NotImplementedError('adapter tune strategy not implemented') + + + def tokenize(self, dataset, *args, **kwargs): + """ + Tokenize the full dataset. + + Parameters + ------------ + dataset : + Text dataset. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + tokenized_datasets : + The tokenized dataset. + """ + model_args = self.model_args + + if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)): + assert ( + data_args.lang is not None + ), f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --lang argument" + + tokenizer.src_lang = data_args.lang + tokenizer.tgt_lang = data_args.lang + + # For multilingual translation models like mBART-50 and M2M100 we need to force the target language token + # as the first generated token. We ask the user to explicitly provide this as --forced_bos_token argument. + forced_bos_token_id = ( + tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None + ) + model.config.forced_bos_token_id = forced_bos_token_id + + # Preprocessing the datasets. + # First we tokenize all the texts. 
+ if dataset.get_backend() != "huggingface": + raise NotImplementedError( + "tokenization of datasets with non-huggingface backend are" + "not supported yet" + ) + + raw_datasets = dataset + hf_raw_datasets = dataset.get_backend_dataset() + column_names = list(hf_raw_datasets.features) + text_column_name = "text" if "text" in column_names else column_names[0] + + # since this will be pickled to avoid _LazyModule error in Hasher force + # logger loading before tokenize_function + tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") + if model_args.use_lora: + self.tokenizer.pad_token = 1 + + def tokenize_function(examples): + with CaptureLogger(tok_logger) as cl: + if not model_args.use_lora: + output = self.tokenizer(examples[text_column_name]) + else: + output = self.tokenizer( + examples[text_column_name], + truncation=True, + ) + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" + " before being passed to the model." + ) + return output + + data_args = raw_datasets.get_data_args() + if not data_args.streaming: + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on dataset", + ) + else: + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + remove_columns=column_names, + ) + return tokenized_datasets + + + def encode(self, input: Union[str, List[str]], *args, **kwargs ) -> List[int]: + """ + Perform encoding process of the tokenizer. + + Parameters + ------------ + inputs : str or list. + The text sequence. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The tokenized inputs. + """ + return self.tokenizer.encode(text=input, *args, **kwargs) + + + def decode(self, input, *args, **kwargs ) -> List[int]: + """ + Perform decoding process of the tokenizer. + + Parameters + ------------ + inputs : list. + The token sequence. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The text decoded from the token inputs. + """ + return self.tokenizer.decode(input, *args, **kwargs) + + + def inference(self, inputs, *args, **kwargs): + """ + Perform generation process of the model. + + Parameters + ------------ + inputs : + The sequence used as a prompt for the generation or as model inputs to the model. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The generated sequence output + """ + + + with torch.no_grad(): + outputs = self.ds_engine.module.generate( + input_ids=inputs, + synced_gpus=True, + pad_token_id=self.tokenizer.eos_token_id, + *args, + **kwargs + ) + return outputs + + + def get_max_length(self): + """ + Return max acceptable input length in terms of tokens. + """ + return self.tokenizer.model_max_length + + + def get_tokenizer(self): + """ + Return the tokenizer of the model. + """ + return self.tokenizer + + + def get_backend_model(self): + """ + Return the backend model. 
+ """ + return self.backend_model diff --git a/src/lmflow/pipeline/finetuner.py b/src/lmflow/pipeline/finetuner.py index b6350aeee..45af477be 100644 --- a/src/lmflow/pipeline/finetuner.py +++ b/src/lmflow/pipeline/finetuner.py @@ -3,6 +3,8 @@ """The Finetuner class simplifies the process of running finetuning process on a language model for a TunableModel instance with given dataset. """ +from __future__ import absolute_import + import logging import os import sys @@ -13,6 +15,8 @@ from itertools import chain from transformers import ( Trainer, + Seq2SeqTrainer, + DataCollatorForSeq2Seq, default_data_collator, set_seed, ) @@ -20,7 +24,9 @@ from lmflow.datasets.dataset import Dataset from lmflow.pipeline.base_tuner import BaseTuner - +import evaluate +import numpy as np +import nltk logger = logging.getLogger(__name__) @@ -209,17 +215,79 @@ def tune(self, model, lm_dataset): # Initialize our Trainer training_args = finetuner_args - trainer = Trainer( - model=model.get_backend_model(), - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=None, - tokenizer=model.get_tokenizer(), - # Data collator will default to DataCollatorWithPadding, so we change it. - data_collator=default_data_collator, - compute_metrics=None, - preprocess_logits_for_metrics=None, - ) + + if finetuner_args.is_seq2seq: + # Data collator + label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id + data_collator = DataCollatorForSeq2Seq( + model.get_tokenizer(), + model=model, + label_pad_token_id=label_pad_token_id, + pad_to_multiple_of=8 if training_args.fp16 else None, + ) + + # Metric + metric = evaluate.load("rouge") + def postprocess_text(preds, labels): + preds = [pred.strip() for pred in preds] + labels = [label.strip() for label in labels] + + # rougeLSum expects newline after each sentence + preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds] + labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels] + + return preds, labels + + def compute_metrics(eval_preds): + preds, labels = eval_preds + if isinstance(preds, tuple): + preds = preds[0] + decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) + if data_args.ignore_pad_token_for_loss: + # Replace -100 in the labels as we can't decode them. 
+ labels = np.where(labels != -100, labels, tokenizer.pad_token_id) + decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) + + # Some simple post-processing + decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) + + result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) + result = {k: round(v * 100, 4) for k, v in result.items()} + prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] + result["gen_len"] = np.mean(prediction_lens) + return result + # Override the decoding parameters of Seq2SeqTrainer + training_args.generation_max_length = ( + training_args.generation_max_length + if training_args.generation_max_length is not None + else data_args.val_max_target_length + ) + training_args.generation_num_beams = ( + data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams + ) + # Initialize our Trainer + trainer = Seq2SeqTrainer( + model=model, + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=eval_dataset if training_args.do_eval else None, + tokenizer=tokenizer, + data_collator=data_collator, + compute_metrics=compute_metrics if training_args.predict_with_generate else None, + ) + + else: + trainer = Trainer( + model=model.get_backend_model(), + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=None, + tokenizer=model.get_tokenizer(), + # Data collator will default to DataCollatorWithPadding, so we change it. + data_collator=default_data_collator, + compute_metrics=None, + preprocess_logits_for_metrics=None, + ) # Training if training_args.do_train: From 0af2ecd30f68fd9134d51f821148080e11fd07f9 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Sat, 1 Apr 2023 23:20:43 +0800 Subject: [PATCH 02/18] enc-dec imp --- requirements.txt | 2 + scripts/run_finetune_seq2seq.sh | 39 ++ src/lmflow/args.py | 5 +- src/lmflow/models/encoder_decoder_model.py | 22 + src/lmflow/models/hf_decoder_model.py | 1 + src/lmflow/models/hf_encoder_decoder_model.py | 459 ++++++++++++++++++ src/lmflow/pipeline/finetuner.py | 92 +++- 7 files changed, 607 insertions(+), 13 deletions(-) create mode 100755 scripts/run_finetune_seq2seq.sh create mode 100644 src/lmflow/models/encoder_decoder_model.py create mode 100644 src/lmflow/models/hf_encoder_decoder_model.py diff --git a/requirements.txt b/requirements.txt index a1c11267b..50d12daba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,8 @@ wandb==0.14.0 deepspeed==0.8.3 trl @ git+https://github.com/lvwerra/trl.git#egg=trl-0.4.1 sentencepiece +icetk==0.0.7 +cpm_kernels==1.0.11 transformers @ git+https://github.com/huggingface/transformers@c612628 flask flask_cors diff --git a/scripts/run_finetune_seq2seq.sh b/scripts/run_finetune_seq2seq.sh new file mode 100755 index 000000000..1bf001b82 --- /dev/null +++ b/scripts/run_finetune_seq2seq.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# Please run this script under ${project_id} in project directory of +# https://github.com/shizhediao/llm-ft +# COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4 + +deepspeed_args="--master_port=11000" # Default argument +if [ $# -ge 1 ]; then + deepspeed_args="$1" +fi + +exp_id=finetune +project_dir=$(cd "$(dirname $0)"/..; pwd) +output_dir=${project_dir}/output_models/${exp_id} +log_dir=${project_dir}/log/${exp_id} + +dataset_path=${project_dir}/data/alpaca/test + +mkdir -p ${output_dir} ${log_dir} + +deepspeed 
${deepspeed_args} \ + examples/finetune.py \ + --model_name_or_path t5-base \ + --dataset_path ${dataset_path} \ + --output_dir ${output_dir} --overwrite_output_dir \ + --num_train_epochs 0.01 \ + --learning_rate 2e-5 \ + --block_size 512 \ + --per_device_train_batch_size 1 \ + --deepspeed configs/ds_config_zero3.json \ + --bf16 \ + --run_name finetune \ + --validation_split_percentage 0 \ + --logging_steps 20 \ + --do_train \ + --ddp_timeout 72000 \ + --save_steps 5000 \ + --dataloader_num_workers 1 \ + | tee ${log_dir}/train.log \ + 2> ${log_dir}/train.err diff --git a/src/lmflow/args.py b/src/lmflow/args.py index 7d71c4d88..3d2c6e9b8 100644 --- a/src/lmflow/args.py +++ b/src/lmflow/args.py @@ -330,7 +330,10 @@ class FinetunerArguments(TrainingArguments): """ Adapt transformers.TrainingArguments """ - pass + is_seq2seq: bool = field( + default=False, + metadata={"help": "whether use seq2seq model"} + ) @dataclass diff --git a/src/lmflow/models/encoder_decoder_model.py b/src/lmflow/models/encoder_decoder_model.py new file mode 100644 index 000000000..9db0fc4a5 --- /dev/null +++ b/src/lmflow/models/encoder_decoder_model.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +# coding=utf-8 +"""A one-line summary of the module or program, terminated by a period. + +Leave one blank line. The rest of this docstring should contain an +overall description of the module or program. Optionally, it may also +contain a brief desription of exported classes and funcctions and/or usage +examples. + +Typical usage example: + + foo = ClassFoo() + bar = foo.FunctionBar() +""" + +from lmflow.models.base_model import BaseModel + + +class EncoderDecoderModel(BaseModel): + + def __init__(self, *args, **kwargs): + pass diff --git a/src/lmflow/models/hf_decoder_model.py b/src/lmflow/models/hf_decoder_model.py index b20509dc7..f66546f0f 100644 --- a/src/lmflow/models/hf_decoder_model.py +++ b/src/lmflow/models/hf_decoder_model.py @@ -49,6 +49,7 @@ from lmflow.models.decoder_model import DecoderModel from lmflow.models.interfaces.tunable import Tunable +logger = logging.getLogger(__name__) logger = logging.getLogger(__name__) diff --git a/src/lmflow/models/hf_encoder_decoder_model.py b/src/lmflow/models/hf_encoder_decoder_model.py new file mode 100644 index 000000000..320441336 --- /dev/null +++ b/src/lmflow/models/hf_encoder_decoder_model.py @@ -0,0 +1,459 @@ +#!/usr/bin/env python +# coding=utf-8 +"""This is a class called HFEncoderDecoder which is a wrapper around transformers model and +tokenizer classes. It has several methods such as __init__, tokenize, and train that are +used for training and fine-tuning the model. The __init__ method takes in several arguments +such as model_args, tune_strategy, and ds_config, which are used to load the pretrained +model and tokenizer, and initialize the training settings. + +The tokenize method is used to tokenize the input text and return the input IDs and attention +masks that can be fed to the model for training or inference. + +This class supports different tune_strategy options such as 'normal', 'none', 'lora', and +'adapter', which allow for different fine-tuning settings of the model. However, the 'lora' +and 'adapter' strategies are not yet implemented. + +Overall, this class provides a convenient interface for loading and fine-tuning transformer +models and can be used for various NLP tasks such as language modeling, text classification, +and question answering. 
+""" +import logging +from typing import List, Union + +import deepspeed +from filelock import FileLock +from peft import ( + LoraConfig, + PeftModel, + TaskType, + get_peft_config, + get_peft_model, + prepare_model_for_int8_training, +) +import nltk # Here to have a nice missing dependency error message early on +import torch +import transformers +from transformers.deepspeed import HfDeepSpeedConfig + +from transformers.testing_utils import CaptureLogger + +from transformers import ( + CONFIG_MAPPING, + AutoConfig, + AutoModelForSeq2SeqLM, + AutoTokenizer, + DataCollatorForSeq2Seq, + HfArgumentParser, + MBart50Tokenizer, + MBart50TokenizerFast, + MBartTokenizer, + MBartTokenizerFast, + Seq2SeqTrainer, + Seq2SeqTrainingArguments, + set_seed, +) +from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry +from lmflow.datasets.dataset import Dataset +from lmflow.models.encoder_decoder_model import EncoderDecoderModel +from lmflow.models.interfaces.tunable import Tunable + +logger = logging.getLogger(__name__) + +class HFDecoderModel(EncoderDecoderModel, Tunable): + r""" + Initializes a HFEncoderDecoderModel instance. + + Parameters + ------------ + + model_args : + Model arguments such as model name, path, revision, etc. + + tune_strategy : str or none, default="normal". + A string representing the dataset backend. Defaults to "huggingface". + + ds_config : + Deepspeed configuations. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + """ + + def __init__( + self, + model_args, + tune_strategy='normal', + ds_config=None, + *args, + **kwargs + ): + """ + Initializes a HFEncoderDecoderModel instance. + :param model_args: dictionary with model arguments such as model name, path, revision, etc. + :param tune_strategy: tuning strategy: normal, none, lora or adapter + :param ds_config: deepspeed configuration for distributed training + """ + + # See more about loading any type of standard or custom dataset (from + # files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Load pretrained model and tokenizer + # + # Distributed training: The .from_pretrained methods guarantee that + # only one local process can concurrently download model & vocab. + + + if tune_strategy == 'normal': + try: + nltk.data.find("tokenizers/punkt") + except (LookupError, OSError): + if is_offline_mode(): + raise LookupError( + "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" + ) + with FileLock(".lock") as lock: + nltk.download("punkt", quiet=True) + + # A list of all multilingual tokenizer which require lang attribute. 
+ MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast] + + config_kwargs = { + "cache_dir": model_args.cache_dir, + "revision": model_args.model_revision, + "use_auth_token": True if model_args.use_auth_token else None, + } + if model_args.config_name: + config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) + elif model_args.model_name_or_path: + config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) + else: + config = CONFIG_MAPPING[model_args.model_type]() + logger.warning("You are instantiating a new config instance from scratch.") + if model_args.config_overrides is not None: + logger.info(f"Overriding config: {model_args.config_overrides}") + config.update_from_string(model_args.config_overrides) + logger.info(f"New config: {config}") + + tokenizer_kwargs = { + "cache_dir": model_args.cache_dir, + "use_fast": model_args.use_fast_tokenizer, + "revision": model_args.model_revision, + "use_auth_token": True if model_args.use_auth_token else None, + } + if model_args.tokenizer_name: + tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) + elif model_args.model_name_or_path: + tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) + else: + raise ValueError( + "You are instantiating a new tokenizer from scratch. This is" + " not supported by this script. You can do it from another" + " script, save it, and load it from here, using" + " --tokenizer_name." + ) + + if model_args.model_name_or_path: + torch_dtype = ( + model_args.torch_dtype + if model_args.torch_dtype in ["auto", None] + else getattr(torch, model_args.torch_dtype) + ) + model = AutoModelForSeq2SeqLM.from_pretrained( + model_args.model_name_or_path, + from_tf=bool(".ckpt" in model_args.model_name_or_path), + config=config, + cache_dir=model_args.cache_dir, + revision=model_args.model_revision, + use_auth_token=True if model_args.use_auth_token else None, + torch_dtype=torch_dtype, + ) + else: + model = AutoModelForSeq2SeqLM.from_config(config) + n_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) + logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") + + if model_args.use_lora: + peft_config = LoraConfig( + task_type=TaskType.SEQ_2_SEQ_LM, + inference_mode=False, + r=model_args.lora_r, + target_modules=["q_proj","v_proj"], + lora_alpha=model_args.lora_alpha, + lora_dropout=model_args.lora_dropout + ) + model = get_peft_model(model, peft_config) + model.print_trainable_parameters() + + + # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch + # on a small vocab and want a smaller embedding size, remove this test. 
+ embedding_size = model.get_input_embeddings().weight.shape[0] + if len(tokenizer) > embedding_size: + model.resize_token_embeddings(len(tokenizer)) + + if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): + if isinstance(tokenizer, MBartTokenizer): + model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.lang] + else: + model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.lang) + + if model.config.decoder_start_token_id is None: + raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") + + if ( + hasattr(model.config, "max_position_embeddings") + and model.config.max_position_embeddings < data_args.max_source_length + ): + if model_args.resize_position_embeddings is None: + logger.warning( + "Increasing the model's number of position embedding vectors from" + f" {model.config.max_position_embeddings} to {data_args.max_source_length}." + ) + model.resize_position_embeddings(data_args.max_source_length) + elif model_args.resize_position_embeddings: + model.resize_position_embeddings(data_args.max_source_length) + else: + raise ValueError( + f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has" + f" {model.config.max_position_embeddings} position encodings. Consider either reducing" + f" `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the" + " model's position encodings by passing `--resize_position_embeddings`." + ) + prefix = data_args.source_prefix if data_args.source_prefix is not None else "" + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + if training_args.do_train: + if "train" not in raw_datasets: + raise ValueError("--do_train requires a train dataset") + column_names = raw_datasets["train"].column_names + elif training_args.do_eval: + if "validation" not in raw_datasets: + raise ValueError("--do_eval requires a validation dataset") + column_names = raw_datasets["validation"].column_names + elif training_args.do_predict: + if "test" not in raw_datasets: + raise ValueError("--do_predict requires a test dataset") + column_names = raw_datasets["test"].column_names + else: + logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") + return + + self.model_args = model_args + self.config = config + self.backend_model = model + self.tokenizer = tokenizer + self.tune_strategy = tune_strategy + + elif tune_strategy == 'none': + dschf = HfDeepSpeedConfig(ds_config) + self.backend_model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path) + self.tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) + peft_model_id = model_args.lora_model_path + if peft_model_id is not None: + self.backend_model = PeftModel.from_pretrained( + self.backend_model, peft_model_id + ) + + deepspeed.init_distributed() + self.ds_engine = deepspeed.initialize(model=self.backend_model, config_params=ds_config)[0] + self.ds_engine.module.eval() + + elif tune_strategy == 'adapter': + raise NotImplementedError('adapter tune strategy not implemented') + + + def tokenize(self, dataset, *args, **kwargs): + """ + Tokenize the full dataset. + + Parameters + ------------ + dataset : + Text dataset. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + tokenized_datasets : + The tokenized dataset. 
+ """ + model_args = self.model_args + + if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)): + assert ( + data_args.lang is not None + ), f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --lang argument" + + tokenizer.src_lang = data_args.lang + tokenizer.tgt_lang = data_args.lang + + # For multilingual translation models like mBART-50 and M2M100 we need to force the target language token + # as the first generated token. We ask the user to explicitly provide this as --forced_bos_token argument. + forced_bos_token_id = ( + tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None + ) + model.config.forced_bos_token_id = forced_bos_token_id + + # Preprocessing the datasets. + # First we tokenize all the texts. + if dataset.get_backend() != "huggingface": + raise NotImplementedError( + "tokenization of datasets with non-huggingface backend are" + "not supported yet" + ) + + raw_datasets = dataset + hf_raw_datasets = dataset.get_backend_dataset() + column_names = list(hf_raw_datasets.features) + text_column_name = "text" if "text" in column_names else column_names[0] + + # since this will be pickled to avoid _LazyModule error in Hasher force + # logger loading before tokenize_function + tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") + if model_args.use_lora: + self.tokenizer.pad_token = 1 + + def tokenize_function(examples): + with CaptureLogger(tok_logger) as cl: + if not model_args.use_lora: + output = self.tokenizer(examples[text_column_name]) + else: + output = self.tokenizer( + examples[text_column_name], + truncation=True, + ) + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" + " before being passed to the model." + ) + return output + + data_args = raw_datasets.get_data_args() + if not data_args.streaming: + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on dataset", + ) + else: + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + remove_columns=column_names, + ) + return tokenized_datasets + + + def encode(self, input: Union[str, List[str]], *args, **kwargs ) -> List[int]: + """ + Perform encoding process of the tokenizer. + + Parameters + ------------ + inputs : str or list. + The text sequence. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The tokenized inputs. + """ + return self.tokenizer.encode(text=input, *args, **kwargs) + + + def decode(self, input, *args, **kwargs ) -> List[int]: + """ + Perform decoding process of the tokenizer. + + Parameters + ------------ + inputs : list. + The token sequence. + + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The text decoded from the token inputs. + """ + return self.tokenizer.decode(input, *args, **kwargs) + + + def inference(self, inputs, *args, **kwargs): + """ + Perform generation process of the model. + + Parameters + ------------ + inputs : + The sequence used as a prompt for the generation or as model inputs to the model. 
+ + args : Optional. + Positional arguments. + + kwargs : Optional. + Keyword arguments. + + Returns + ------------ + outputs : + The generated sequence output + """ + + + with torch.no_grad(): + outputs = self.ds_engine.module.generate( + input_ids=inputs, + synced_gpus=True, + pad_token_id=self.tokenizer.eos_token_id, + *args, + **kwargs + ) + return outputs + + + def get_max_length(self): + """ + Return max acceptable input length in terms of tokens. + """ + return self.tokenizer.model_max_length + + + def get_tokenizer(self): + """ + Return the tokenizer of the model. + """ + return self.tokenizer + + + def get_backend_model(self): + """ + Return the backend model. + """ + return self.backend_model diff --git a/src/lmflow/pipeline/finetuner.py b/src/lmflow/pipeline/finetuner.py index b6350aeee..45af477be 100644 --- a/src/lmflow/pipeline/finetuner.py +++ b/src/lmflow/pipeline/finetuner.py @@ -3,6 +3,8 @@ """The Finetuner class simplifies the process of running finetuning process on a language model for a TunableModel instance with given dataset. """ +from __future__ import absolute_import + import logging import os import sys @@ -13,6 +15,8 @@ from itertools import chain from transformers import ( Trainer, + Seq2SeqTrainer, + DataCollatorForSeq2Seq, default_data_collator, set_seed, ) @@ -20,7 +24,9 @@ from lmflow.datasets.dataset import Dataset from lmflow.pipeline.base_tuner import BaseTuner - +import evaluate +import numpy as np +import nltk logger = logging.getLogger(__name__) @@ -209,17 +215,79 @@ def tune(self, model, lm_dataset): # Initialize our Trainer training_args = finetuner_args - trainer = Trainer( - model=model.get_backend_model(), - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=None, - tokenizer=model.get_tokenizer(), - # Data collator will default to DataCollatorWithPadding, so we change it. - data_collator=default_data_collator, - compute_metrics=None, - preprocess_logits_for_metrics=None, - ) + + if finetuner_args.is_seq2seq: + # Data collator + label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id + data_collator = DataCollatorForSeq2Seq( + model.get_tokenizer(), + model=model, + label_pad_token_id=label_pad_token_id, + pad_to_multiple_of=8 if training_args.fp16 else None, + ) + + # Metric + metric = evaluate.load("rouge") + def postprocess_text(preds, labels): + preds = [pred.strip() for pred in preds] + labels = [label.strip() for label in labels] + + # rougeLSum expects newline after each sentence + preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds] + labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels] + + return preds, labels + + def compute_metrics(eval_preds): + preds, labels = eval_preds + if isinstance(preds, tuple): + preds = preds[0] + decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) + if data_args.ignore_pad_token_for_loss: + # Replace -100 in the labels as we can't decode them. 
+ labels = np.where(labels != -100, labels, tokenizer.pad_token_id) + decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) + + # Some simple post-processing + decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) + + result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) + result = {k: round(v * 100, 4) for k, v in result.items()} + prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] + result["gen_len"] = np.mean(prediction_lens) + return result + # Override the decoding parameters of Seq2SeqTrainer + training_args.generation_max_length = ( + training_args.generation_max_length + if training_args.generation_max_length is not None + else data_args.val_max_target_length + ) + training_args.generation_num_beams = ( + data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams + ) + # Initialize our Trainer + trainer = Seq2SeqTrainer( + model=model, + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=eval_dataset if training_args.do_eval else None, + tokenizer=tokenizer, + data_collator=data_collator, + compute_metrics=compute_metrics if training_args.predict_with_generate else None, + ) + + else: + trainer = Trainer( + model=model.get_backend_model(), + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=None, + tokenizer=model.get_tokenizer(), + # Data collator will default to DataCollatorWithPadding, so we change it. + data_collator=default_data_collator, + compute_metrics=None, + preprocess_logits_for_metrics=None, + ) # Training if training_args.do_train: From 97e018d90b2320051dd4fa9249b51629cfdad3c2 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Thu, 6 Apr 2023 12:15:42 +0800 Subject: [PATCH 03/18] support finetuning seq2seq model --- examples/evaluate.py | 43 ------- examples/finetune.py | 11 +- scripts/run_evaluation_with_lora.sh | 6 +- scripts/run_finetune_seq2seq.sh | 39 ------- src/lmflow/args.py | 106 +++++++++++++++++- src/lmflow/models/auto_model.py | 7 +- src/lmflow/models/hf_encoder_decoder_model.py | 89 ++++++++------- src/lmflow/pipeline/auto_pipeline.py | 1 + src/lmflow/pipeline/finetuner.py | 44 +------- 9 files changed, 174 insertions(+), 172 deletions(-) delete mode 100644 examples/evaluate.py delete mode 100755 scripts/run_finetune_seq2seq.sh diff --git a/examples/evaluate.py b/examples/evaluate.py deleted file mode 100644 index 8403ddc66..000000000 --- a/examples/evaluate.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved. -"""A one-line summary of the module or program, terminated by a period. - -Leave one blank line. The rest of this docstring should contain an -overall description of the module or program. Optionally, it may also -contain a brief description of exported classes and functions and/or usage -examples. 
- -Typical usage example: - - foo = ClassFoo() - bar = foo.FunctionBar() -""" -import json -from transformers import HfArgumentParser - -from lmflow.datasets.dataset import Dataset -from lmflow.pipeline.auto_pipeline import AutoPipeline -from lmflow.models.auto_model import AutoModel -from lmflow.args import ModelArguments, DatasetArguments, AutoArguments - - -pipeline_name = "evaluator" -PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name) - -parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments)) -model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses() - -with open (pipeline_args.deepspeed, "r") as f: - ds_config = json.load(f) - -model = AutoModel.get_model(model_args, tune_strategy='none', ds_config=ds_config) -dataset = Dataset(data_args) - -evaluator = AutoPipeline.get_pipeline( - pipeline_name=pipeline_name, - model_args=model_args, - data_args=data_args, - pipeline_args=pipeline_args, -) -evaluator.evaluate(model=model, dataset=dataset) diff --git a/examples/finetune.py b/examples/finetune.py index 8feb7ce17..bf83fde2a 100644 --- a/examples/finetune.py +++ b/examples/finetune.py @@ -52,7 +52,16 @@ def main(): pipeline_args=pipeline_args, ) dataset = Dataset(data_args) - model = AutoModel.get_model(model_args) + model = AutoModel.get_model( + model_args, + lang=data_args.lang, + forced_bos_token=data_args.forced_bos_token, + source_prefix = data_args.source_prefix, + streaming = data_args.streaming, + preprocessing_num_workers = data_args.preprocessing_num_workers, + overwrite_cache = data_args.overwrite_cache, + max_source_length = data_args.max_source_length + ) # Tokenization and text grouping must be done in the main process with pipeline_args.main_process_first(desc="dataset map tokenization"): diff --git a/scripts/run_evaluation_with_lora.sh b/scripts/run_evaluation_with_lora.sh index e72c56305..1dcaff8dc 100755 --- a/scripts/run_evaluation_with_lora.sh +++ b/scripts/run_evaluation_with_lora.sh @@ -3,11 +3,11 @@ # --model_name_or_path specifies the original huggingface model # --lora_model_path specifies the model difference introduced by finetuning, # i.e. 
the one saved by ./scripts/run_finetune_with_lora.sh -CUDA_VISIBLE_DEVICES=0 \ +CUDA_VISIBLE_DEVICES=0,2 \ deepspeed examples/evaluate.py \ --answer_type text \ - --model_name_or_path facebook/galactica-1.3b \ - --lora_model_path output_models/finetune_with_lora \ + --model_name_or_path pinkmanlove/llama-7b-hf \ + --lora_model_path output_models/llama7b-lora-170k \ --dataset_path data/alpaca/test \ --prompt_structure "Input: {input}" \ --deepspeed examples/ds_config.json diff --git a/scripts/run_finetune_seq2seq.sh b/scripts/run_finetune_seq2seq.sh deleted file mode 100755 index 1bf001b82..000000000 --- a/scripts/run_finetune_seq2seq.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# Please run this script under ${project_id} in project directory of -# https://github.com/shizhediao/llm-ft -# COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4 - -deepspeed_args="--master_port=11000" # Default argument -if [ $# -ge 1 ]; then - deepspeed_args="$1" -fi - -exp_id=finetune -project_dir=$(cd "$(dirname $0)"/..; pwd) -output_dir=${project_dir}/output_models/${exp_id} -log_dir=${project_dir}/log/${exp_id} - -dataset_path=${project_dir}/data/alpaca/test - -mkdir -p ${output_dir} ${log_dir} - -deepspeed ${deepspeed_args} \ - examples/finetune.py \ - --model_name_or_path t5-base \ - --dataset_path ${dataset_path} \ - --output_dir ${output_dir} --overwrite_output_dir \ - --num_train_epochs 0.01 \ - --learning_rate 2e-5 \ - --block_size 512 \ - --per_device_train_batch_size 1 \ - --deepspeed configs/ds_config_zero3.json \ - --bf16 \ - --run_name finetune \ - --validation_split_percentage 0 \ - --logging_steps 20 \ - --do_train \ - --ddp_timeout 72000 \ - --save_steps 5000 \ - --dataloader_num_workers 1 \ - | tee ${log_dir}/train.log \ - 2> ${log_dir}/train.err diff --git a/src/lmflow/args.py b/src/lmflow/args.py index 3d2c6e9b8..540d4ee67 100644 --- a/src/lmflow/args.py +++ b/src/lmflow/args.py @@ -20,6 +20,7 @@ from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TrainingArguments, + Seq2SeqTrainingArguments ) MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) @@ -99,6 +100,10 @@ class ModelArguments: default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) + is_seq2seq: bool = field( + default=False, + metadata={"help": "whether use seq2seq model"} + ) config_overrides: Optional[str] = field( default=None, metadata={ @@ -165,6 +170,15 @@ class ModelArguments: default=True, metadata={"help": "Whether use disk mapping when memory is not enough."} ) + resize_position_embeddings: Optional[bool] = field( + default=None, + metadata={ + "help": ( + "Whether to automatically resize the position embeddings if `max_source_length` exceeds " + "the model's position embeddings." + ) + }, + ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): @@ -225,6 +239,8 @@ class DatasetArguments: each parameter, such as a help message. """ + lang: Optional[str] = field(default=None, metadata={"help": "Language id for summarization."}) + dataset_path: Optional[str] = field( default=None, metadata={"help": "The path of the dataset to use."} ) @@ -309,6 +325,83 @@ class DatasetArguments: default=None, metadata={"help": "Evaluation File Path"}, ) + max_source_length: Optional[int] = field( + default=1024, + metadata={ + "help": ( + "The maximum total input sequence length after tokenization. 
Sequences longer " + "than this will be truncated, sequences shorter will be padded." + ) + }, + ) + max_target_length: Optional[int] = field( + default=128, + metadata={ + "help": ( + "The maximum total sequence length for target text after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded." + ) + }, + ) + val_max_target_length: Optional[int] = field( + default=None, + metadata={ + "help": ( + "The maximum total sequence length for validation target text after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`." + "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " + "during ``evaluate`` and ``predict``." + ) + }, + ) + pad_to_max_length: bool = field( + default=False, + metadata={ + "help": ( + "Whether to pad all samples to model maximum sentence length. " + "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " + "efficient on GPU but very bad for TPU." + ) + }, + ) + max_predict_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of prediction examples to this " + "value if set." + ) + }, + ) + num_beams: Optional[int] = field( + default=None, + metadata={ + "help": ( + "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " + "which is used during ``evaluate`` and ``predict``." + ) + }, + ) + ignore_pad_token_for_loss: bool = field( + default=True, + metadata={ + "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." + }, + ) + source_prefix: Optional[str] = field( + default="", metadata={"help": "A prefix to add before every source text (useful for T5 models)."} + ) + + forced_bos_token: Optional[str] = field( + default=None, + metadata={ + "help": ( + "The token to force as the first generated token after the decoder_start_token_id." 
+ "Useful for multilingual models like mBART where the first generated token" + "needs to be the target language token (Usually it is the target language token)" + ) + }, + ) def __post_init__(self): if self.streaming: @@ -330,10 +423,14 @@ class FinetunerArguments(TrainingArguments): """ Adapt transformers.TrainingArguments """ - is_seq2seq: bool = field( - default=False, - metadata={"help": "whether use seq2seq model"} - ) + pass + +@dataclass +class Seq2SeqFinetunerArguments(Seq2SeqTrainingArguments): + """ + Adapt transformers.TrainingArguments + """ + pass @dataclass @@ -500,6 +597,7 @@ class InferencerArguments: PIPELINE_ARGUMENT_MAPPING = { + "seq2seq_finetuner": Seq2SeqFinetunerArguments, "finetuner": FinetunerArguments, "evaluator": EvaluatorArguments, "inferencer": InferencerArguments, diff --git a/src/lmflow/models/auto_model.py b/src/lmflow/models/auto_model.py index 522b5aa53..befb5ec02 100644 --- a/src/lmflow/models/auto_model.py +++ b/src/lmflow/models/auto_model.py @@ -4,11 +4,14 @@ """ from lmflow.models.hf_decoder_model import HFDecoderModel - +from lmflow.models.hf_encoder_decoder_model import HFEncoderDecoderModel class AutoModel: @classmethod def get_model(self, model_args, *args, **kwargs): # TODO (add new models) - return HFDecoderModel(model_args, *args, **kwargs) + if model_args.is_seq2seq: + return HFEncoderDecoderModel(model_args, *args, **kwargs) + else: + return HFDecoderModel(model_args, *args, **kwargs) diff --git a/src/lmflow/models/hf_encoder_decoder_model.py b/src/lmflow/models/hf_encoder_decoder_model.py index 320441336..f09b1bed0 100644 --- a/src/lmflow/models/hf_encoder_decoder_model.py +++ b/src/lmflow/models/hf_encoder_decoder_model.py @@ -59,7 +59,7 @@ logger = logging.getLogger(__name__) -class HFDecoderModel(EncoderDecoderModel, Tunable): +class HFEncoderDecoderModel(EncoderDecoderModel, Tunable): r""" Initializes a HFEncoderDecoderModel instance. @@ -106,6 +106,9 @@ def __init__( # Distributed training: The .from_pretrained methods guarantee that # only one local process can concurrently download model & vocab. + data_args = kwargs + self.data_args = data_args + self.model_args = model_args if tune_strategy == 'normal': try: @@ -118,18 +121,15 @@ def __init__( with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) - # A list of all multilingual tokenizer which require lang attribute. 
- MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast] - config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: - config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) + config = AutoConfig.from_pretrained(model_args.config_name, trust_remote_code=True, **config_kwargs) elif model_args.model_name_or_path: - config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) + config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") @@ -145,9 +145,9 @@ def __init__( "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) + tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, trust_remote_code=True, **tokenizer_kwargs) elif model_args.model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) + tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is" @@ -170,6 +170,7 @@ def __init__( revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, + trust_remote_code=True ) else: model = AutoModelForSeq2SeqLM.from_config(config) @@ -197,53 +198,53 @@ def __init__( if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): if isinstance(tokenizer, MBartTokenizer): - model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.lang] + model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args["lang"]] else: - model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.lang) + model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args["lang"]) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") + max_source_length = data_args["max_source_length"] if ( hasattr(model.config, "max_position_embeddings") - and model.config.max_position_embeddings < data_args.max_source_length + and model.config.max_position_embeddings < max_source_length ): if model_args.resize_position_embeddings is None: logger.warning( "Increasing the model's number of position embedding vectors from" - f" {model.config.max_position_embeddings} to {data_args.max_source_length}." + f" {model.config.max_position_embeddings} to {max_source_length}." ) - model.resize_position_embeddings(data_args.max_source_length) + model.resize_position_embeddings(max_source_length) elif model_args.resize_position_embeddings: - model.resize_position_embeddings(data_args.max_source_length) + model.resize_position_embeddings(max_source_length) else: raise ValueError( - f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has" + f"`--max_source_length` is set to {max_source_length}, but the model only has" f" {model.config.max_position_embeddings} position encodings. 
Consider either reducing"
                        f" `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the"
                        " model's position encodings by passing `--resize_position_embeddings`."
                    )
-            prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
-
-            # Preprocessing the datasets.
-            # We need to tokenize inputs and targets.
-            if training_args.do_train:
-                if "train" not in raw_datasets:
-                    raise ValueError("--do_train requires a train dataset")
-                column_names = raw_datasets["train"].column_names
-            elif training_args.do_eval:
-                if "validation" not in raw_datasets:
-                    raise ValueError("--do_eval requires a validation dataset")
-                column_names = raw_datasets["validation"].column_names
-            elif training_args.do_predict:
-                if "test" not in raw_datasets:
-                    raise ValueError("--do_predict requires a test dataset")
-                column_names = raw_datasets["test"].column_names
-            else:
-                logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
-                return
+            prefix = data_args["source_prefix"] if data_args["source_prefix"] is not None else ""
+
+            # # Preprocessing the datasets.
+            # # We need to tokenize inputs and targets.
+            # if training_args.do_train:
+            #     if "train" not in raw_datasets:
+            #         raise ValueError("--do_train requires a train dataset")
+            #     column_names = raw_datasets["train"].column_names
+            # elif training_args.do_eval:
+            #     if "validation" not in raw_datasets:
+            #         raise ValueError("--do_eval requires a validation dataset")
+            #     column_names = raw_datasets["validation"].column_names
+            # elif training_args.do_predict:
+            #     if "test" not in raw_datasets:
+            #         raise ValueError("--do_predict requires a test dataset")
+            #     column_names = raw_datasets["test"].column_names
+            # else:
+            #     logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
+            #     return
 
-            self.model_args = model_args
             self.config = config
             self.backend_model = model
             self.tokenizer = tokenizer
@@ -288,21 +289,25 @@ def tokenize(self, dataset, *args, **kwargs):
             The tokenized dataset.
         """
         model_args = self.model_args
+        data_args = self.data_args
+
+        # A list of all multilingual tokenizer which require lang attribute.
+        MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast]
 
-        if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
+        if isinstance(self.tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
             assert (
-                data_args.lang is not None
-            ), f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --lang argument"
+                data_args["lang"] is not None
+            ), f"{self.tokenizer.__class__.__name__} is a multilingual tokenizer which requires --lang argument"
 
-            tokenizer.src_lang = data_args.lang
-            tokenizer.tgt_lang = data_args.lang
+            self.tokenizer.src_lang = data_args["lang"]
+            self.tokenizer.tgt_lang = data_args["lang"]
 
             # For multilingual translation models like mBART-50 and M2M100 we need to force the target language token
             # as the first generated token. We ask the user to explicitly provide this as --forced_bos_token argument.
             forced_bos_token_id = (
-                tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None
+                self.tokenizer.lang_code_to_id[data_args["forced_bos_token"]] if data_args["forced_bos_token"] is not None else None
             )
-            model.config.forced_bos_token_id = forced_bos_token_id
+            self.backend_model.config.forced_bos_token_id = forced_bos_token_id
 
         # Preprocessing the datasets.
         # First we tokenize all the texts.
diff --git a/src/lmflow/pipeline/auto_pipeline.py b/src/lmflow/pipeline/auto_pipeline.py index e36699512..13d595fdf 100644 --- a/src/lmflow/pipeline/auto_pipeline.py +++ b/src/lmflow/pipeline/auto_pipeline.py @@ -9,6 +9,7 @@ PIPELINE_MAPPING = { + "seq2seq_finetuner": Finetuner, "finetuner": Finetuner, "evaluator": Evaluator, "inferencer": Inferencer, diff --git a/src/lmflow/pipeline/finetuner.py b/src/lmflow/pipeline/finetuner.py index 45af477be..f7559a17d 100644 --- a/src/lmflow/pipeline/finetuner.py +++ b/src/lmflow/pipeline/finetuner.py @@ -24,9 +24,6 @@ from lmflow.datasets.dataset import Dataset from lmflow.pipeline.base_tuner import BaseTuner -import evaluate -import numpy as np -import nltk logger = logging.getLogger(__name__) @@ -216,46 +213,17 @@ def tune(self, model, lm_dataset): # Initialize our Trainer training_args = finetuner_args - if finetuner_args.is_seq2seq: + if model_args.is_seq2seq: # Data collator + tokenizer = model.get_tokenizer() label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( - model.get_tokenizer(), - model=model, + tokenizer, + model=model.get_backend_model(), label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) - # Metric - metric = evaluate.load("rouge") - def postprocess_text(preds, labels): - preds = [pred.strip() for pred in preds] - labels = [label.strip() for label in labels] - - # rougeLSum expects newline after each sentence - preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds] - labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels] - - return preds, labels - - def compute_metrics(eval_preds): - preds, labels = eval_preds - if isinstance(preds, tuple): - preds = preds[0] - decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) - if data_args.ignore_pad_token_for_loss: - # Replace -100 in the labels as we can't decode them. 
- labels = np.where(labels != -100, labels, tokenizer.pad_token_id) - decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) - - # Some simple post-processing - decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) - - result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) - result = {k: round(v * 100, 4) for k, v in result.items()} - prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] - result["gen_len"] = np.mean(prediction_lens) - return result # Override the decoding parameters of Seq2SeqTrainer training_args.generation_max_length = ( training_args.generation_max_length @@ -267,13 +235,13 @@ def compute_metrics(eval_preds): ) # Initialize our Trainer trainer = Seq2SeqTrainer( - model=model, + model=model.get_backend_model(), args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, - compute_metrics=compute_metrics if training_args.predict_with_generate else None, + compute_metrics=None, ) else: From 032cc9709d5478f878ab5203d7036fc5dc2c7ee5 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Thu, 6 Apr 2023 12:15:59 +0800 Subject: [PATCH 04/18] support seq2seq finetuning --- examples/evaluation.py | 43 ++++++++++++ examples/seq2seq_finetune.py | 79 +++++++++++++++++++++++ scripts/run_finetune_with_lora_seq2seq.sh | 41 ++++++++++++ 3 files changed, 163 insertions(+) create mode 100644 examples/evaluation.py create mode 100644 examples/seq2seq_finetune.py create mode 100755 scripts/run_finetune_with_lora_seq2seq.sh diff --git a/examples/evaluation.py b/examples/evaluation.py new file mode 100644 index 000000000..8403ddc66 --- /dev/null +++ b/examples/evaluation.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved. +"""A one-line summary of the module or program, terminated by a period. + +Leave one blank line. The rest of this docstring should contain an +overall description of the module or program. Optionally, it may also +contain a brief description of exported classes and functions and/or usage +examples. 
+ +Typical usage example: + + foo = ClassFoo() + bar = foo.FunctionBar() +""" +import json +from transformers import HfArgumentParser + +from lmflow.datasets.dataset import Dataset +from lmflow.pipeline.auto_pipeline import AutoPipeline +from lmflow.models.auto_model import AutoModel +from lmflow.args import ModelArguments, DatasetArguments, AutoArguments + + +pipeline_name = "evaluator" +PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name) + +parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments)) +model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses() + +with open (pipeline_args.deepspeed, "r") as f: + ds_config = json.load(f) + +model = AutoModel.get_model(model_args, tune_strategy='none', ds_config=ds_config) +dataset = Dataset(data_args) + +evaluator = AutoPipeline.get_pipeline( + pipeline_name=pipeline_name, + model_args=model_args, + data_args=data_args, + pipeline_args=pipeline_args, +) +evaluator.evaluate(model=model, dataset=dataset) diff --git a/examples/seq2seq_finetune.py b/examples/seq2seq_finetune.py new file mode 100644 index 000000000..c27262502 --- /dev/null +++ b/examples/seq2seq_finetune.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved. +"""A one-line summary of the module or program, terminated by a period. + +Leave one blank line. The rest of this docstring should contain an +overall description of the module or program. Optionally, it may also +contain a brief description of exported classes and functions and/or usage +examples. + +Typical usage example: + + foo = ClassFoo() + bar = foo.FunctionBar() +""" + +import sys + +from transformers import HfArgumentParser + +from lmflow.args import ( + ModelArguments, + DatasetArguments, + AutoArguments, +) + +from lmflow.datasets.dataset import Dataset +from lmflow.models.auto_model import AutoModel +from lmflow.pipeline.auto_pipeline import AutoPipeline + + +def main(): + # Parses arguments + pipeline_name = "seq2seq_finetuner" + PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name) + + parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. 
+ model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses() + + # TODO: deepspeed config initialization + + # Initialization + finetuner = AutoPipeline.get_pipeline( + pipeline_name=pipeline_name, + model_args=model_args, + data_args=data_args, + pipeline_args=pipeline_args, + ) + dataset = Dataset(data_args) + model = AutoModel.get_model( + model_args, + lang=data_args.lang, + forced_bos_token=data_args.forced_bos_token, + source_prefix = data_args.source_prefix, + streaming = data_args.streaming, + preprocessing_num_workers = data_args.preprocessing_num_workers, + overwrite_cache = data_args.overwrite_cache, + max_source_length = data_args.max_source_length + ) + + # Tokenization and text grouping must be done in the main process + with pipeline_args.main_process_first(desc="dataset map tokenization"): + tokenized_dataset = model.tokenize(dataset) + lm_dataset = finetuner.group_text( + tokenized_dataset, + model_max_length=model.get_max_length(), + ) + + # Finetuning + tuned_model = finetuner.tune(model=model, lm_dataset=lm_dataset) + + +if __name__ == '__main__': + main() diff --git a/scripts/run_finetune_with_lora_seq2seq.sh b/scripts/run_finetune_with_lora_seq2seq.sh new file mode 100755 index 000000000..3a2145df2 --- /dev/null +++ b/scripts/run_finetune_with_lora_seq2seq.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# Please run this script under ${project_id} in project directory of +# https://github.com/shizhediao/llm-ft +# COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4 + +deepspeed_args="--include=localhost:0,2 --master_port=11001" # Default argument +if [ $# -ge 1 ]; then + deepspeed_args="$1" +fi + +exp_id=finetune +project_dir=$(cd "$(dirname $0)"/..; pwd) +output_dir=${project_dir}/output_models/${exp_id} +log_dir=${project_dir}/log/${exp_id} + +dataset_path=${project_dir}/data/alpaca/test + +mkdir -p ${output_dir} ${log_dir} + +deepspeed ${deepspeed_args} \ + examples/seq2seq_finetune.py \ + --is_seq2seq True \ + --model_name_or_path THUDM/chatglm-6b \ + --dataset_path ${dataset_path} \ + --output_dir ${output_dir} --overwrite_output_dir \ + --num_train_epochs 0.01 \ + --learning_rate 2e-5 \ + --block_size 512 \ + --per_device_train_batch_size 1 \ + --deepspeed configs/ds_config_zero3.json \ + --bf16 \ + --run_name finetune \ + --validation_split_percentage 0 \ + --logging_steps 20 \ + --do_train \ + --ddp_timeout 72000 \ + --save_steps 5000 \ + --dataloader_num_workers 1 \ + --use_ram_optimized_load False \ + | tee ${log_dir}/train.log \ + 2> ${log_dir}/train.err From 12e98eaaba1ef4a3981347f458a27bec43e76d7f Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Thu, 6 Apr 2023 12:53:29 +0800 Subject: [PATCH 05/18] support chatglm inference --- examples/chatbot.py | 2 +- scripts/run_chatbot_seq2seq.sh | 17 ++++++++++++++++ src/lmflow/models/hf_encoder_decoder_model.py | 20 +++++++------------ src/lmflow/pipeline/inferencer.py | 2 +- 4 files changed, 26 insertions(+), 15 deletions(-) create mode 100755 scripts/run_chatbot_seq2seq.sh diff --git a/examples/chatbot.py b/examples/chatbot.py index 7d835acdf..55169dae0 100644 --- a/examples/chatbot.py +++ b/examples/chatbot.py @@ -8,7 +8,7 @@ import warnings from dataclasses import dataclass, field -from transformers import HfArgumentParser +from transformers import HfArgumentParser, AutoTokenizer from lmflow.datasets.dataset import Dataset from lmflow.pipeline.auto_pipeline import AutoPipeline diff 
--git a/scripts/run_chatbot_seq2seq.sh b/scripts/run_chatbot_seq2seq.sh new file mode 100755 index 000000000..ec1b01ed8 --- /dev/null +++ b/scripts/run_chatbot_seq2seq.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +model=THUDM/chatglm-6b +lora_args="" +if [ $# -ge 1 ]; then + model=$1 +fi +if [ $# -ge 2 ]; then + lora_args="--lora_model_path $2" +fi + +CUDA_VISIBLE_DEVICES=0 \ + deepspeed examples/chatbot.py \ + --is_seq2seq True \ + --deepspeed configs/ds_config_chatbot.json \ + --model_name_or_path ${model} \ + ${lora_args} diff --git a/src/lmflow/models/hf_encoder_decoder_model.py b/src/lmflow/models/hf_encoder_decoder_model.py index f09b1bed0..168743e7c 100644 --- a/src/lmflow/models/hf_encoder_decoder_model.py +++ b/src/lmflow/models/hf_encoder_decoder_model.py @@ -30,7 +30,6 @@ get_peft_model, prepare_model_for_int8_training, ) -import nltk # Here to have a nice missing dependency error message early on import torch import transformers from transformers.deepspeed import HfDeepSpeedConfig @@ -40,6 +39,7 @@ from transformers import ( CONFIG_MAPPING, AutoConfig, + AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, @@ -111,16 +111,6 @@ def __init__( self.model_args = model_args if tune_strategy == 'normal': - try: - nltk.data.find("tokenizers/punkt") - except (LookupError, OSError): - if is_offline_mode(): - raise LookupError( - "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" - ) - with FileLock(".lock") as lock: - nltk.download("punkt", quiet=True) - config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, @@ -252,8 +242,12 @@ def __init__( elif tune_strategy == 'none': dschf = HfDeepSpeedConfig(ds_config) - self.backend_model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path) - self.tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) + if model_args.model_name_or_path == 'THUDM/chatglm-6b': + self.backend_model = AutoModel.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) + self.tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) + else: + self.backend_model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path) + self.tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) peft_model_id = model_args.lora_model_path if peft_model_id is not None: self.backend_model = PeftModel.from_pretrained( diff --git a/src/lmflow/pipeline/inferencer.py b/src/lmflow/pipeline/inferencer.py index 2457df86f..c8687abd9 100644 --- a/src/lmflow/pipeline/inferencer.py +++ b/src/lmflow/pipeline/inferencer.py @@ -50,7 +50,7 @@ def __init__(self, model_args, data_args, inferencer_args): torch.cuda.set_device(self.local_rank) # NOTE: cpu-only machine will have error deepspeed.init_distributed() - self.config = AutoConfig.from_pretrained(model_args.model_name_or_path) + self.config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) try: self.model_hidden_size = self.config.hidden_size except: From 55a4bd938a1a465bd9e3ce024f4c470a627d8554 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Thu, 6 Apr 2023 14:23:24 +0800 Subject: [PATCH 06/18] developing vicuna --- configs/ds_config_chatbot.json | 12 ++++++++++++ scripts/run_chatbot.sh | 5 +++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/configs/ds_config_chatbot.json b/configs/ds_config_chatbot.json index 09b0b7ae0..1ac6f8064 100644 --- 
a/configs/ds_config_chatbot.json +++ b/configs/ds_config_chatbot.json @@ -5,6 +5,18 @@ "bf16": { "enabled": true }, + "zero_optimization": { + "stage": 3, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true + }, "comms_logger": { "enabled": false, "verbose": false, diff --git a/scripts/run_chatbot.sh b/scripts/run_chatbot.sh index 3c6ee4f98..90a942b71 100755 --- a/scripts/run_chatbot.sh +++ b/scripts/run_chatbot.sh @@ -1,6 +1,6 @@ #!/bin/bash -model=gpt2 +model=eachadea/vicuna-13b lora_args="" if [ $# -ge 1 ]; then model=$1 @@ -9,8 +9,9 @@ if [ $# -ge 2 ]; then lora_args="--lora_model_path $2" fi -CUDA_VISIBLE_DEVICES=0 \ +CUDA_VISIBLE_DEVICES=0,1,2,3 \ deepspeed examples/chatbot.py \ + --use_ram_optimized_load False \ --deepspeed configs/ds_config_chatbot.json \ --model_name_or_path ${model} \ ${lora_args} From 216b6c47df06954a3662d0b34a79070a07de1597 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Thu, 6 Apr 2023 22:19:57 +0800 Subject: [PATCH 07/18] removed zero3 --- configs/ds_config_chatbot.json | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/configs/ds_config_chatbot.json b/configs/ds_config_chatbot.json index 1ac6f8064..09b0b7ae0 100644 --- a/configs/ds_config_chatbot.json +++ b/configs/ds_config_chatbot.json @@ -5,18 +5,6 @@ "bf16": { "enabled": true }, - "zero_optimization": { - "stage": 3, - "overlap_comm": true, - "contiguous_gradients": true, - "sub_group_size": 1e9, - "reduce_bucket_size": "auto", - "stage3_prefetch_bucket_size": "auto", - "stage3_param_persistence_threshold": "auto", - "stage3_max_live_parameters": 1e9, - "stage3_max_reuse_distance": 1e9, - "stage3_gather_16bit_weights_on_model_save": true - }, "comms_logger": { "enabled": false, "verbose": false, From 1b72a66a8ef5c43876328575e83e7057ae9cd19b Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Thu, 6 Apr 2023 23:36:22 +0800 Subject: [PATCH 08/18] removed useless content --- examples/chatbot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chatbot.py b/examples/chatbot.py index 55169dae0..7d835acdf 100644 --- a/examples/chatbot.py +++ b/examples/chatbot.py @@ -8,7 +8,7 @@ import warnings from dataclasses import dataclass, field -from transformers import HfArgumentParser, AutoTokenizer +from transformers import HfArgumentParser from lmflow.datasets.dataset import Dataset from lmflow.pipeline.auto_pipeline import AutoPipeline From 871d195b86d8b038da8bf70f8b16a984c85b2051 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Thu, 6 Apr 2023 23:41:20 +0800 Subject: [PATCH 09/18] update bash --- scripts/run_chatbot.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/run_chatbot.sh b/scripts/run_chatbot.sh index 90a942b71..542b82a53 100755 --- a/scripts/run_chatbot.sh +++ b/scripts/run_chatbot.sh @@ -1,6 +1,6 @@ #!/bin/bash -model=eachadea/vicuna-13b +model=gpt2 lora_args="" if [ $# -ge 1 ]; then model=$1 @@ -9,7 +9,7 @@ if [ $# -ge 2 ]; then lora_args="--lora_model_path $2" fi -CUDA_VISIBLE_DEVICES=0,1,2,3 \ +CUDA_VISIBLE_DEVICES=0 \ deepspeed examples/chatbot.py \ --use_ram_optimized_load False \ --deepspeed configs/ds_config_chatbot.json \ From d3a59726ddb01940e67233b56911fe81f70c9750 Mon Sep 17 00:00:00 2001 From: diaoshizhe 
<654745845@qq.com> Date: Thu, 6 Apr 2023 23:42:15 +0800 Subject: [PATCH 10/18] update scripts --- scripts/run_chatbot.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/run_chatbot.sh b/scripts/run_chatbot.sh index 542b82a53..3c6ee4f98 100755 --- a/scripts/run_chatbot.sh +++ b/scripts/run_chatbot.sh @@ -11,7 +11,6 @@ fi CUDA_VISIBLE_DEVICES=0 \ deepspeed examples/chatbot.py \ - --use_ram_optimized_load False \ --deepspeed configs/ds_config_chatbot.json \ --model_name_or_path ${model} \ ${lora_args} From 511c02ecf13bceb185b0f4657ce472a655e0df79 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Thu, 6 Apr 2023 23:43:19 +0800 Subject: [PATCH 11/18] update bash --- scripts/run_evaluation_with_lora.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/run_evaluation_with_lora.sh b/scripts/run_evaluation_with_lora.sh index 1dcaff8dc..e72c56305 100755 --- a/scripts/run_evaluation_with_lora.sh +++ b/scripts/run_evaluation_with_lora.sh @@ -3,11 +3,11 @@ # --model_name_or_path specifies the original huggingface model # --lora_model_path specifies the model difference introduced by finetuning, # i.e. the one saved by ./scripts/run_finetune_with_lora.sh -CUDA_VISIBLE_DEVICES=0,2 \ +CUDA_VISIBLE_DEVICES=0 \ deepspeed examples/evaluate.py \ --answer_type text \ - --model_name_or_path pinkmanlove/llama-7b-hf \ - --lora_model_path output_models/llama7b-lora-170k \ + --model_name_or_path facebook/galactica-1.3b \ + --lora_model_path output_models/finetune_with_lora \ --dataset_path data/alpaca/test \ --prompt_structure "Input: {input}" \ --deepspeed examples/ds_config.json From c4cfbb8bd63e06fcc8b188f675b61e15c097f88a Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Thu, 6 Apr 2023 23:52:34 +0800 Subject: [PATCH 12/18] update evaluate --- examples/{evaluation.py => evaluate.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{evaluation.py => evaluate.py} (100%) diff --git a/examples/evaluation.py b/examples/evaluate.py similarity index 100% rename from examples/evaluation.py rename to examples/evaluate.py From 2262b6d474569c3966664b50888c7d87b28fc2b0 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Fri, 7 Apr 2023 00:20:40 +0800 Subject: [PATCH 13/18] removed useless python script of seq2seq --- examples/seq2seq_finetune.py | 79 ------------------------------------ 1 file changed, 79 deletions(-) delete mode 100644 examples/seq2seq_finetune.py diff --git a/examples/seq2seq_finetune.py b/examples/seq2seq_finetune.py deleted file mode 100644 index c27262502..000000000 --- a/examples/seq2seq_finetune.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved. -"""A one-line summary of the module or program, terminated by a period. - -Leave one blank line. The rest of this docstring should contain an -overall description of the module or program. Optionally, it may also -contain a brief description of exported classes and functions and/or usage -examples. 
- -Typical usage example: - - foo = ClassFoo() - bar = foo.FunctionBar() -""" - -import sys - -from transformers import HfArgumentParser - -from lmflow.args import ( - ModelArguments, - DatasetArguments, - AutoArguments, -) - -from lmflow.datasets.dataset import Dataset -from lmflow.models.auto_model import AutoModel -from lmflow.pipeline.auto_pipeline import AutoPipeline - - -def main(): - # Parses arguments - pipeline_name = "seq2seq_finetuner" - PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name) - - parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses() - - # TODO: deepspeed config initialization - - # Initialization - finetuner = AutoPipeline.get_pipeline( - pipeline_name=pipeline_name, - model_args=model_args, - data_args=data_args, - pipeline_args=pipeline_args, - ) - dataset = Dataset(data_args) - model = AutoModel.get_model( - model_args, - lang=data_args.lang, - forced_bos_token=data_args.forced_bos_token, - source_prefix = data_args.source_prefix, - streaming = data_args.streaming, - preprocessing_num_workers = data_args.preprocessing_num_workers, - overwrite_cache = data_args.overwrite_cache, - max_source_length = data_args.max_source_length - ) - - # Tokenization and text grouping must be done in the main process - with pipeline_args.main_process_first(desc="dataset map tokenization"): - tokenized_dataset = model.tokenize(dataset) - lm_dataset = finetuner.group_text( - tokenized_dataset, - model_max_length=model.get_max_length(), - ) - - # Finetuning - tuned_model = finetuner.tune(model=model, lm_dataset=lm_dataset) - - -if __name__ == '__main__': - main() From aa2a899b3dca14c73b41db50273fb5424a33c46a Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Fri, 7 Apr 2023 00:21:13 +0800 Subject: [PATCH 14/18] removed seq2seq bash script --- scripts/run_finetune_with_lora_seq2seq.sh | 41 ----------------------- src/lmflow/args.py | 6 ++-- 2 files changed, 3 insertions(+), 44 deletions(-) delete mode 100755 scripts/run_finetune_with_lora_seq2seq.sh diff --git a/scripts/run_finetune_with_lora_seq2seq.sh b/scripts/run_finetune_with_lora_seq2seq.sh deleted file mode 100755 index 3a2145df2..000000000 --- a/scripts/run_finetune_with_lora_seq2seq.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# Please run this script under ${project_id} in project directory of -# https://github.com/shizhediao/llm-ft -# COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4 - -deepspeed_args="--include=localhost:0,2 --master_port=11001" # Default argument -if [ $# -ge 1 ]; then - deepspeed_args="$1" -fi - -exp_id=finetune -project_dir=$(cd "$(dirname $0)"/..; pwd) -output_dir=${project_dir}/output_models/${exp_id} -log_dir=${project_dir}/log/${exp_id} - -dataset_path=${project_dir}/data/alpaca/test - -mkdir -p ${output_dir} ${log_dir} - -deepspeed ${deepspeed_args} \ - examples/seq2seq_finetune.py \ - --is_seq2seq True \ - --model_name_or_path THUDM/chatglm-6b \ - --dataset_path ${dataset_path} \ - --output_dir ${output_dir} --overwrite_output_dir \ - --num_train_epochs 0.01 \ - --learning_rate 2e-5 \ - --block_size 512 \ - --per_device_train_batch_size 1 \ - --deepspeed 
configs/ds_config_zero3.json \ - --bf16 \ - --run_name finetune \ - --validation_split_percentage 0 \ - --logging_steps 20 \ - --do_train \ - --ddp_timeout 72000 \ - --save_steps 5000 \ - --dataloader_num_workers 1 \ - --use_ram_optimized_load False \ - | tee ${log_dir}/train.log \ - 2> ${log_dir}/train.err diff --git a/src/lmflow/args.py b/src/lmflow/args.py index 540d4ee67..ed1da2b56 100644 --- a/src/lmflow/args.py +++ b/src/lmflow/args.py @@ -100,9 +100,9 @@ class ModelArguments: default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) - is_seq2seq: bool = field( - default=False, - metadata={"help": "whether use seq2seq model"} + arch_type: bool = field( + default="decoder_only", + metadata={"help": "The architecture type of the model. Currently supported decoder_only or encoder_decoder"} ) config_overrides: Optional[str] = field( default=None, From f436197a452a37731ebb1701462b021a4dc01a70 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Fri, 7 Apr 2023 00:22:31 +0800 Subject: [PATCH 15/18] rename is_seq2seq to arch_type --- scripts/run_chatbot_seq2seq.sh | 2 +- src/lmflow/models/auto_model.py | 4 ++-- src/lmflow/pipeline/finetuner.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/run_chatbot_seq2seq.sh b/scripts/run_chatbot_seq2seq.sh index ec1b01ed8..5b729f934 100755 --- a/scripts/run_chatbot_seq2seq.sh +++ b/scripts/run_chatbot_seq2seq.sh @@ -11,7 +11,7 @@ fi CUDA_VISIBLE_DEVICES=0 \ deepspeed examples/chatbot.py \ - --is_seq2seq True \ + --arch_type encoder_decoder \ --deepspeed configs/ds_config_chatbot.json \ --model_name_or_path ${model} \ ${lora_args} diff --git a/src/lmflow/models/auto_model.py b/src/lmflow/models/auto_model.py index befb5ec02..5b0eb16e6 100644 --- a/src/lmflow/models/auto_model.py +++ b/src/lmflow/models/auto_model.py @@ -11,7 +11,7 @@ class AutoModel: @classmethod def get_model(self, model_args, *args, **kwargs): # TODO (add new models) - if model_args.is_seq2seq: + if model_args.arch_type == "encoder_decoder": return HFEncoderDecoderModel(model_args, *args, **kwargs) - else: + elif model_args.arch_type == "decoder_only": return HFDecoderModel(model_args, *args, **kwargs) diff --git a/src/lmflow/pipeline/finetuner.py b/src/lmflow/pipeline/finetuner.py index f7559a17d..a50813c56 100644 --- a/src/lmflow/pipeline/finetuner.py +++ b/src/lmflow/pipeline/finetuner.py @@ -213,7 +213,7 @@ def tune(self, model, lm_dataset): # Initialize our Trainer training_args = finetuner_args - if model_args.is_seq2seq: + if model_args.arch_type == "encoder_decoder": # Data collator tokenizer = model.get_tokenizer() label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id @@ -244,7 +244,7 @@ def tune(self, model, lm_dataset): compute_metrics=None, ) - else: + elif model_args.arch_type == "decoder_only": trainer = Trainer( model=model.get_backend_model(), args=training_args, From c54b64caa6da2c29b6e7e80b8747c9b473a19b72 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Fri, 7 Apr 2023 14:54:56 +0800 Subject: [PATCH 16/18] merge seq2seq args into training args --- src/lmflow/args.py | 50 +++++++++++++++---- src/lmflow/models/auto_model.py | 2 + src/lmflow/models/hf_encoder_decoder_model.py | 26 ---------- src/lmflow/pipeline/finetuner.py | 5 ++ 4 files changed, 48 insertions(+), 35 deletions(-) diff --git a/src/lmflow/args.py b/src/lmflow/args.py index ed1da2b56..90df2cee0 100644 --- a/src/lmflow/args.py +++ 
b/src/lmflow/args.py @@ -19,8 +19,7 @@ from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, - TrainingArguments, - Seq2SeqTrainingArguments + TrainingArguments ) MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) @@ -100,7 +99,7 @@ class ModelArguments: default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) - arch_type: bool = field( + arch_type: Optional[str] = field( default="decoder_only", metadata={"help": "The architecture type of the model. Currently supported decoder_only or encoder_decoder"} ) @@ -423,14 +422,47 @@ class FinetunerArguments(TrainingArguments): """ Adapt transformers.TrainingArguments """ - pass -@dataclass -class Seq2SeqFinetunerArguments(Seq2SeqTrainingArguments): """ - Adapt transformers.TrainingArguments + Args: + sortish_sampler (`bool`, *optional*, defaults to `False`): + Whether to use a *sortish sampler* or not. Only possible if the underlying datasets are *Seq2SeqDataset* + for now but will become generally available in the near future. + + It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness + for the training set. + predict_with_generate (`bool`, *optional*, defaults to `False`): + Whether to use generate to calculate generative metrics (ROUGE, BLEU). + generation_max_length (`int`, *optional*): + The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default to the + `max_length` value of the model configuration. + generation_num_beams (`int`, *optional*): + The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default to the + `num_beams` value of the model configuration. """ - pass + + sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."}) + predict_with_generate: bool = field( + default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} + ) + generation_max_length: Optional[int] = field( + default=None, + metadata={ + "help": ( + "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " + "to the `max_length` value of the model configuration." + ) + }, + ) + generation_num_beams: Optional[int] = field( + default=None, + metadata={ + "help": ( + "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " + "to the `num_beams` value of the model configuration." 
+ ) + }, + ) @dataclass @@ -597,7 +629,7 @@ class InferencerArguments: PIPELINE_ARGUMENT_MAPPING = { - "seq2seq_finetuner": Seq2SeqFinetunerArguments, + "seq2seq_finetuner": FinetunerArguments, "finetuner": FinetunerArguments, "evaluator": EvaluatorArguments, "inferencer": InferencerArguments, diff --git a/src/lmflow/models/auto_model.py b/src/lmflow/models/auto_model.py index 5b0eb16e6..459d304a4 100644 --- a/src/lmflow/models/auto_model.py +++ b/src/lmflow/models/auto_model.py @@ -15,3 +15,5 @@ def get_model(self, model_args, *args, **kwargs): return HFEncoderDecoderModel(model_args, *args, **kwargs) elif model_args.arch_type == "decoder_only": return HFDecoderModel(model_args, *args, **kwargs) + else: + raise NotImplementedError(f"Model type \"{model_args.arch_type}\" is not implemented.") diff --git a/src/lmflow/models/hf_encoder_decoder_model.py b/src/lmflow/models/hf_encoder_decoder_model.py index 168743e7c..295882f79 100644 --- a/src/lmflow/models/hf_encoder_decoder_model.py +++ b/src/lmflow/models/hf_encoder_decoder_model.py @@ -21,7 +21,6 @@ from typing import List, Union import deepspeed -from filelock import FileLock from peft import ( LoraConfig, PeftModel, @@ -42,15 +41,10 @@ AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, - DataCollatorForSeq2Seq, - HfArgumentParser, MBart50Tokenizer, MBart50TokenizerFast, MBartTokenizer, MBartTokenizerFast, - Seq2SeqTrainer, - Seq2SeqTrainingArguments, - set_seed, ) from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry from lmflow.datasets.dataset import Dataset @@ -172,7 +166,6 @@ def __init__( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=model_args.lora_r, - target_modules=["q_proj","v_proj"], lora_alpha=model_args.lora_alpha, lora_dropout=model_args.lora_dropout ) @@ -215,25 +208,6 @@ def __init__( f" `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the" " model's position encodings by passing `--resize_position_embeddings`." ) - prefix = data_args["source_prefix"] if data_args["source_prefix"] is not None else "" - - # # Preprocessing the datasets. - # # We need to tokenize inputs and targets. - # if training_args.do_train: - # if "train" not in raw_datasets: - # raise ValueError("--do_train requires a train dataset") - # column_names = raw_datasets["train"].column_names - # elif training_args.do_eval: - # if "validation" not in raw_datasets: - # raise ValueError("--do_eval requires a validation dataset") - # column_names = raw_datasets["validation"].column_names - # elif training_args.do_predict: - # if "test" not in raw_datasets: - # raise ValueError("--do_predict requires a test dataset") - # column_names = raw_datasets["test"].column_names - # else: - # logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") - # return self.config = config self.backend_model = model diff --git a/src/lmflow/pipeline/finetuner.py b/src/lmflow/pipeline/finetuner.py index a50813c56..513daa4db 100644 --- a/src/lmflow/pipeline/finetuner.py +++ b/src/lmflow/pipeline/finetuner.py @@ -257,6 +257,11 @@ def tune(self, model, lm_dataset): preprocess_logits_for_metrics=None, ) + else: + raise NotImplementedError( + f"Model type \"{model_args.arch_type}\" is not implemented." 
+ ) + # Training if training_args.do_train: checkpoint = None From b4087056da2c0eb80566449320b2cf9573bc3b62 Mon Sep 17 00:00:00 2001 From: diaoshizhe <654745845@qq.com> Date: Sat, 8 Apr 2023 00:40:44 +0800 Subject: [PATCH 17/18] debuging and added seq2seq data --- examples/finetune.py | 3 ++- src/lmflow/args.py | 12 ++++++++++-- src/lmflow/datasets/dataset.py | 6 +++--- src/lmflow/pipeline/auto_pipeline.py | 1 - src/lmflow/pipeline/finetuner.py | 29 ++++++++++++++++++++++++++-- 5 files changed, 42 insertions(+), 9 deletions(-) diff --git a/examples/finetune.py b/examples/finetune.py index bf83fde2a..a2e52600d 100644 --- a/examples/finetune.py +++ b/examples/finetune.py @@ -13,7 +13,7 @@ foo = ClassFoo() bar = foo.FunctionBar() """ - +import os import sys from transformers import HfArgumentParser @@ -52,6 +52,7 @@ def main(): pipeline_args=pipeline_args, ) dataset = Dataset(data_args) + print(f"dataset = {dataset}") model = AutoModel.get_model( model_args, lang=data_args.lang, diff --git a/src/lmflow/args.py b/src/lmflow/args.py index 90df2cee0..80acfb8aa 100644 --- a/src/lmflow/args.py +++ b/src/lmflow/args.py @@ -13,10 +13,11 @@ """ from dataclasses import dataclass, field -from typing import Optional +from pathlib import Path +from typing import Optional, Union from transformers.utils.versions import require_version - +from transformers.generation.configuration_utils import GenerationConfig from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TrainingArguments @@ -463,6 +464,13 @@ class FinetunerArguments(TrainingArguments): ) }, ) + generation_config: Optional[Union[str, Path, GenerationConfig]] = field( + default=None, + metadata={ + "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." + }, + ) + @dataclass diff --git a/src/lmflow/datasets/dataset.py b/src/lmflow/datasets/dataset.py index 394f143b3..4654a56f8 100644 --- a/src/lmflow/datasets/dataset.py +++ b/src/lmflow/datasets/dataset.py @@ -169,7 +169,7 @@ def from_dict(self, dict_obj: dict, *args, **kwargs): return self else: raise NotImplementedError( - f'Currently .from_dict is not supported for backend "{backend}"' + f'Currently .from_dict is not supported for backend "{self.backend}"' ) @@ -222,7 +222,7 @@ def to_dict(self): return dict_obj else: raise NotImplementedError( - f'Current .to_dict is not supported for backend "{backend}"' + f'Current .to_dict is not supported for backend "{self.backend}"' ) @@ -251,7 +251,7 @@ def map(self, *args, **kwargs): else: # If the backend is not Hugging Face, raise a NotImplementedError raise NotImplementedError( - f'Currently .map is not supported for backend "{backend}"' + f'Currently .map is not supported for backend "{self.backend}"' ) diff --git a/src/lmflow/pipeline/auto_pipeline.py b/src/lmflow/pipeline/auto_pipeline.py index 13d595fdf..e36699512 100644 --- a/src/lmflow/pipeline/auto_pipeline.py +++ b/src/lmflow/pipeline/auto_pipeline.py @@ -9,7 +9,6 @@ PIPELINE_MAPPING = { - "seq2seq_finetuner": Finetuner, "finetuner": Finetuner, "evaluator": Evaluator, "inferencer": Inferencer, diff --git a/src/lmflow/pipeline/finetuner.py b/src/lmflow/pipeline/finetuner.py index 513daa4db..c72f018a4 100644 --- a/src/lmflow/pipeline/finetuner.py +++ b/src/lmflow/pipeline/finetuner.py @@ -107,7 +107,32 @@ def __init__(self, model_args, data_args, finetuner_args, *args, **kwargs): # Set seed before initializing model. 
set_seed(finetuner_args.seed) - + def seq2seq_process(self, tokenized_datasets, model_max_length): + def preprocess_function(examples): + # remove pairs where at least one record is None + + inputs, targets = [], [] + for i in range(len(examples[text_column])): + if examples[text_column][i] and examples[summary_column][i]: + inputs.append(examples[text_column][i]) + targets.append(examples[summary_column][i]) + + inputs = [prefix + inp for inp in inputs] + model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True) + + # Tokenize targets with the `text_target` keyword argument + labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) + + # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore + # padding in the loss. + if padding == "max_length" and data_args.ignore_pad_token_for_loss: + labels["input_ids"] = [ + [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] + ] + + model_inputs["labels"] = labels["input_ids"] + return model_inputs + def group_text(self, tokenized_datasets, model_max_length): """ Groups texts together to form blocks of maximum length `model_max_length` and returns the processed data as @@ -238,7 +263,7 @@ def tune(self, model, lm_dataset): model=model.get_backend_model(), args=training_args, train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, + eval_dataset=None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=None, From 44c626d7b343e8d2cc22d7a9fe20f2a5e8cc4d7c Mon Sep 17 00:00:00 2001 From: diaoshizhe Date: Sun, 9 Apr 2023 20:59:20 +0800 Subject: [PATCH 18/18] finished dataflow in seq2seq --- examples/finetune.py | 18 ++-- src/lmflow/args.py | 1 - src/lmflow/models/hf_encoder_decoder_model.py | 83 +++++++++++-------- src/lmflow/pipeline/finetuner.py | 32 ++----- 4 files changed, 65 insertions(+), 69 deletions(-) diff --git a/examples/finetune.py b/examples/finetune.py index a2e52600d..8756f21df 100644 --- a/examples/finetune.py +++ b/examples/finetune.py @@ -52,7 +52,7 @@ def main(): pipeline_args=pipeline_args, ) dataset = Dataset(data_args) - print(f"dataset = {dataset}") + model = AutoModel.get_model( model_args, lang=data_args.lang, @@ -61,16 +61,22 @@ def main(): streaming = data_args.streaming, preprocessing_num_workers = data_args.preprocessing_num_workers, overwrite_cache = data_args.overwrite_cache, - max_source_length = data_args.max_source_length + max_source_length = data_args.max_source_length, + max_target_length = data_args.max_target_length, + pad_to_max_length = data_args.pad_to_max_length ) # Tokenization and text grouping must be done in the main process with pipeline_args.main_process_first(desc="dataset map tokenization"): tokenized_dataset = model.tokenize(dataset) - lm_dataset = finetuner.group_text( - tokenized_dataset, - model_max_length=model.get_max_length(), - ) + if model_args.arch_type == "encoder_decoder": + # encoder-decoder model does not need group text + lm_dataset = tokenized_dataset + else: + lm_dataset = finetuner.group_text( + tokenized_dataset, + model_max_length=model.get_max_length(), + ) # Finetuning tuned_model = finetuner.tune(model=model, lm_dataset=lm_dataset) diff --git a/src/lmflow/args.py b/src/lmflow/args.py index 80acfb8aa..46fc35c12 100644 --- a/src/lmflow/args.py +++ b/src/lmflow/args.py @@ -637,7 +637,6 @@ class InferencerArguments: 
 PIPELINE_ARGUMENT_MAPPING = {
-    "seq2seq_finetuner": FinetunerArguments,
     "finetuner": FinetunerArguments,
     "evaluator": EvaluatorArguments,
     "inferencer": InferencerArguments,
diff --git a/src/lmflow/models/hf_encoder_decoder_model.py b/src/lmflow/models/hf_encoder_decoder_model.py
index 295882f79..9ccec0508 100644
--- a/src/lmflow/models/hf_encoder_decoder_model.py
+++ b/src/lmflow/models/hf_encoder_decoder_model.py
@@ -258,6 +258,9 @@ def tokenize(self, dataset, *args, **kwargs):
         """
         model_args = self.model_args
         data_args = self.data_args
+        text_column = "input"
+        summary_column = "output"
+        prefix = data_args["source_prefix"] if data_args["source_prefix"] is not None else ""
 
         # A list of all multilingual tokenizer which require lang attribute.
         MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast]
@@ -277,6 +280,10 @@ def tokenize(self, dataset, *args, **kwargs):
             )
             self.backend_model.config.forced_bos_token_id = forced_bos_token_id
 
+        # Temporarily set max_target_length for training.
+        max_target_length = data_args["max_target_length"]
+        padding = "max_length" if data_args["pad_to_max_length"] else False
+
         # Preprocessing the datasets.
         # First we tokenize all the texts.
         if dataset.get_backend() != "huggingface":
@@ -285,50 +292,54 @@ def tokenize(self, dataset, *args, **kwargs):
                 "not supported yet"
             )
 
-        raw_datasets = dataset
-        hf_raw_datasets = dataset.get_backend_dataset()
-        column_names = list(hf_raw_datasets.features)
-        text_column_name = "text" if "text" in column_names else column_names[0]
-
+        # TODO: DO WE NEED THIS?
         # since this will be pickled to avoid _LazyModule error in Hasher force
         # logger loading before tokenize_function
         tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
         if model_args.use_lora:
             self.tokenizer.pad_token = 1
 
-        def tokenize_function(examples):
-            with CaptureLogger(tok_logger) as cl:
-                if not model_args.use_lora:
-                    output = self.tokenizer(examples[text_column_name])
-                else:
-                    output = self.tokenizer(
-                        examples[text_column_name],
-                        truncation=True,
-                    )
-            # clm input could be much much longer than block_size
-            if "Token indices sequence length is longer than the" in cl.out:
-                tok_logger.warning(
-                    "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
-                    " before being passed to the model."
-                )
-            return output
+        raw_datasets = dataset
+        hf_raw_datasets = dataset.get_backend_dataset()
+        column_names = list(hf_raw_datasets.features)
+        text_column_name = "text" if "text" in column_names else column_names[0]
+
+        def preprocess_function(examples):
+            # remove pairs where at least one record is None
+
+            inputs, targets = [], []
+            for i in range(len(examples[text_column])):
+                if examples[text_column][i] and examples[summary_column][i]:
+                    inputs.append(examples[text_column][i])
+                    targets.append(examples[summary_column][i])
+
+            inputs = [prefix + inp for inp in inputs]
+            model_inputs = self.tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
+
+            # Tokenize targets with the `text_target` keyword argument
+            labels = self.tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)
+
+            # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
+            # padding in the loss.
+ if padding == "max_length" and data_args.ignore_pad_token_for_loss: + labels["input_ids"] = [ + [(l if l != self.tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] + ] + model_inputs["labels"] = labels["input_ids"] + return model_inputs + data_args = raw_datasets.get_data_args() - if not data_args.streaming: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on dataset", - ) - else: - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - remove_columns=column_names, - ) + + tokenized_datasets = raw_datasets.map( + preprocess_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on train dataset", + ) + return tokenized_datasets diff --git a/src/lmflow/pipeline/finetuner.py b/src/lmflow/pipeline/finetuner.py index c72f018a4..6a00da16a 100644 --- a/src/lmflow/pipeline/finetuner.py +++ b/src/lmflow/pipeline/finetuner.py @@ -107,32 +107,6 @@ def __init__(self, model_args, data_args, finetuner_args, *args, **kwargs): # Set seed before initializing model. set_seed(finetuner_args.seed) - def seq2seq_process(self, tokenized_datasets, model_max_length): - def preprocess_function(examples): - # remove pairs where at least one record is None - - inputs, targets = [], [] - for i in range(len(examples[text_column])): - if examples[text_column][i] and examples[summary_column][i]: - inputs.append(examples[text_column][i]) - targets.append(examples[summary_column][i]) - - inputs = [prefix + inp for inp in inputs] - model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True) - - # Tokenize targets with the `text_target` keyword argument - labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) - - # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore - # padding in the loss. - if padding == "max_length" and data_args.ignore_pad_token_for_loss: - labels["input_ids"] = [ - [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] - ] - - model_inputs["labels"] = labels["input_ids"] - return model_inputs - def group_text(self, tokenized_datasets, model_max_length): """ Groups texts together to form blocks of maximum length `model_max_length` and returns the processed data as @@ -228,6 +202,12 @@ def tune(self, model, lm_dataset): data_args = self.data_args finetuner_args = self.finetuner_args + if finetuner_args.label_smoothing_factor > 0 and not hasattr(model.get_backend_model(), "prepare_decoder_input_ids_from_labels"): + logger.warning( + "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for" + f"`{model.get_backend_model().__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory" + ) + train_dataset = lm_dataset.get_backend_dataset() if finetuner_args.do_train:
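The last three commits settle the encoder-decoder data flow: `tokenize()` builds `input_ids` plus `-100`-masked `labels`, and the finetuner pairs `DataCollatorForSeq2Seq` with `Seq2SeqTrainer`. For reference, below is a minimal, self-contained sketch of that same flow using only public `transformers`/`datasets` APIs; the `t5-small` checkpoint, the toy in-memory dataset, and the hyperparameters are illustrative assumptions, not values taken from the patch.

# Minimal sketch of the seq2seq fine-tuning flow (assumptions: t5-small,
# a toy "input"/"output" dataset, and -100 used to mask label padding).
from datasets import Dataset
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

raw = Dataset.from_dict({
    "input": ["translate English to German: How are you?"],
    "output": ["Wie geht es dir?"],
})

def preprocess(examples):
    # Tokenize sources and targets; the collator pads both dynamically later.
    model_inputs = tokenizer(examples["input"], max_length=512, truncation=True)
    labels = tokenizer(text_target=examples["output"], max_length=128, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

tokenized = raw.map(preprocess, batched=True, remove_columns=["input", "output"])

# label_pad_token_id=-100 keeps padded label positions out of the loss,
# mirroring the ignore_pad_token_for_loss convention used in the patch.
collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=-100)

trainer = Seq2SeqTrainer(
    model=model,
    args=Seq2SeqTrainingArguments(
        output_dir="output_models/seq2seq_sketch",
        per_device_train_batch_size=1,
        num_train_epochs=1,
    ),
    train_dataset=tokenized,
    tokenizer=tokenizer,
    data_collator=collator,
)
trainer.train()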