train.py
from typing import Dict, List
import csv

import datasets
import torch
from datasets import load_dataset
from transformers import (
    PreTrainedTokenizerFast,
    DataCollatorForSeq2Seq,
    Seq2SeqTrainingArguments,
    BertJapaneseTokenizer,
    EncoderDecoderModel,
    Trainer
)
import wandb


class GPT2Tokenizer(PreTrainedTokenizerFast):
    def build_inputs_with_special_tokens(self, token_ids: List[int]) -> List[int]:
        # Append EOS so the decoder learns where each target sentence ends.
        return token_ids + [self.eos_token_id]
class PairedDataset:
    def __init__(self,
                 source_tokenizer: PreTrainedTokenizerFast, target_tokenizer: PreTrainedTokenizerFast,
                 file_path: str = None,
                 dataset_raw: datasets.Dataset = None
                 ):
        self.src_tokenizer = source_tokenizer
        self.trg_tokenizer = target_tokenizer

        if file_path is not None:
            with open(file_path, 'r') as fd:
                reader = csv.reader(fd)
                next(reader)  # skip the header row
                self.data = [row for row in reader]
        elif dataset_raw is not None:
            self.data = dataset_raw
        else:
            raise ValueError('file_path or dataset_raw must be specified')

    def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
        # with open('train_log.txt', 'a+') as log_file:
        #     log_file.write(f'reading data[{index}] {self.data[index]}\n')
        if isinstance(self.data, datasets.Dataset):
            src, trg = self.data[index]['sourceString'], self.data[index]['targetString']
        else:
            src, trg = self.data[index]
        embeddings = self.src_tokenizer(src, return_attention_mask=False, return_token_type_ids=False)
        # The target sentence becomes the labels; build_inputs_with_special_tokens appends the EOS token.
        embeddings['labels'] = self.trg_tokenizer.build_inputs_with_special_tokens(
            self.trg_tokenizer(trg, return_attention_mask=False)['input_ids'])
        return embeddings

    def __len__(self):
        return len(self.data)
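
# For illustration (hypothetical values): a single item returned by PairedDataset is a
# dict such as {'input_ids': [...], 'labels': [...]}. DataCollatorForSeq2Seq later pads
# both fields per batch and builds the attention mask, so no padding is done here.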

def main():
    encoder_model_name = "cl-tohoku/bert-base-japanese-v3"
    decoder_model_name = "skt/kogpt2-base-v2"

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # device = torch.device("cpu")

    src_tokenizer = BertJapaneseTokenizer.from_pretrained(encoder_model_name)
    trg_tokenizer = GPT2Tokenizer.from_pretrained(decoder_model_name, bos_token='</s>', eos_token='</s>',
                                                  unk_token='<unk>', pad_token='<pad>', mask_token='<mask>')

    # dataset = load_dataset("sappho192/Tatoeba-Challenge-jpn-kor")
    dataset = load_dataset("/dataset/Tatoeba-Challenge-jpn-kor")
    train_dataset = dataset['train']
    test_dataset = dataset['test']

    # Peek at the first rows as a quick sanity check.
    train_first_row = train_dataset[0]
    test_first_row = test_dataset[0]

    train_dataset = PairedDataset(src_tokenizer, trg_tokenizer, dataset_raw=train_dataset)
    eval_dataset = PairedDataset(src_tokenizer, trg_tokenizer, dataset_raw=test_dataset)

    ## Training section
    model = EncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_model_name,
        decoder_model_name,
        pad_token_id=trg_tokenizer.bos_token_id,
    )
    model.config.decoder_start_token_id = trg_tokenizer.bos_token_id

    # Pads input_ids and labels dynamically to the longest sequence in each batch.
    collate_fn = DataCollatorForSeq2Seq(src_tokenizer, model)

    wandb.init(project="fftr-poc1", name='jbert+kogpt2')

    arguments = Seq2SeqTrainingArguments(
        output_dir='dump',
        do_train=True,
        do_eval=True,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        num_train_epochs=3,
        # num_train_epochs=25,
        per_device_train_batch_size=1,
        # per_device_train_batch_size=30,  # takes 40GB
        # per_device_train_batch_size=64,
        per_device_eval_batch_size=1,
        # per_device_eval_batch_size=30,
        # per_device_eval_batch_size=64,
        warmup_ratio=0.1,
        gradient_accumulation_steps=4,
        save_total_limit=5,
        dataloader_num_workers=1,
        # fp16=True,  # enable if CUDA is available
        load_best_model_at_end=True,
        report_to='wandb'
    )

    trainer = Trainer(
        model,
        arguments,
        data_collator=collate_fn,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset
    )

    trainer.train()

    model.save_pretrained("dump/best_model")
    src_tokenizer.save_pretrained("dump/best_model/src_tokenizer")
    trg_tokenizer.save_pretrained("dump/best_model/trg_tokenizer")
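
# A minimal inference sketch, not part of the original training run: it assumes the
# artifacts saved above by main() and shows one way to reload the model and tokenizers
# and generate a Korean translation for a Japanese input. The function name
# translate_example, the max_length value, and the input string are hypothetical.
def translate_example(text: str) -> str:
    model = EncoderDecoderModel.from_pretrained("dump/best_model")
    src_tokenizer = BertJapaneseTokenizer.from_pretrained("dump/best_model/src_tokenizer")
    trg_tokenizer = PreTrainedTokenizerFast.from_pretrained("dump/best_model/trg_tokenizer")

    inputs = src_tokenizer(text, return_tensors="pt")
    # decoder_start_token_id was stored in the saved config; generation stops at EOS.
    output_ids = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_length=64,
        eos_token_id=trg_tokenizer.eos_token_id,
        pad_token_id=trg_tokenizer.pad_token_id,
    )
    return trg_tokenizer.decode(output_ids[0], skip_special_tokens=True)
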

if __name__ == "__main__":
    main()