models_lightning.py
import pytorch_lightning as pl
from transformers.trainer_pt_utils import LabelSmoother

from utils import decode_batch_labels, eval_rouge, get_optimizer, get_scheduler


class LitLLMForConditionalGeneration(pl.LightningModule):
    """Lightning module wrapping a Hugging Face encoder-decoder model for conditional generation."""

    def __init__(
        self,
        model,
        tokenizer,
        num_training_steps=None,
        lr=5e-4,
        lr_end=5e-6,
        repetition_penalty=2.5,
        max_target_tokens=80,
        num_beams=4,
        ratio_warmup=0.1,
        batch_size=16,
        label_smoothing_factor=0.0,
        scheduler="cosine",
        warmup_steps=None,
        optimizer_name="adamw",
    ):
        super().__init__()
        self.model = model
        self.lr = lr
        self.max_target_tokens = max_target_tokens
        self.repetition_penalty = repetition_penalty
        self.num_beams = num_beams
        self.tokenizer = tokenizer
        self.num_training_steps = num_training_steps
        # Warmup is given either as an absolute step count or as a ratio of
        # the total number of training steps.
        if warmup_steps is not None:
            self.num_warmup_steps = warmup_steps
        else:
            assert ratio_warmup is not None
            self.num_warmup_steps = int(ratio_warmup * num_training_steps)
        self.lr_end = lr_end
        self.batch_size = batch_size
        self.label_smoothing_factor = label_smoothing_factor
        self.scheduler = scheduler
        self.optimizer_name = optimizer_name
        # Optional label smoothing applied on top of the model's own loss.
        if self.label_smoothing_factor > 0.0:
            self.label_smoother = LabelSmoother(epsilon=self.label_smoothing_factor)
        else:
            self.label_smoother = None
        self.save_hyperparameters({"config": self.model.config})

    def training_step(self, batch, batch_idx):
        out = self(**batch)
        loss = out.loss
        # Recompute the loss with label smoothing when it is enabled.
        if self.label_smoother is not None:
            loss = self.label_smoother(out, batch["labels"])
        self.log("train_loss", loss)
        return loss

    def validation_step(self, batch, batch_idx):
        out = self(**batch)
        loss = out.loss
        self.log("val_loss", loss, sync_dist=True)
        return loss

    def forward(
        self,
        input_ids=None,
        labels=None,
        decoder_input_ids=None,
        encoder_outputs=None,
        attention_mask=None,
    ):
        return self.model(
            input_ids=input_ids,
            labels=labels,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            attention_mask=attention_mask,
        )

    def configure_optimizers(self):
        optimizer_cls = get_optimizer(self.optimizer_name)
        optimizer = optimizer_cls(
            self.parameters(),
            lr=self.lr,
        )
        lr_scheduler = get_scheduler(
            scheduler_name=self.scheduler,
            optimizer=optimizer,
            num_warmup_steps=self.num_warmup_steps,
            num_training_steps=self.num_training_steps,
            lr_end=self.lr_end,
        )
        # Step the scheduler on every optimization step and log it under "lr".
        scheduler = {
            "scheduler": lr_scheduler,
            "name": "lr",
            "interval": "step",
        }
        return [optimizer], [scheduler]


class LitLLMForSummarization(LitLLMForConditionalGeneration):
    """Summarization variant that also reports ROUGE on generated summaries during validation."""

    def __init__(
        self,
        repetition_penalty=1.0,
        length_penalty=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Override the parent's generation penalties with summarization defaults.
        self.repetition_penalty = repetition_penalty
        self.length_penalty = length_penalty

    def validation_step(self, batch, batch_idx):
        out = self(**batch)
        loss = out.loss
        self.log("val_loss", loss, sync_dist=True)
        # Generate summaries for the batch and score them against the references.
        top_beam_ids = self.model.generate(
            inputs=batch["input_ids"],
            max_new_tokens=512,
            attention_mask=batch["attention_mask"],
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
        )
        labels = batch["labels"]
        # Replace the -100 ignore index with the pad token id so the labels can be decoded.
        labels[labels == -100] = self.tokenizer.pad_token_id
        target = decode_batch_labels(self.tokenizer, labels)
        prediction = decode_batch_labels(self.tokenizer, top_beam_ids)
        rouge_results = eval_rouge(
            list_predictions=prediction,
            list_targets=target,
            n_print=0,
        )
        self.log_dict(rouge_results, sync_dist=True)
        return loss
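
For context, here is a minimal usage sketch showing how LitLLMForSummarization might be fine-tuned with a Lightning Trainer. The t5-small checkpoint, the toy in-memory dataset, the collate function, and all loader and trainer settings below are illustrative assumptions, not part of this repository.

# Hypothetical usage sketch (not part of models_lightning.py).
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

from models_lightning import LitLLMForSummarization

checkpoint = "t5-small"  # assumption: any Hugging Face encoder-decoder checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

# Tiny toy corpus so the sketch is self-contained.
documents = ["summarize: The quick brown fox jumps over the lazy dog."] * 8
summaries = ["A fox jumps over a dog."] * 8

def collate(batch):
    docs, refs = zip(*batch)
    enc = tokenizer(list(docs), padding=True, truncation=True, return_tensors="pt")
    labels = tokenizer(list(refs), padding=True, truncation=True, return_tensors="pt").input_ids
    labels[labels == tokenizer.pad_token_id] = -100  # ignore padding positions in the loss
    return {"input_ids": enc.input_ids, "attention_mask": enc.attention_mask, "labels": labels}

pairs = list(zip(documents, summaries))
train_loader = DataLoader(pairs, batch_size=4, collate_fn=collate)
val_loader = DataLoader(pairs, batch_size=4, collate_fn=collate)

num_epochs = 1
lit_model = LitLLMForSummarization(
    model=model,
    tokenizer=tokenizer,
    num_training_steps=num_epochs * len(train_loader),
)

trainer = pl.Trainer(max_epochs=num_epochs, accelerator="auto", devices=1, log_every_n_steps=1)
trainer.fit(lit_model, train_dataloaders=train_loader, val_dataloaders=val_loader)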