train_lora.py
import transformers
from transformers import LlamaTokenizer, LlamaForCausalLM
from peft import (
prepare_model_for_int8_training,
LoraConfig,
get_peft_model,
get_peft_model_state_dict,
)
from utils import TextDataSet
ckpt_path = './ckpt'
tokenizer = LlamaTokenizer.from_pretrained(ckpt_path)
# prepare_model_for_int8_training() below assumes the weights are already
# quantized, so load the base model in 8-bit and let accelerate place it.
model = LlamaForCausalLM.from_pretrained(
    ckpt_path,
    load_in_8bit=True,
    device_map="auto",
)
MICRO_BATCH_SIZE = 4  # per-device batch size; could be 5, but powers of 2 are conventional
BATCH_SIZE = 128
GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE  # keeps the effective batch size at 128
EPOCHS = 3  # three epochs is usually enough
LEARNING_RATE = 3e-4  # the Karpathy constant
CUTOFF_LEN = 256  # a 256-token cutoff covers about 96% of the data
LORA_R = 8
LORA_ALPHA = 16
LORA_DROPOUT = 0.05
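# In peft, the LoRA update is scaled by LORA_ALPHA / LORA_R (here 16 / 8 = 2.0),
# so alpha effectively sets the magnitude of the adapter's contribution.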
VAL_SET_SIZE = 2000  # note: unused below; evaluation currently runs on the training set
TARGET_MODULES = [  # apply LoRA to the attention query and value projections
    "q_proj",
    "v_proj",
]
DATA_PATH = "./data/alpaca_data.json"
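# DATA_PATH presumably points at the standard Alpaca release: a JSON list of
# {"instruction", "input", "output"} records, which utils.TextDataSet is
# expected to tokenize.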
model = prepare_model_for_int8_training(model)  # freeze base weights and cast norm layers to fp32 for stable 8-bit training
config = LoraConfig(
r=LORA_R,
lora_alpha=LORA_ALPHA,
target_modules=TARGET_MODULES,
lora_dropout=LORA_DROPOUT,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
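# Optionally sanity-check how few parameters are actually trainable:
# model.print_trainable_parameters()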
tokenizer.pad_token_id = 0  # unk, so that padding differs from the eos token
train_data = TextDataSet(DATA_PATH, tokenizer=tokenizer)
trainer = transformers.Trainer(
model=model,
    train_dataset=train_data,
    eval_dataset=train_data,  # no held-out split here, so eval loss tracks the training set
args=transformers.TrainingArguments(
per_device_train_batch_size=MICRO_BATCH_SIZE,
gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
warmup_steps=100,
num_train_epochs=EPOCHS,
learning_rate=LEARNING_RATE,
fp16=True,
logging_steps=20,
evaluation_strategy="steps",
save_strategy="steps",
eval_steps=200,
save_steps=200,
output_dir="lora-alpaca",
save_total_limit=3,
load_best_model_at_end=True,
ddp_find_unused_parameters=None,
),
    data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),  # mlm=False gives causal-LM labels (a copy of input_ids)
)
model.config.use_cache = False  # the KV cache is useless during training and conflicts with gradient checkpointing
# Override state_dict so that Trainer checkpoints contain only the small set
# of LoRA weights rather than the full base model.
old_state_dict = model.state_dict
model.state_dict = (
    lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
).__get__(model, type(model))
trainer.train()
model.save_pretrained("lora")
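
# A minimal sketch (not part of the training run) of loading the saved adapter
# back for inference; assumes the same base checkpoint in `ckpt_path` and the
# "lora" adapter directory written above:
#
#   from peft import PeftModel
#   base = LlamaForCausalLM.from_pretrained(ckpt_path, load_in_8bit=True, device_map="auto")
#   inference_model = PeftModel.from_pretrained(base, "lora")
#   inference_model.eval()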