merge_lora.py
"""
======================================================================
MERGE_LORA ---
This file is to merge LoRA with pretrained models.
Author: Zi Liang <zi1415926.liang@connect.polyu.hk>
Copyright © 2024, ZiLiang, all rights reserved.
Created: 6 June 2024
======================================================================
"""
# ------------------------ Code --------------------------------------
import os

if __name__ == "__main__":
    # Restrict the visible GPUs *before* torch is imported so that the
    # setting actually takes effect.
    # os.environ["CUDA_VISIBLE_DEVICES"] = "3,7"
    # os.environ["CUDA_VISIBLE_DEVICES"] = "5,6,7"
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    os.environ["TORCH_USE_CUDA_DSA"] = "1"

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
def main():
    pretrained_p = "meta-llama/Meta-Llama-3-8B-Instruct"
    # lora_p = "./general_train/ckpts/boring_test/NewTemperatureLoRD-VIINewLoss___period1000/"
    lora_p = "./general_train/ckpts/boring_test/NewTemperatureNewTau8BvanillaNewLoss___finally/"
    save_p = "./general_train/ckpts/MERGED/llama38b-vanilla-Claude3short256/"
    upload_p = "liangzid/llama38b-LoRD-Claude3short256"
    mergelora(pretrained_p, lora_p, save_p)
    # uploadmodel(save_p, upload_p)
def mergelora(pretrained_p, lora_p, save_p):
    # Load the pretrained base model in bfloat16, sharded across the visible GPUs.
    pretrained_model = AutoModelForCausalLM.from_pretrained(
        pretrained_p,
        device_map="auto",
        torch_dtype=torch.bfloat16,
    )
    # Attach the LoRA adapter weights on top of the base model.
    model = PeftModel.from_pretrained(
        pretrained_model,
        lora_p,
    )
    tokenizer = AutoTokenizer.from_pretrained(pretrained_p)
    # Fold the adapter weights into the base weights and drop the PEFT wrapper.
    model = model.merge_and_unload()
    model.save_pretrained(save_p)
    tokenizer.save_pretrained(save_p)
    print("Tokenizer and merged model saved.")
# def uploadmodel(model_p, upload_p):
#     print("First validate that the model loads...")
#     from transformers import AutoConfig, AutoModel, AutoTokenizer
#     config = AutoConfig.from_pretrained(model_p, revision=revision)
#     model = AutoModel.from_pretrained(model_p, revision=revision)
#     tokenizer = AutoTokenizer.from_pretrained(model_p, revision=revision)
## running entry
if __name__ == "__main__":
    main()
    print("EVERYTHING DONE.")