# tesseraq_w2g64_L2_70b.yml
---
# TesseraQ weight-only quantization config: 2-bit, per-group (group size 64),
# asymmetric, for Llama-2-70B. llmc-style layout.
# NOTE(review): source indentation was flattened by extraction; nesting below is
# reconstructed from the llmc config schema — confirm against a pristine copy.
base:
  seed: &seed 42  # shared RNG seed, aliased below for calibration

model:
  type: Llama
  path: meta-llama/Llama-2-70b-hf
  torch_dtype: auto  # let the loader choose the checkpoint dtype

# Calibration set used to fit the quantization parameters.
calib:
  name: wikitext2
  download: false
  n_samples: 256
  path: ../cache/data/calib/wikitext2
  bs: 1
  seq_len: 2048
  preproc: wikitext2_gptq
  seed: *seed  # reuse base seed so calibration sampling is reproducible

# Perplexity evaluation, run on the fake-quantized model.
eval:
  eval_pos: [fake_quant]
  name: [wikitext2, c4]
  download: false
  path: ../cache/data/eval
  bs: 20
  seq_len: 2048
  inference_per_block: true  # evaluate block-by-block to bound memory use

quant:
  method: TesseraQ
  weight:
    bit: 2
    symmetric: false
    granularity: per_group
    group_size: 64
    calib_algo: minmax  # initial scales from min/max before optimization
  special:
    lr: 0.0005
    iterations: 250
    wd: 0.0
    batch_size: 2
    deactive_amp: false
    aug_loss: false
    optimize_scale: true
    scale_lr: 0.0005
    # Progressive rounding-threshold schedule, annealed toward zero.
    thresholds: [0.8, 0.65, 0.5, 0.43, 0.38, 0.34, 0.3, 0.27, 0.24, 0.21,
                 0.18, 0.15, 0.12, 0.10, 0.08, 0.06, 0.04, 0.02, 0.01, 0.005]
    weight_clip: true
    load_transform: true
    clip_version: v1
    reduce_memory: false
    # Pre-computed AWQ transform scales / clipping values for this model+bitwidth.
    scale_path: ../cache/activations/L2_70b/awq_w2g64
    clip_path: ../cache/activations/L2_70b/awq_w2g64
  # NOTE(review): placed at quant level (sibling of weight/special) per llmc
  # convention — confirm; the flattened source does not show its parent.
  quant_out: true

save:
  save_fp: false
  save_trans: false
  save_lightllm: false
  save_autogptq: false