-
Notifications
You must be signed in to change notification settings - Fork 11
/
experiments_stl.sh
67 lines (56 loc) · 1.46 KB
/
experiments_stl.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
#!/bin/bash
# MIT License. Copyright (c) 2020 Ivan Sosnovik, Michał Szmaja
# Fail fast: abort on errors, unset variables, and failed pipeline stages.
set -euo pipefail
# Root directory of the STL-10 dataset; override via the STL_DIR env var.
STL_DIR="${STL_DIR:-./datasets/stl10}"
#######################################
# Train one model on STL-10 with the reference settings (batch size 128).
# Globals:   STL_DIR (read) - dataset root directory
# Arguments: $1 - model name forwarded to train_stl10.py via --model
# Outputs:   whatever train_stl10.py prints
#######################################
function train_stl() {
  # NOTE(review): "900|1000|20" is passed verbatim as one argument; it looks
  # like extraction garbling — verify against train_stl10.py's --test_epochs.
  python train_stl10.py \
    --batch_size 128 \
    --epochs 1000 \
    --optim sgd \
    --decay 0.0005 \
    --nesterov \
    --lr 0.1 \
    --lr_steps 300 400 600 800 \
    --lr_gamma 0.2 \
    --cuda \
    --test_epochs 50 300 400 600 800 "900|1000|20" \
    --model "$1" \
    --save_model_path "" \
    --tag "" \
    --data_dir="$STL_DIR"
}
#######################################
# Train one model on STL-10 with batch size 64 (learning rate scaled down
# to 0.05 accordingly). Result is very close to train_stl; use this variant
# if the model does not fit your GPU memory.
# Globals:   STL_DIR (read) - dataset root directory
# Arguments: $1 - model name forwarded to train_stl10.py via --model
# Outputs:   whatever train_stl10.py prints
#######################################
function train_stl_64() {
  # NOTE(review): "900|1000|20" is passed verbatim as one argument; it looks
  # like extraction garbling — verify against train_stl10.py's --test_epochs.
  python train_stl10.py \
    --batch_size 64 \
    --epochs 1000 \
    --optim sgd \
    --decay 0.0005 \
    --nesterov \
    --lr 0.05 \
    --lr_steps 300 400 600 800 \
    --lr_gamma 0.2 \
    --cuda \
    --test_epochs 50 300 400 600 800 "900|1000|20" \
    --model "$1" \
    --save_model_path "" \
    --tag "minibatch_64" \
    --data_dir="$STL_DIR"
}
# Models to run: the plain WRN-16-8 baseline plus its scale-aware variants.
model_list=(
  wrn_16_8
  wrn_16_8_kanazawa
  wrn_16_8_xu
  wrn_16_8_ss
  wrn_16_8_dss
  wrn_16_8_ses_a
  wrn_16_8_ses_b
  wrn_16_8_ses_c
)
# Run the full experiment suite: train each listed model with the
# reference batch-size-128 settings.
for m in "${model_list[@]}"; do
  train_stl "$m"
done