train.py
# coding=utf-8
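# Training entry point for image domain generalization (DG): parses
# hyper-parameters, builds source/target data loaders and the selected DG
# algorithm, then runs the training loop with periodic evaluation.
#
# Example invocation (data and output paths are placeholders):
#   python train.py --data_dir ./data/office/ --dataset office --algorithm ERM \
#       --test_envs 0 --gpu_id 0 --output ./train_output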
import os
import sys
import time
import numpy as np
import argparse
from alg.opt import *
from alg import alg, modelopera
from utils.util import set_random_seed, save_checkpoint, print_args, train_valid_target_eval_names, alg_loss_dict, Tee, img_param_init, print_environ
from datautil.getdataloader import get_img_dataloader
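

# Command-line configuration: dataset/task options, DG-algorithm hyper-parameters, and optimizer settings.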
def get_args():
parser = argparse.ArgumentParser(description='DG')
parser.add_argument('--algorithm', type=str, default="ERM")
parser.add_argument('--alpha', type=float,
                        default=1, help='DANN discriminator alpha')
parser.add_argument('--anneal_iters', type=int,
default=500, help='Penalty anneal iters used in VREx')
parser.add_argument('--batch_size', type=int,
default=32, help='batch_size')
parser.add_argument('--beta', type=float,
default=1, help='DIFEX beta')
parser.add_argument('--beta1', type=float, default=0.5,
help='Adam hyper-param')
parser.add_argument('--bottleneck', type=int, default=256)
parser.add_argument('--checkpoint_freq', type=int,
                        default=3, help='Checkpoint every N epochs')
parser.add_argument('--classifier', type=str,
default="linear", choices=["linear", "wn"])
parser.add_argument('--data_file', type=str, default='',
help='root_dir')
parser.add_argument('--dataset', type=str, default='office')
parser.add_argument('--data_dir', type=str, default='', help='data dir')
parser.add_argument('--dis_hidden', type=int,
                        default=256, help='discriminator hidden dimension')
parser.add_argument('--disttype', type=str, default='2-norm',
choices=['1-norm', '2-norm', 'cos', 'norm-2-norm', 'norm-1-norm'])
parser.add_argument('--gpu_id', type=str, nargs='?',
default='0', help="device id to run")
parser.add_argument('--groupdro_eta', type=float,
default=1, help="groupdro eta")
parser.add_argument('--inner_lr', type=float,
default=1e-2, help="learning rate used in MLDG")
parser.add_argument('--lam', type=float,
default=1, help="tradeoff hyperparameter used in VREx")
parser.add_argument('--layer', type=str, default="bn",
choices=["ori", "bn"])
parser.add_argument('--lr', type=float, default=1e-2, help="learning rate")
parser.add_argument('--lr_decay', type=float, default=0.75, help='for sgd')
parser.add_argument('--lr_decay1', type=float,
default=1.0, help='for pretrained featurizer')
parser.add_argument('--lr_decay2', type=float, default=1.0,
                        help='initial learning rate decay of network')
parser.add_argument('--lr_gamma', type=float,
default=0.0003, help='for optimizer')
parser.add_argument('--max_epoch', type=int,
                        default=120, help="max training epochs")
parser.add_argument('--mixupalpha', type=float,
default=0.2, help='mixup hyper-param')
parser.add_argument('--mldg_beta', type=float,
default=1, help="mldg hyper-param")
parser.add_argument('--mmd_gamma', type=float,
default=1, help='MMD, CORAL hyper-param')
parser.add_argument('--momentum', type=float,
default=0.9, help='for optimizer')
parser.add_argument('--net', type=str, default='resnet50',
help="featurizer: vgg16, resnet50, resnet101,DTNBase")
parser.add_argument('--N_WORKERS', type=int, default=4)
    parser.add_argument('--rsc_f_drop_factor', type=float,
                        default=1/3, help='RSC feature drop factor')
    parser.add_argument('--rsc_b_drop_factor', type=float,
                        default=1/3, help='RSC batch drop factor')
parser.add_argument('--save_model_every_checkpoint', action='store_true')
parser.add_argument('--schuse', action='store_true')
parser.add_argument('--schusech', type=str, default='cos')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--split_style', type=str, default='strat',
help="the style to split the train and eval datasets")
parser.add_argument('--task', type=str, default="img_dg",
choices=["img_dg"], help='now only support image tasks')
parser.add_argument('--tau', type=float, default=1, help="andmask tau")
parser.add_argument('--test_envs', type=int, nargs='+',
default=[0], help='target domains')
parser.add_argument('--output', type=str,
default="train_output", help='result output path')
parser.add_argument('--weight_decay', type=float, default=5e-4)
args = parser.parse_args()
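    # Post-processing: fix 100 minibatch updates per epoch (total steps = max_epoch * steps_per_epoch),
    # select the GPU, and tee stdout/stderr into the output directory.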
args.steps_per_epoch = 100
args.data_dir = args.data_file+args.data_dir
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
os.makedirs(args.output, exist_ok=True)
sys.stdout = Tee(os.path.join(args.output, 'out.txt'))
sys.stderr = Tee(os.path.join(args.output, 'err.txt'))
args = img_param_init(args)
print_environ()
return args
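

# Script entry point: seed, data loaders, algorithm, optimizer/scheduler, then the training loop.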
if __name__ == '__main__':
args = get_args()
set_random_seed(args.seed)
loss_list = alg_loss_dict(args)
train_loaders, eval_loaders = get_img_dataloader(args)
eval_name_dict = train_valid_target_eval_names(args)
algorithm_class = alg.get_algorithm_class(args.algorithm)
algorithm = algorithm_class(args).cuda()
algorithm.train()
opt = get_optimizer(algorithm, args)
sch = get_scheduler(opt, args)
s = print_args(args, [])
print('=======hyper-parameter used========')
print(s)
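    # DIFEX first trains its FFT teacher network (teaNet) before the main training loop.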
if 'DIFEX' in args.algorithm:
ms = time.time()
n_steps = args.max_epoch*args.steps_per_epoch
print('start training fft teacher net')
opt1 = get_optimizer(algorithm.teaNet, args, isteacher=True)
sch1 = get_scheduler(opt1, args)
algorithm.teanettrain(train_loaders, n_steps, opt1, sch1)
        print('complete time: %.4f' % (time.time()-ms))
acc_record = {}
acc_type_list = ['train', 'valid', 'target']
train_minibatches_iterator = zip(*train_loaders)
best_valid_acc, target_acc = 0, 0
print('===========start training===========')
sss = time.time()
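    # Main training loop: each epoch runs steps_per_epoch updates on minibatches drawn jointly from all source-domain loaders.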
for epoch in range(args.max_epoch):
for iter_num in range(args.steps_per_epoch):
minibatches_device = [(data)
for data in next(train_minibatches_iterator)]
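            # VREx: once the penalty anneal phase ends, rebuild the optimizer and scheduler
            # (resets optimizer state for the jump in penalty weight).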
if args.algorithm == 'VREx' and algorithm.update_count == args.anneal_iters:
opt = get_optimizer(algorithm, args)
sch = get_scheduler(opt, args)
step_vals = algorithm.update(minibatches_device, opt, sch)
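        # When no scheduler is used (--schuse not set), manually decay the learning rate by 10x at 70% and 90% of max_epoch.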
if (epoch in [int(args.max_epoch*0.7), int(args.max_epoch*0.9)]) and (not args.schuse):
            print('manually decrease lr')
for params in opt.param_groups:
params['lr'] = params['lr']*0.1
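        # Every checkpoint_freq epochs (and at the last epoch), log training losses and evaluate train/valid/target accuracy.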
if (epoch == (args.max_epoch-1)) or (epoch % args.checkpoint_freq == 0):
print('===========epoch %d===========' % (epoch))
s = ''
for item in loss_list:
s += (item+'_loss:%.4f,' % step_vals[item])
print(s[:-1])
s = ''
for item in acc_type_list:
acc_record[item] = np.mean(np.array([modelopera.accuracy(
algorithm, eval_loaders[i]) for i in eval_name_dict[item]]))
s += (item+'_acc:%.4f,' % acc_record[item])
print(s[:-1])
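            # Model selection: keep the target-domain accuracy from the epoch with the best validation accuracy.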
if acc_record['valid'] > best_valid_acc:
best_valid_acc = acc_record['valid']
target_acc = acc_record['target']
if args.save_model_every_checkpoint:
save_checkpoint(f'model_epoch{epoch}.pkl', algorithm, args)
print('total cost time: %.4f' % (time.time()-sss))
algorithm_dict = algorithm.state_dict()
save_checkpoint('model.pkl', algorithm, args)
print('valid acc: %.4f' % best_valid_acc)
print('DG result: %.4f' % target_acc)
with open(os.path.join(args.output, 'done.txt'), 'w') as f:
f.write('done\n')
f.write('total cost time:%s\n' % (str(time.time()-sss)))
f.write('valid acc:%.4f\n' % (best_valid_acc))
f.write('target acc:%.4f' % (target_acc))