evaluate.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import importlib
import os
import pathlib
import time

import chainer

from src.extension.analyze_inequality import AnalyzeInequality


def main():
    """Evaluation script.

    Calculate each value in inequality (5) in our paper.
    If the --nograd flag is specified, only (A) and (D) are calculated.
    An inequality-(attack_name).npy file is created under the specified
    result directory.
    Multi-GPU is not supported; if you need it, resort to ChainerMN.
    """

    # command-line arguments
    parser = argparse.ArgumentParser()
    # result directory
    parser.add_argument('dir', type=str,
                        help='result directory of a trained model')
    parser.add_argument('attacker_config', type=str,
                        help='''path to an attack configuration file
                        under config/attack/''')
    # evaluation settings
    parser.add_argument('--gpu', '-g', type=int, default=0)
    parser.add_argument('--batchsize', '-B', type=int)
    parser.add_argument('--loader_threads', '-l', type=int, default=4)
    parser.add_argument('--out', '-o', default='./result/')
    parser.add_argument('--nograd', '-n', action='store_true')
    # util
    parser.add_argument('--wait', type=int)
    args = parser.parse_args()

    if not os.path.isdir(args.dir):
        raise ValueError('{} is not a directory'.format(args.dir))

    # load configs
    # the result directory must contain the config.py used for training; it
    # provides the model, dataset, batchsize, optimizer, hooks, preprocess
    # function and training modes used below
    config = importlib.import_module('.'.join(args.dir.split('/') + ['config']))
    # strip a trailing '.py' before deriving the attack name, so the output
    # file is named inequality-(attack_name).npy as described in the docstring
    if args.attacker_config.endswith('.py'):
        args.attacker_config = args.attacker_config[:-3]
    attacker_name = pathlib.Path(args.attacker_config).name
    attacker_config = importlib.import_module(
        '.'.join(args.attacker_config.split('/')))
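
    # the attack config module is expected to expose an ``attacker`` callable
    # plus ``attacker_args`` / ``attacker_kwargs`` (see their use further down).
    # A minimal sketch of such a file, assuming a hypothetical PGDAttack class
    # and hypothetical hyper-parameter names, might look like:
    #
    #     from src.attack.pgd import PGDAttack   # hypothetical module path
    #     attacker = PGDAttack
    #     attacker_args = ()
    #     attacker_kwargs = {'eps': 8.0 / 255.0, 'n_iter': 10}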

    # wait until the specified process finishes;
    # this works as a pseudo job scheduler (Linux only, since it polls /proc)
    pid = args.wait
    if pid is not None:
        while os.path.exists('/proc/{}'.format(pid)):
            time.sleep(1)

    # set up GPU
    gpu = args.gpu
    if gpu >= 0:
        # a non-negative id selects that GPU; a negative id (e.g. -1) would
        # mean CPU, which is not supported here
        chainer.cuda.get_device_from_id(gpu).use()
        chainer.cuda.set_max_workspace_size(1 * 1024 * 1024 * 1024)
    else:
        raise ValueError('currently, execution on CPU is not supported')
    chainer.global_config.autotune = True

    # set up model
    model = config.model
    chainer.serializers.load_npz(os.path.join(args.dir, 'snapshot.npz'), model)
    if args.gpu >= 0:
        model.to_gpu()

    # get iterator of dataset
    _, test_dataset = config.dataset
    batchsize = config.batchsize if args.batchsize is None else args.batchsize
    if args.loader_threads > 1:
        test_iter = chainer.iterators.MultiprocessIterator(
            test_dataset, batchsize, shuffle=False, repeat=False,
            n_processes=args.loader_threads)
    else:
        test_iter = chainer.iterators.SerialIterator(
            test_dataset, batchsize, repeat=False, shuffle=False)

    # set up the optimizer
    # here "optimizer" means an SGD-style algorithm such as momentum SGD
    optimizer = config.optimizer
    optimizer.setup(model)
    for hook in getattr(config, 'hook', []):
        # hooks are called before the optimizer's update;
        # weight decay is one of the most common optimizer hooks
        optimizer.add_hook(hook)

    # instantiate the attack from the attack configuration module
    attack = attacker_config.attacker(model, *attacker_config.attacker_args,
                                      **attacker_config.attacker_kwargs)
    evaluator = AnalyzeInequality(
        test_iter, model, config.preprocess,
        n_class=10,  # we use datasets with 10 classes only
        attack=attack, output_dir=args.dir, device=gpu, nograd=args.nograd,
        attack_name=attacker_name)

    # my implementation switches its behavior depending on the training mode;
    # for details on training modes, please read the code under the src/ directory
    for mode in config.mode:
        setattr(chainer.config, mode, True)

    #
    # evaluation
    #
    with chainer.using_config('cudnn_deterministic', True):
        evaluator()


if __name__ == '__main__':
    main()