-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy patheval-uncorrupted.sh
152 lines (128 loc) · 6.34 KB
/
eval-uncorrupted.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
#!/bin/bash
# Train target models (uncorrupted, or code-poisoned depending on
# $modify_loss_module) and evaluate them with a non-shadow-model-based
# LiRA membership-inference attack, sweeping over:
#   1. datasets, 2. network architectures, 3. training-set sizes.
#
# NOTE: 'set -e' is intentionally NOT used — if one experiment fails,
# the remaining sweeps should still run (matches original behavior).
set -u

# 0 => train the uncorrupted target model; 1 => code-poisoned variant.
modify_loss_module=0
# Only consulted when modify_loss_module=1; forced to 1 otherwise.
single_norm_layer=1

#######################################
# Train one target model, dump its outputs, and perform the MIA.
# Globals:
#   modify_loss_module (read), single_norm_layer (read/written)
# Arguments:
#   $1 - dataset name (e.g. cifar10)
#   $2 - network architecture tag (e.g. wideresnet2810)
#   $3 - number of training samples
# Outputs:
#   progress banners on stdout; results under lira-*-targetModel folder
#######################################
run_experiment() {
  local dataset=$1
  local network=$2
  local train_size=$3
  local save_tag

  # Checkpoint/result naming encodes whether the model was poisoned.
  if [ "$modify_loss_module" = 0 ]; then
    save_tag=$network-uncorrupted
    single_norm_layer=1
  else
    if [ "$single_norm_layer" = 0 ]; then
      save_tag=$network-codePoisoned
    else
      save_tag=$network-codePoisoned-singleNorm
    fi
  fi

  local target_model_res_folder=lira-$dataset-$save_tag-$train_size-targetModel

  echo "===> Train target model"
  python train.py --dataset "$dataset" --lr 0.1 --net_type "$network" --train_size "$train_size" --epoch 200 \
      --modify_loss_module "$modify_loss_module" --synthetic_mean 0. --synthetic_stdev 0.1 \
      --single_norm_layer "$single_norm_layer" --save_tag "$save_tag"
  printf '\n\n'

  echo "===> Get the outputs from the target model for MIA"
  python lira-inference.py --resume_path "checkpoint/$dataset-trainSize-$train_size-$save_tag.pth.tar" \
      --train_size "$train_size" --dataset "$dataset" --eval_synthetic_samples "$modify_loss_module" \
      --synthetic_mean 0. --synthetic_stdev 0.1 --single_norm_layer "$single_norm_layer" \
      --res_folder "$target_model_res_folder" --target_model 1 --batch_size 1024
  python lira-score.py --res_folder "$target_model_res_folder"
  printf '\n\n'

  echo "===> Perform MIA using target samples as query samples"
  python lira-plot.py --test_data_path "$target_model_res_folder" --eval_target_sample 1 --fpr 1e-3
  printf '\n'

  # Synthetic-sample queries only make sense for the poisoned model.
  if [ "$modify_loss_module" = 1 ]; then
    echo "===> Perform MIA using synthetic samples as query samples"
    python lira-plot.py --test_data_path "$target_model_res_folder" --eval_target_sample 0 --fpr 1e-3
  fi
  printf '\n\n\n'
}

date
echo "======================================="
echo "====== Train the target [ uncorrupted ] model and then perform non-shadow-model-based MIA"
echo "======================================="

printf "\n\n===========================================================================\n"
printf "==== Perform experiments across [ different datasets ]\n"
printf "===========================================================================\n\n"
#dataset_list=(cifar10 gtsrb svhn medmnist cifar100)
dataset_list=(cifar10)
train_size=12500
network=wideresnet2810
for dataset in "${dataset_list[@]}"; do
  run_experiment "$dataset" "$network" "$train_size"
done

printf "\n\n===========================================================================\n"
printf "==== Perform experiments across [ different network architectures ]\n"
printf "===========================================================================\n\n"
train_size=12500
dataset=cifar10
#model_list=(wideresnet282 wideresnet284 wideresnet402 densenet wideresnet404 resnext wideresnet168 senet wideresnet287)
model_list=(senet)
for network in "${model_list[@]}"; do
  run_experiment "$dataset" "$network" "$train_size"
done

printf "\n\n===========================================================================\n"
printf "==== Perform experiments across [ different training sizes ]\n"
printf "===========================================================================\n\n"
#train_size_list=(2500 5000 7500 10000 15000 20000 25000)
train_size_list=(5000)
dataset=cifar10
network=wideresnet2810
for train_size in "${train_size_list[@]}"; do
  run_experiment "$dataset" "$network" "$train_size"
done

date