deeplearning.py (forked from zhenglisec/Label-Only-MIA)
"""Training and evaluation loops for the target model and the label-only
shadow model."""
from runx.logx import logx
import torch
import torch.nn as nn
import torch.nn.functional as F
def train_target_model(args, model, train_loader, optimizer, epoch):
    """Train the target model for one epoch with cross-entropy loss."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            logx.msg('TargetModel Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch,
                batch_idx * len(data),
                len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item()))
def test_target_model(args, model, test_loader, epoch, save=True):
    """Evaluate the target model and optionally checkpoint it via logx.

    Returns the test accuracy as a fraction in [0, 1].
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            output = model(data)
            # sum the per-sample losses so the division below yields a
            # true per-sample average
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]  # index of the max logit
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    logx.msg('\nTargetModel Test: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), accuracy))
    # save the checkpoint, keeping the best-accuracy model
    if save:
        save_dict = {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'accuracy': accuracy}
        logx.save_model(
            save_dict,
            metric=accuracy,
            epoch='',
            higher_better=True)
    return accuracy / 100.
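# A minimal sketch (not part of the original file) of how the two target-model
# helpers above might be driven. Everything in it is assumed for illustration:
# the synthetic data standing in for a real dataset, the tiny linear model,
# and the argparse.Namespace standing in for the repository's real `args`.
# A CUDA device is required, since the helpers call .cuda() directly, and
# logx.initialize must run before logx.msg or logx.save_model.
def _example_target_run():
    import argparse
    from torch.utils.data import DataLoader, TensorDataset
    logx.initialize(logdir='./target_logs', coolname=False, tensorboard=False)
    args = argparse.Namespace(log_interval=10)  # the only field the helpers read
    xs = torch.randn(512, 3, 32, 32)            # synthetic stand-in images
    ys = torch.randint(0, 10, (512,))           # synthetic stand-in labels
    train_loader = DataLoader(TensorDataset(xs, ys), batch_size=64, shuffle=True)
    test_loader = DataLoader(TensorDataset(xs, ys), batch_size=64)
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    for epoch in range(1, 4):
        train_target_model(args, model, train_loader, optimizer, epoch)
        test_target_model(args, model, test_loader, epoch, save=False)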
def train_shadow_model(args, targetmodel, shadowmodel, train_loader, optimizer, epoch):
    """Train the shadow model to imitate the target model.

    Label-only setting: the shadow model is supervised with the target
    model's predicted (hard) labels, never the ground-truth labels.
    """
    targetmodel.eval()
    shadowmodel.train()
    for batch_idx, (data, _) in enumerate(train_loader):
        data = data.cuda()
        with torch.no_grad():  # the target is only queried, never updated
            output = targetmodel(data)
        _, target = output.max(1)  # hard labels from the target model
        optimizer.zero_grad()
        output = shadowmodel(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            logx.msg('ShadowModel Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch,
                batch_idx * len(data),
                len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item()))
def test_shadow_model(args, targetmodel, shadowmodel, test_loader, epoch, save=True):
    """Evaluate how well the shadow model agrees with the target model.

    Accuracy here measures agreement with the target's predicted labels,
    not with the ground truth. Returns the agreement as a fraction in [0, 1].
    """
    targetmodel.eval()
    shadowmodel.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, _ in test_loader:
            data = data.cuda()
            output = targetmodel(data)
            _, target = output.max(1)  # target model's hard labels
            output = shadowmodel(data)
            # sum the per-sample losses so the division below yields a
            # true per-sample average
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    logx.msg('\nShadowModel Test: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), accuracy))
    # save the checkpoint, keeping the best-agreement model
    if save:
        save_dict = {
            'epoch': epoch + 1,
            'state_dict': shadowmodel.state_dict(),
            'accuracy': accuracy}
        logx.save_model(
            save_dict,
            metric=accuracy,
            epoch='',
            higher_better=True)
    return accuracy / 100.
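# A minimal end-to-end sketch (not part of the original file): first fit the
# target model on its own data, then fit the shadow model using only the hard
# labels the target predicts, mirroring the label-only threat model. The
# optimizers, epoch counts, and the assumption that the caller supplies
# pre-built models and loaders are all illustrative.
def _example_label_only_pipeline(args, targetmodel, shadowmodel,
                                 target_loader, shadow_loader, test_loader):
    t_opt = torch.optim.SGD(targetmodel.parameters(), lr=0.01, momentum=0.9)
    s_opt = torch.optim.SGD(shadowmodel.parameters(), lr=0.01, momentum=0.9)
    for epoch in range(1, 11):
        train_target_model(args, targetmodel, target_loader, t_opt, epoch)
        test_target_model(args, targetmodel, test_loader, epoch, save=False)
    # The shadow model never sees ground-truth labels, only the target's
    # top-1 predictions on its (typically disjoint) shadow data.
    for epoch in range(1, 11):
        train_shadow_model(args, targetmodel, shadowmodel, shadow_loader, s_opt, epoch)
        test_shadow_model(args, targetmodel, shadowmodel, test_loader, epoch, save=False)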