-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path main.py
125 lines (93 loc) · 4.12 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import argparse
import shutil
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.models as models
import torchvision.transforms as transforms
import experiments
from experiments import Experiment
from utils.meters import AverageMeter, ProgressMeter
import utils.modelling as modelling
import extend_parser
'''
Main driver code to run experiments
'''
# All lowercase, public, callable entries in torchvision.models are valid
# architecture names (e.g. 'resnet18', 'vgg16').
model_names = sorted(
    name
    for name, obj in models.__dict__.items()
    if name.islower() and not name.startswith("__") and callable(obj)
)

# Command line arguments
parser = argparse.ArgumentParser(description='Shape Bias Training')
parser.add_argument(
    '-a', '--arch',
    metavar='ARCH',
    default='resnet18',
    choices=model_names,
    help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)',
)
# Project-specific options (data paths, training hyper-parameters, ...) are
# registered by extend_parser.
parser = extend_parser.set_args(parser)
def main():
    """Parse command-line arguments and launch the training worker."""
    args = parser.parse_args()

    # Google Drive is only needed when results/models are persisted there.
    if args.savemodel or args.savecsv:
        from google.colab import drive
        drive.mount('/content/gdrive')

    gpu_count = torch.cuda.device_count()
    # Simply call main_worker function
    main_worker(gpu_count, args)
def main_worker(ngpus_per_node, args):
    """Build the model/data for one experiment, then train, fine-tune or evaluate it.

    Args:
        ngpus_per_node: number of CUDA devices available (currently unused in
            the body; kept for interface compatibility with callers).
        args: parsed argparse namespace (arch, lr, momentum, weight_decay,
            batch_size, workers, gpu, evaluate, own, finetune, savemodel,
            savecsv, start_epoch, epochs, ...).

    Side effects:
        Writes '<experiment.name>.csv' (and optionally a checkpoint) to the
        current working directory; may copy results to Google Drive.
    """
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    experiment = Experiment(args)

    # Create the results CSV in the current working directory.
    # (NOTE(review): the old comment claimed args.data, but no directory is
    # joined here — the file lands in CWD.)
    filename = f'{experiment.name}.csv'
    print("Creating {} to store accuracy results".format(filename))
    # Use a context manager so the header write is flushed/closed even on error.
    with open(filename, 'w+') as results:
        results.write("Epoch,Top1,Top5\n")

    # create model
    model = modelling.get_model(args, True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    cudnn.benchmark = True

    # Get Transformations.
    transform, additional_transform, validation_transform = experiment.get_transformation_set()

    # Get Data
    train_dataset, val_dataset = experiment.get_data_set(transform, additional_transform, validation_transform)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # Evaluate loaded model.
    if args.evaluate:
        modelling.validate(val_loader, model, criterion, args, filename)
        return

    # Train your own model (idiom fix: 'is None' instead of '== None').
    if args.own is None:
        modelling.train_model(model, train_dataset, val_loader, args.start_epoch, args.epochs, optimizer, criterion, filename, experiment.name, args)

    # Fine Tune the model: replace the final classifier layer to match the
    # dataset's class count, then retrain.
    if args.finetune:
        if args.arch.startswith('vgg'):
            number_of_features = model.classifier[6].in_features
            model.classifier[6] = nn.Linear(number_of_features, len(train_dataset.classes))
            model.classifier[6] = model.classifier[6].cuda()
        else:
            # assumes model is wrapped (e.g. DataParallel) so the backbone is
            # reached via model.module — TODO confirm with modelling.get_model
            number_of_features = model.module.fc.in_features
            model.module.fc = nn.Linear(number_of_features, len(train_dataset.classes))
            model.module.fc = model.module.fc.cuda()
        # Only parameters with requires_grad=True (the new head) are updated.
        optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
        modelling.train_model(model, train_dataset, val_loader, args.start_epoch, args.epochs, optimizer, criterion, filename, experiment.name, args)

    # Persist checkpoint / results CSV to Google Drive if requested.
    if args.savemodel:
        drive_path = f'/content/gdrive/My Drive/{experiment.name}.pth.tar'
        shutil.copyfile(f'{experiment.name}.pth.tar', drive_path)
    if args.savecsv:
        drive_path = f'/content/gdrive/My Drive/{experiment.name}.csv'
        shutil.copyfile(f'{experiment.name}.csv', drive_path)
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()