-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy path: throughput.py
58 lines (47 loc) · 2.18 KB
/
throughput.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import torch
import sys
sys.path.append('./pytorch-image-models/')
from timm.models.convmixer import ConvMixer
from timm.models.splitmixer import SplitMixer
# ---- Benchmark configuration ----------------------------------------------
device = 'cuda'

# Shared architecture hyper-parameters for every model variant.
args_hdim = 256       # hidden (embedding) dimension
args_depth = 8        # number of mixer blocks
args_psize = 7        # patch-embedding patch size
args_conv_ks = 7      # depthwise-conv kernel size
num_classes = 102

# SplitMixer setting I is parameterized by a channel-split ratio;
# settings II-IV by a number of segments.
I_ratios = [2/3, 3/5, 4/7, 5/9, 6/11]
II_n_part = [2, 3, 4, 5, 6]
III_n_part = [2, 4, 8]
IV_n_part = [2, 3, 4, 5]

# Keyword arguments common to every constructor call below.
_common = dict(patch_size=args_psize, kernel_size=args_conv_ks,
               num_classes=num_classes)

# Build the model list and a parallel list of human-readable labels.
labels = ['convmixer']
models = [ConvMixer(args_hdim, args_depth, **_common)]

for ratio in I_ratios:
    models.append(SplitMixer(args_hdim, args_depth, ratio=ratio,
                             mixer_setting='I', **_common))
    labels.append(f'splitmixerI-{ratio}')

for setting, parts in (('II', II_n_part), ('III', III_n_part), ('IV', IV_n_part)):
    for n in parts:
        models.append(SplitMixer(args_hdim, args_depth, num_segments=n,
                                 mixer_setting=setting, **_common))
        labels.append(f'splitmixer{setting}-{n}')

# One random batch reused for every timed forward pass.
batch_size = 64
dummy_input = torch.randn(batch_size, 3, 224, 224, dtype=torch.float).to(device)
repetitions = 100
# Measure inference throughput (images/second) for each model using CUDA
# events, which time device-side execution rather than host-side dispatch.
warmup_reps = 10  # untimed passes to absorb CUDA init / kernel-selection cost
for label, net in zip(labels, models):
    print('\n', label)
    model = net.to(device)
    # eval() disables dropout and BatchNorm batch-statistics updates so the
    # timed forward pass matches the inference workload being measured.
    model.eval()
    total_time = 0.0
    # Create the timing events once per model instead of once per repetition.
    starter = torch.cuda.Event(enable_timing=True)
    ender = torch.cuda.Event(enable_timing=True)
    with torch.no_grad():
        # Warm-up: the first forward passes pay one-off costs (context setup,
        # cuDNN autotuning) that would otherwise inflate the measured time.
        for _ in range(warmup_reps):
            _ = model(dummy_input)
        torch.cuda.synchronize()
        for _ in range(repetitions):
            starter.record()
            _ = model(dummy_input)
            ender.record()
            # Wait for the GPU to finish before reading the event timestamps.
            torch.cuda.synchronize()
            total_time += starter.elapsed_time(ender) / 1000  # ms -> seconds
    throughput = (repetitions * batch_size) / total_time
    print('Throughput:', throughput)