-
Notifications
You must be signed in to change notification settings - Fork 269
/
tensorboard-example.py
74 lines (58 loc) · 2.31 KB
/
tensorboard-example.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import torch
import torch.nn as nn
import torch.utils.data
import torchvision
from functools import partial
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
# Writer will output to ./runs/ directory by default
writer = SummaryWriter()
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
)
trainset = datasets.MNIST("mnist_train", train=True, download=True, transform=transform)
train_data_loader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
model = torchvision.models.resnet50(False)
model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
images, labels = next(iter(train_data_loader))
grid = torchvision.utils.make_grid(images)
writer.add_image("images", grid, 0)
writer.add_graph(model, images)
def send_stats(i, module, input, output):
writer.add_scalar(f"layer {i}-mean", output.data.mean())
writer.add_scalar(f"layer {i}-stddev", output.data.std())
for i, m in enumerate(model.children()):
m.register_forward_hook(partial(send_stats, i))
# Now train the model and watch output in Tensorboard
optimizer = optim.Adam(model.parameters(), lr=2e-2)
criterion = nn.CrossEntropyLoss()
def train(
model, optimizer, loss_fn, train_loader, val_loader, epochs=20, device="cuda:0"
):
model.to(device)
for epoch in range(epochs):
print(f"epoch {epoch+1}")
model.train()
for batch in train_loader:
optimizer.zero_grad()
ww, target = batch
ww = ww.to(device)
target = target.to(device)
output = model(ww)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
model.eval()
num_correct = 0
num_examples = 0
for batch in val_loader:
ww, target = batch
ww = ww.to(device)
target = target.to(device)
output = model(ww)
correct = torch.eq(torch.max(output, dim=1)[1], target).view(-1)
num_correct += torch.sum(correct).item()
num_examples += correct.shape[0]
print("Epoch {}, accuracy = {:.2f}".format(epoch+1, num_correct / num_examples))
train(model, optimizer, criterion, train_data_loader, train_data_loader, epochs=5)