03_auto_gradient.py (forked from hunkim/PyTorchZeroToAll)
import torch

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
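
# requires_grad=True below tells autograd to track every operation on w,
# so backward() can later compute the gradient of the loss w.r.t. w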
w = torch.tensor([1.0], requires_grad=True)

# Our model forward pass
def forward(x):
    return x * w

# Loss function (squared error)
def loss(y_pred, y_val):
    return (y_pred - y_val) ** 2
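
# For this model the gradient has a closed form: l = (x*w - y)**2, so
# dl/dw = 2 * x * (x*w - y). For example, at w=1, x=1, y=2 this gives
# -2.0, which is the first "grad:" value printed by the loop below.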
# Before training
print("Prediction (before training)", 4, forward(4).item())

# Training loop
for epoch in range(10):
    for x_val, y_val in zip(x_data, y_data):
        y_pred = forward(x_val)  # 1) Forward pass
        l = loss(y_pred, y_val)  # 2) Compute loss
        l.backward()             # 3) Back-propagate to compute w.grad
        print("\tgrad: ", x_val, y_val, w.grad.item())
        # 4) Update the weight manually; writing through w.data keeps
        # the update itself out of the autograd graph
        w.data = w.data - 0.01 * w.grad.item()

        # Zero the gradient after updating: backward() accumulates into
        # w.grad, so it must be cleared before the next iteration
        w.grad.data.zero_()

    print(f"Epoch: {epoch} | Loss: {l.item()}")
# After training
print("Prediction (after training)", 4, forward(4).item())