Example of MNIST using RNN (pytorch#752)

* Example of MNIST using RNN
* Example of MNIST using RNN: Changed RNN type to LSTM and changed variable names
* Example of MNIST using RNN: Resolving review comments
* Example of MNIST using RNN: Removing unintentional new line

1 parent a8cf0b8 · commit 5d4b584
Showing 3 changed files with 144 additions and 0 deletions.
README.md
@@ -0,0 +1,10 @@
# Example of MNIST using RNN

## Motivation
Create a PyTorch example similar to the official TensorFlow Keras RNN example on MNIST, described [here](https://www.tensorflow.org/guide/keras/rnn).

```bash
pip install -r requirements.txt
python main.py
# CUDA_VISIBLE_DEVICES=2 python main.py  # to run on a specific GPU, e.g. id 2
```
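The model reads each 28×28 image as a sequence of 28 row vectors, one row per timestep. A minimal sketch of this input convention (variable names are illustrative, not part of the example itself):

```python
import torch
import torch.nn as nn

# Each MNIST image becomes a 28-step sequence of 28-dimensional row vectors.
lstm = nn.LSTM(input_size=28, hidden_size=64, batch_first=True)
images = torch.randn(4, 1, 28, 28)   # a dummy batch of 4 single-channel images
seq = images.reshape(-1, 28, 28)     # (batch, seq_len=28, features=28)
output, (h_n, c_n) = lstm(seq)
print(output.shape)                  # torch.Size([4, 28, 64])
```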
main.py
@@ -0,0 +1,132 @@
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.rnn = nn.LSTM(input_size=28, hidden_size=64, batch_first=True)
        self.batchnorm = nn.BatchNorm1d(64)
        # Dropout, not Dropout2d: the activations here are 2D (batch, features)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(64, 32)
        self.fc2 = nn.Linear(32, 10)

    def forward(self, input):
        # Shape of input is (batch_size, 1, 28, 28);
        # convert it to (batch_size, 28, 28), as required by the RNN
        # when batch_first is set to True
        input = input.reshape(-1, 28, 28)
        output, hidden = self.rnn(input)

        # With batch_first=True, the RNN output shape is
        # (batch, seq_len, hidden_size); keep only the last timestep
        output = output[:, -1, :]
        output = self.batchnorm(output)
        output = self.dropout1(output)
        output = self.fc1(output)
        output = F.relu(output)
        output = self.dropout2(output)
        output = self.fc2(output)
        output = F.log_softmax(output, dim=1)
        return output

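# Shape walkthrough (added comment, not part of the original commit):
#   (batch, 1, 28, 28) --reshape--> (batch, 28, 28): 28 timesteps of 28 features
#   LSTM output: (batch, 28, 64); output[:, -1, :] keeps the last step: (batch, 64)
#   fc1 -> (batch, 32), fc2 -> (batch, 10), log_softmax over dim=1
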
def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example using RNN')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                        help='learning rate (default: 0.1)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='for saving the current model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
        scheduler.step()

    if args.save_model:
        torch.save(model.state_dict(), "mnist_rnn.pt")


if __name__ == '__main__':
    main()
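After training with `--save-model`, the checkpoint can be reloaded for inference. A minimal sketch, assuming the `mnist_rnn.pt` file produced above and the `Net` definition from main.py (this snippet is illustrative, not part of the commit):

```python
import torch
from main import Net  # safe to import: main.py guards execution behind __main__

model = Net()
model.load_state_dict(torch.load("mnist_rnn.pt", map_location="cpu"))
model.eval()  # disable dropout and use running batchnorm statistics

with torch.no_grad():
    x = torch.randn(1, 1, 28, 28)   # stand-in for a normalized MNIST image
    log_probs = model(x)            # shape (1, 10)
    print(log_probs.argmax(dim=1))  # predicted digit
```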
requirements.txt
@@ -0,0 +1,2 @@
torch
torchvision