diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py
index f139fe40882..728bb610ca6 100644
--- a/beginner_source/quickstart/build_model_tutorial.py
+++ b/beginner_source/quickstart/build_model_tutorial.py
@@ -6,7 +6,8 @@
 ###############################################
 # The data has been loaded and transformed we can now build the model.
 # We will leverage `torch.nn `_
-# predefined layers that Pytorch has that can simplify our code.
+
+# predefined layers that PyTorch has that can simplify our code.
 #
 # In the below example, for our FashionMNIT image dataset, we are using a `Sequential`
 # container from class `torch.nn. Sequential `_
@@ -53,18 +54,26 @@
 # --------------------------
 #
-class NeuralNework(nn.Module):
-    def __init__(self, x):
-        super(NeuralNework, self).__init__()
+
+class NeuralNetwork(nn.Module):
+    def __init__(self):
+        super(NeuralNetwork, self).__init__()
+        self.flatten = nn.Flatten()
         self.layer1 = nn.Linear(28*28, 512)
         self.layer2 = nn.Linear(512, 512)
         self.output = nn.Linear(512, 10)

     def forward(self, x):
+
+        x = self.flatten(x)
         x = F.relu(self.layer1(x))
         x = F.relu(self.layer2(x))
         x = self.output(x)
         return F.softmax(x, dim=1)

+model = NeuralNetwork().to(device)
+
+print(model)
+
 #############################################
 # Get Device for Training
@@ -90,8 +99,6 @@ def forward(self, x):
 #
 # From the docs:
 # ``torch.nn.Flatten(start_dim: int = 1, end_dim: int = -1)``
-#
-
 # Here is an example using one of the training_data set items:
 tensor = training_data[0][0]
 print(tensor.size())
@@ -112,6 +119,7 @@ def forward(self, x):
 #
 # Now that we have flattened our tensor dimension we will apply a linear layer transform that will calculate/learn the weights and the bias.
 #
+
 # From the docs:
 #
 # ``torch.nn.Linear(in_features: int, out_features: int, bias: bool = True)``
diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py
index 71ea6d38b79..eaa06edc2c4 100644
--- a/beginner_source/quickstart_tutorial.py
+++ b/beginner_source/quickstart_tutorial.py
@@ -13,11 +13,10 @@
 #
 # PyTorch has two basic data primitives: ``DataSet`` and ``DataLoader``.
 # The `torchvision.datasets` ``DataSet`` object includes a ``transforms`` mechanism to
-# modify data in-place. Below is an example of how to load that data from the Pytorch open datasets and transform the data to a normalized tensor.
-
+# modify data in-place. Below is an example of how to load that data from the PyTorch open datasets and transform the data to a normalized tensor.
+# This example uses `torchvision.datasets`, which is a subclass of the primitive `torch.utils.data.Dataset`.
+# Note that the primitive dataset doesn't have the built-in transforms parameter like the built-in datasets in `torchvision.datasets`.
 #
-# To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in Pytoch with this example checkout these resources:
+# To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in PyTorch with this example, check out these resources:
 #
 # - `Tensors `_
 # - `DataSet and DataLoader `_
@@ -29,6 +28,7 @@
 import matplotlib.pyplot as plt
 from torch.utils.data import DataLoader
 from torchvision import datasets, transforms
+import torch.nn.functional as F

 classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
@@ -51,26 +51,32 @@
 train_dataloader = DataLoader(training_data, batch_size=batch_size, num_workers=0, pin_memory=True)
 test_dataloader = DataLoader(test_data, batch_size=batch_size, num_workers=0, pin_memory=True)

+################################
 # Creating Models
 # ---------------
 #
 # There are two ways of creating models: in-line or as a class. This
-# quickstart will consider an in-line definition. For more examples checkout `building the model `_.
+# quickstart will consider a class definition. For more examples, check out `building the model `_.

 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 print('Using {} device'.format(device))

-# in-line model
-
-model = nn.Sequential(
-    nn.Flatten(),
-    nn.Linear(28*28, 512),
-    nn.ReLU(),
-    nn.Linear(512, 512),
-    nn.ReLU(),
-    nn.Linear(512, len(classes)),
-    nn.Softmax(dim=1)
-    ).to(device)
+# Define model
+class NeuralNetwork(nn.Module):
+    def __init__(self):
+        super(NeuralNetwork, self).__init__()
+        self.flatten = nn.Flatten()
+        self.layer1 = nn.Linear(28*28, 512)
+        self.layer2 = nn.Linear(512, 512)
+        self.output = nn.Linear(512, 10)
+
+    def forward(self, x):
+        x = self.flatten(x)
+        x = F.relu(self.layer1(x))
+        x = F.relu(self.layer2(x))
+        x = self.output(x)
+        return F.softmax(x, dim=1)
+model = NeuralNetwork().to(device)

 print(model)

@@ -193,7 +199,7 @@ def test(dataloader, model):
     print(f'Predicted: "{predicted}", Actual: "{actual}"')

 ##################################################################
-# Pytorch Quickstart Topics
+# PyTorch Quickstart Topics
 # ----------------------------------------
 # | `Tensors `_
 # | `DataSets and DataLoaders `_
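
Reviewer note: below is a minimal, self-contained sketch (not part of the patch) that exercises the class-based model this change introduces in quickstart_tutorial.py. The dummy input batch, its size of 64, and the variable names `dummy` and `probs` are illustrative assumptions only.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Same model the patch adds to quickstart_tutorial.py.
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        self.layer1 = nn.Linear(28*28, 512)
        self.layer2 = nn.Linear(512, 512)
        self.output = nn.Linear(512, 10)

    def forward(self, x):
        x = self.flatten(x)            # (N, 1, 28, 28) -> (N, 784)
        x = F.relu(self.layer1(x))     # (N, 784) -> (N, 512)
        x = F.relu(self.layer2(x))     # (N, 512) -> (N, 512)
        x = self.output(x)             # (N, 512) -> (N, 10)
        return F.softmax(x, dim=1)     # per-sample class probabilities

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = NeuralNetwork().to(device)

# Hypothetical dummy batch shaped like FashionMNIST images (batch size of 64 assumed).
dummy = torch.rand(64, 1, 28, 28, device=device)
probs = model(dummy)
print(probs.shape)       # torch.Size([64, 10])
print(probs.sum(dim=1))  # each row sums to 1 because of the softmax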