From a94d8b5a1702c831cd520c053133d9cb5c8a45b4 Mon Sep 17 00:00:00 2001
From: loganthomas
Date: Wed, 19 Jun 2024 15:08:29 -0500
Subject: [PATCH 1/2] FIX: redo PR

---
 beginner_source/basics/optimization_tutorial.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/beginner_source/basics/optimization_tutorial.py b/beginner_source/basics/optimization_tutorial.py
index c6c327f851..452dfaac52 100644
--- a/beginner_source/basics/optimization_tutorial.py
+++ b/beginner_source/basics/optimization_tutorial.py
@@ -147,7 +147,7 @@ def forward(self, x):
 # We define ``train_loop`` that loops over our optimization code, and ``test_loop`` that
 # evaluates the model's performance against our test data.
 
-def train_loop(dataloader, model, loss_fn, optimizer):
+def train_loop(dataloader, model, batch_size, loss_fn, optimizer):
     size = len(dataloader.dataset)
     # Set the model to training mode - important for batch normalization and dropout layers
     # Unnecessary in this situation but added for best practices
@@ -198,7 +198,7 @@ def test_loop(dataloader, model, loss_fn):
 epochs = 10
 for t in range(epochs):
     print(f"Epoch {t+1}\n-------------------------------")
-    train_loop(train_dataloader, model, loss_fn, optimizer)
+    train_loop(train_dataloader, model, batch_size, loss_fn, optimizer)
     test_loop(test_dataloader, model, loss_fn)
 print("Done!")
 

From 73bfa3436e4406d7384c46e29f6832bd7771a395 Mon Sep 17 00:00:00 2001
From: loganthomas
Date: Thu, 20 Jun 2024 20:06:34 -0500
Subject: [PATCH 2/2] make batch size clear global variable

---
 beginner_source/basics/optimization_tutorial.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/beginner_source/basics/optimization_tutorial.py b/beginner_source/basics/optimization_tutorial.py
index 452dfaac52..06a886ad63 100644
--- a/beginner_source/basics/optimization_tutorial.py
+++ b/beginner_source/basics/optimization_tutorial.py
@@ -44,8 +44,10 @@
     transform=ToTensor()
 )
 
-train_dataloader = DataLoader(training_data, batch_size=64)
-test_dataloader = DataLoader(test_data, batch_size=64)
+batch_size = 64
+
+train_dataloader = DataLoader(training_data, batch_size=batch_size)
+test_dataloader = DataLoader(test_data, batch_size=batch_size)
 
 class NeuralNetwork(nn.Module):
     def __init__(self):
@@ -81,9 +83,11 @@ def forward(self, x):
 # - **Learning Rate** - how much to update models parameters at each batch/epoch. Smaller values yield slow learning speed, while large values may result in unpredictable behavior during training.
 #
 
-learning_rate = 1e-3
-batch_size = 64
 epochs = 5
+learning_rate = 1e-3
+# batch_size defined earlier in Prerequisite Code
+
+print(f'{epochs=} {batch_size=} {learning_rate=}')
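
Note: to see the net effect of the two patches together, below is a minimal runnable sketch of the updated signature and call site. The hunks above show only the signature and call-site changes, not the loop body, so the toy data, the toy model, and the `current = batch * batch_size + len(X)` progress line are illustrative assumptions about how the passed-in `batch_size` is consumed, not part of this patch series.

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

batch_size = 64  # single global, as introduced in the second patch

# Toy stand-ins for the tutorial's FashionMNIST data and model (assumed).
features = torch.randn(256, 28 * 28)
labels = torch.randint(0, 10, (256,))
train_dataloader = DataLoader(TensorDataset(features, labels), batch_size=batch_size)

model = nn.Sequential(nn.Linear(28 * 28, 10))
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

def train_loop(dataloader, model, batch_size, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if batch % 100 == 0:
            # Assumed use of the new parameter: len(X) handles a final
            # partial batch, so the sample count never overshoots `size`.
            current = batch * batch_size + len(X)
            print(f"loss: {loss.item():>7f}  [{current:>5d}/{size:>5d}]")

train_loop(train_dataloader, model, batch_size, loss_fn, optimizer)

Passing batch_size as an explicit parameter (rather than reading the global silently) makes train_loop's dependence on the value defined in the Prerequisite Code section visible at the call site, which appears to be the motivation for both patches.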