Removed use of torch_lr_finder
- Since the environment will not have torch-lr-finder, I've removed the import where it was unused and commented it out otherwise.
adcroft committed May 17, 2022
1 parent 6a9cc95 commit d646573
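
As an aside: an alternative to deleting or commenting out the import, not taken in this commit, would be to guard it so the notebooks keep working whether or not the package is installed. A minimal sketch of that pattern (hypothetical; the conditional cell at the end is illustrative only):

try:
    from torch_lr_finder import LRFinder
except ImportError:  # torch-lr-finder is not installed in this environment
    LRFinder = None

# Cells that use the learning-rate finder can then be made conditional:
# if LRFinder is not None:
#     lr_finder = LRFinder(model, optimizer, criterion)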
Showing 5 changed files with 44 additions and 60 deletions.
12 changes: 2 additions & 10 deletions notebooks/LRP-L96.ipynb
@@ -6,19 +6,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"%matplotlib inline\n",
-"import math\n",
-"\n",
+"# import math\n",
 "import matplotlib.pyplot as plt\n",
 "import numpy as np\n",
 "import torch\n",
-"import torch.nn.functional as F\n",
-"import torch.utils.data as Data\n",
-"import torchvision\n",
-"from sklearn.metrics import r2_score\n",
-"from torch import nn, optim\n",
-"from torch.autograd import Variable\n",
-"from torch_lr_finder import LRFinder\n",
+"from torch import nn\n",
 "\n",
 "np.random.seed(14) # For reproducibility\n",
 "torch.manual_seed(14) # For reproducibility"
4 changes: 0 additions & 4 deletions notebooks/Neural-Network-Advection-FwdEuler.ipynb
@@ -13,9 +13,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"%matplotlib inline\n",
-"import math\n",
-"\n",
 "import matplotlib.pyplot as plt\n",
 "import numpy as np\n",
 "from IPython.display import HTML\n",
@@ -709,7 +706,6 @@
 "from sklearn.metrics import r2_score\n",
 "from torch import nn, optim\n",
 "from torch.autograd import Variable\n",
-"from torch_lr_finder import LRFinder\n",
 "\n",
 "np.random.seed(14) # For reproducibility\n",
 "torch.manual_seed(14) # For reproducibility"
5 changes: 0 additions & 5 deletions notebooks/Neural-Network-Advection.ipynb
@@ -13,9 +13,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"%matplotlib inline\n",
-"import math\n",
-"\n",
 "import matplotlib.pyplot as plt\n",
 "import numpy as np\n",
 "from IPython.display import HTML\n",
@@ -787,10 +784,8 @@
 "import torch.nn.functional as F\n",
 "import torch.utils.data as Data\n",
 "import torchvision\n",
-"from sklearn.metrics import r2_score\n",
 "from torch import nn, optim\n",
 "from torch.autograd import Variable\n",
-"from torch_lr_finder import LRFinder\n",
 "\n",
 "np.random.seed(14) # For reproducibility\n",
 "torch.manual_seed(14) # For reproducibility"
79 changes: 41 additions & 38 deletions notebooks/Neural_network_for_Lorenz96.ipynb
@@ -27,7 +27,8 @@
 "from sklearn.metrics import r2_score\n",
 "from torch import nn, optim\n",
 "from torch.autograd import Variable\n",
-"from torch_lr_finder import LRFinder\n",
+"\n",
+"# from torch_lr_finder import LRFinder # you might need to install the torch-lr-finder package\n",
 "\n",
 "np.random.seed(14) # For reproducibility\n",
 "torch.manual_seed(14); # For reproducibility"
@@ -1144,7 +1145,9 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Finding an optimal learning rate"
+"## Finding an optimal learning rate\n",
+"\n",
+"To use the `LRFinder` package, uncomment the import of LRFinder in the top cell and then uncomment the next cell."
 ]
},
{
@@ -1153,12 +1156,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"nn_3l_LR = Net_ANN().double()\n",
-"optimizer = optim.Adam(nn_3l_LR.parameters(), lr=1e-7)\n",
-"lr_finder = LRFinder(nn_3l_LR, optimizer, criterion)\n",
-"lr_finder.range_test(loader, end_lr=100, num_iter=200)\n",
-"lr_finder.plot() # to inspect the loss-learning rate graph\n",
-"lr_finder.reset() # to reset the model and optimizer to their initial state"
+"# nn_3l_LR = Net_ANN().double()\n",
+"# optimizer = optim.Adam(nn_3l_LR.parameters(), lr=1e-7)\n",
+"# lr_finder = LRFinder(nn_3l_LR, optimizer, criterion)\n",
+"# lr_finder.range_test(loader, end_lr=100, num_iter=200)\n",
+"# lr_finder.plot() # to inspect the loss-learning rate graph\n",
+"# lr_finder.reset() # to reset the model and optimizer to their initial state"
 ]
},
{
@@ -1167,19 +1170,19 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"n_epochs = 20 # Number of epocs\n",
-"optimizer = optim.Adam(nn_3l_LR.parameters(), lr=0.01)\n",
-"validation_loss = list()\n",
-"train_loss = list()\n",
-"# time0 = time()\n",
-"for epoch in range(1, n_epochs + 1):\n",
-"    train_model(nn_3l_LR, criterion, loader, optimizer)\n",
-"    train_loss.append(test_model(nn_3l_LR, criterion, loader, optimizer, \"train\"))\n",
-"    validation_loss.append(test_model(nn_3l_LR, criterion, loader_test, optimizer))\n",
-"plt.plot(train_loss, \"b\", label=\"training loss\")\n",
-"plt.plot(validation_loss, \"r\", label=\"validation loss\")\n",
+"# n_epochs = 20 # Number of epocs\n",
+"# optimizer = optim.Adam(nn_3l_LR.parameters(), lr=0.01)\n",
+"# validation_loss = list()\n",
+"# train_loss = list()\n",
+"# # time0 = time()\n",
+"# for epoch in range(1, n_epochs + 1):\n",
+"#     train_model(nn_3l_LR, criterion, loader, optimizer)\n",
+"#     train_loss.append(test_model(nn_3l_LR, criterion, loader, optimizer, \"train\"))\n",
+"#     validation_loss.append(test_model(nn_3l_LR, criterion, loader_test, optimizer))\n",
+"# plt.plot(train_loss, \"b\", label=\"training loss\")\n",
+"# plt.plot(validation_loss, \"r\", label=\"validation loss\")\n",
 "\n",
-"plt.legend();"
+"# plt.legend();"
 ]
},
{
@@ -1195,11 +1198,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"optimizer = optim.Adam(nn_3l_LR.parameters(), lr=1e-7)\n",
-"lr_finder = LRFinder(nn_3l_LR, optimizer, criterion)\n",
-"lr_finder.range_test(loader, end_lr=100, num_iter=200)\n",
-"lr_finder.plot() # to inspect the loss-learning rate graph\n",
-"lr_finder.reset() # to reset the model and optimizer to their initial state"
+"# optimizer = optim.Adam(nn_3l_LR.parameters(), lr=1e-7)\n",
+"# lr_finder = LRFinder(nn_3l_LR, optimizer, criterion)\n",
+"# lr_finder.range_test(loader, end_lr=100, num_iter=200)\n",
+"# lr_finder.plot() # to inspect the loss-learning rate graph\n",
+"# lr_finder.reset() # to reset the model and optimizer to their initial state"
 ]
},
{
@@ -1208,19 +1211,19 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"n_epochs = 10 # Number of epocs\n",
-"optimizer = optim.Adam(nn_3l_LR.parameters(), lr=0.001)\n",
-"validation_loss = list()\n",
-"train_loss = list()\n",
-"# time0 = time()\n",
-"for epoch in range(1, n_epochs + 1):\n",
-"    train_model(nn_3l_LR, criterion, loader, optimizer)\n",
-"    train_loss.append(test_model(nn_3l_LR, criterion, loader, optimizer, \"train\"))\n",
-"    validation_loss.append(test_model(nn_3l_LR, criterion, loader_test, optimizer))\n",
-"plt.plot(train_loss, \"b\", label=\"training loss\")\n",
-"plt.plot(validation_loss, \"r\", label=\"validation loss\")\n",
-"\n",
-"plt.legend();"
+"# n_epochs = 10 # Number of epocs\n",
+"# optimizer = optim.Adam(nn_3l_LR.parameters(), lr=0.001)\n",
+"# validation_loss = list()\n",
+"# train_loss = list()\n",
+"# # time0 = time()\n",
+"# for epoch in range(1, n_epochs + 1):\n",
+"#     train_model(nn_3l_LR, criterion, loader, optimizer)\n",
+"#     train_loss.append(test_model(nn_3l_LR, criterion, loader, optimizer, \"train\"))\n",
+"#     validation_loss.append(test_model(nn_3l_LR, criterion, loader_test, optimizer))\n",
+"# plt.plot(train_loss, \"b\", label=\"training loss\")\n",
+"# plt.plot(validation_loss, \"r\", label=\"validation loss\")\n",
+"\n",
+"# plt.legend();"
 ]
},
{
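
For context on the cells commented out above: they run torch-lr-finder's learning-rate range test, which sweeps the learning rate upward over a number of iterations while recording the loss, so a good starting rate can be read off the loss-vs-rate curve. A minimal sketch of the re-enabled workflow, assuming the package has been installed (e.g. pip install torch-lr-finder) and that Net_ANN, criterion and loader are defined earlier in the notebook:

from torch import optim
from torch_lr_finder import LRFinder

model = Net_ANN().double()
optimizer = optim.Adam(model.parameters(), lr=1e-7)  # begin the sweep from a very small rate
lr_finder = LRFinder(model, optimizer, criterion)
lr_finder.range_test(loader, end_lr=100, num_iter=200)  # raise lr toward 100, recording the loss
lr_finder.plot()  # inspect the loss vs. learning-rate curve
lr_finder.reset()  # restore the model and optimizer to their initial state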
4 changes: 1 addition & 3 deletions notebooks/gradient_decent.ipynb
@@ -91,9 +91,7 @@
 "\n",
 "# from fastai.basics import *\n",
 "from matplotlib.animation import FuncAnimation\n",
-"from torch import nn, optim\n",
-"from torch.autograd import Variable\n",
-"from torch_lr_finder import LRFinder\n",
+"from torch import nn\n",
 "\n",
 "np.random.seed(42)"
]
