diff --git a/notebooks/LRP-L96.ipynb b/notebooks/LRP-L96.ipynb index ad2e3ab7..7855aa0d 100644 --- a/notebooks/LRP-L96.ipynb +++ b/notebooks/LRP-L96.ipynb @@ -6,19 +6,11 @@ "metadata": {}, "outputs": [], "source": [ - "%matplotlib inline\n", - "import math\n", - "\n", + "# import math\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import torch\n", - "import torch.nn.functional as F\n", - "import torch.utils.data as Data\n", - "import torchvision\n", - "from sklearn.metrics import r2_score\n", - "from torch import nn, optim\n", - "from torch.autograd import Variable\n", - "from torch_lr_finder import LRFinder\n", + "from torch import nn\n", "\n", "np.random.seed(14) # For reproducibility\n", "torch.manual_seed(14) # For reproducibility" diff --git a/notebooks/Neural-Network-Advection-FwdEuler.ipynb b/notebooks/Neural-Network-Advection-FwdEuler.ipynb index 41be18fe..a4ae9bd1 100644 --- a/notebooks/Neural-Network-Advection-FwdEuler.ipynb +++ b/notebooks/Neural-Network-Advection-FwdEuler.ipynb @@ -13,9 +13,6 @@ "metadata": {}, "outputs": [], "source": [ - "%matplotlib inline\n", - "import math\n", - "\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from IPython.display import HTML\n", @@ -709,7 +706,6 @@ "from sklearn.metrics import r2_score\n", "from torch import nn, optim\n", "from torch.autograd import Variable\n", - "from torch_lr_finder import LRFinder\n", "\n", "np.random.seed(14) # For reproducibility\n", "torch.manual_seed(14) # For reproducibility" diff --git a/notebooks/Neural-Network-Advection.ipynb b/notebooks/Neural-Network-Advection.ipynb index 1bab3b0e..ff3d9a37 100644 --- a/notebooks/Neural-Network-Advection.ipynb +++ b/notebooks/Neural-Network-Advection.ipynb @@ -13,9 +13,6 @@ "metadata": {}, "outputs": [], "source": [ - "%matplotlib inline\n", - "import math\n", - "\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from IPython.display import HTML\n", @@ -787,10 +784,8 @@ "import torch.nn.functional as F\n", "import torch.utils.data as Data\n", "import torchvision\n", - "from sklearn.metrics import r2_score\n", "from torch import nn, optim\n", "from torch.autograd import Variable\n", - "from torch_lr_finder import LRFinder\n", "\n", "np.random.seed(14) # For reproducibility\n", "torch.manual_seed(14) # For reproducibility" diff --git a/notebooks/Neural_network_for_Lorenz96.ipynb b/notebooks/Neural_network_for_Lorenz96.ipynb index 2f9e2523..9ec9356d 100644 --- a/notebooks/Neural_network_for_Lorenz96.ipynb +++ b/notebooks/Neural_network_for_Lorenz96.ipynb @@ -27,7 +27,8 @@ "from sklearn.metrics import r2_score\n", "from torch import nn, optim\n", "from torch.autograd import Variable\n", - "from torch_lr_finder import LRFinder\n", + "\n", + "# from torch_lr_finder import LRFinder # you might need to install the torch-lr-finder package\n", "\n", "np.random.seed(14) # For reproducibility\n", "torch.manual_seed(14); # For reproducibility" @@ -1144,7 +1145,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Finding an optimal learning rate" + "## Finding an optimal learning rate\n", + "\n", + "To use `LRFinder`, install the `torch-lr-finder` package if needed, uncomment its import in the top cell, and then uncomment the code cells in this section." 
] }, { @@ -1153,12 +1156,12 @@ "metadata": {}, "outputs": [], "source": [ - "nn_3l_LR = Net_ANN().double()\n", - "optimizer = optim.Adam(nn_3l_LR.parameters(), lr=1e-7)\n", - "lr_finder = LRFinder(nn_3l_LR, optimizer, criterion)\n", - "lr_finder.range_test(loader, end_lr=100, num_iter=200)\n", - "lr_finder.plot() # to inspect the loss-learning rate graph\n", - "lr_finder.reset() # to reset the model and optimizer to their initial state" + "# nn_3l_LR = Net_ANN().double()\n", + "# optimizer = optim.Adam(nn_3l_LR.parameters(), lr=1e-7)\n", + "# lr_finder = LRFinder(nn_3l_LR, optimizer, criterion)\n", + "# lr_finder.range_test(loader, end_lr=100, num_iter=200)\n", + "# lr_finder.plot() # to inspect the loss-learning rate graph\n", + "# lr_finder.reset() # to reset the model and optimizer to their initial state" ] }, { @@ -1167,19 +1170,19 @@ "metadata": {}, "outputs": [], "source": [ - "n_epochs = 20 # Number of epocs\n", - "optimizer = optim.Adam(nn_3l_LR.parameters(), lr=0.01)\n", - "validation_loss = list()\n", - "train_loss = list()\n", - "# time0 = time()\n", - "for epoch in range(1, n_epochs + 1):\n", - "    train_model(nn_3l_LR, criterion, loader, optimizer)\n", - "    train_loss.append(test_model(nn_3l_LR, criterion, loader, optimizer, \"train\"))\n", - "    validation_loss.append(test_model(nn_3l_LR, criterion, loader_test, optimizer))\n", - "plt.plot(train_loss, \"b\", label=\"training loss\")\n", - "plt.plot(validation_loss, \"r\", label=\"validation loss\")\n", + "# n_epochs = 20 # Number of epochs\n", + "# optimizer = optim.Adam(nn_3l_LR.parameters(), lr=0.01)\n", + "# validation_loss = list()\n", + "# train_loss = list()\n", + "# # time0 = time()\n", + "# for epoch in range(1, n_epochs + 1):\n", + "#     train_model(nn_3l_LR, criterion, loader, optimizer)\n", + "#     train_loss.append(test_model(nn_3l_LR, criterion, loader, optimizer, \"train\"))\n", + "#     validation_loss.append(test_model(nn_3l_LR, criterion, loader_test, optimizer))\n", + "# plt.plot(train_loss, \"b\", label=\"training loss\")\n", + "# plt.plot(validation_loss, \"r\", label=\"validation loss\")\n", "\n", - "plt.legend();" + "# plt.legend();" ] }, { @@ -1195,11 +1198,11 @@ "metadata": {}, "outputs": [], "source": [ - "optimizer = optim.Adam(nn_3l_LR.parameters(), lr=1e-7)\n", - "lr_finder = LRFinder(nn_3l_LR, optimizer, criterion)\n", - "lr_finder.range_test(loader, end_lr=100, num_iter=200)\n", - "lr_finder.plot() # to inspect the loss-learning rate graph\n", - "lr_finder.reset() # to reset the model and optimizer to their initial state" + "# optimizer = optim.Adam(nn_3l_LR.parameters(), lr=1e-7)\n", + "# lr_finder = LRFinder(nn_3l_LR, optimizer, criterion)\n", + "# lr_finder.range_test(loader, end_lr=100, num_iter=200)\n", + "# lr_finder.plot() # to inspect the loss-learning rate graph\n", + "# lr_finder.reset() # to reset the model and optimizer to their initial state" ] }, { @@ -1208,19 +1211,19 @@ "metadata": {}, "outputs": [], "source": [ - "n_epochs = 10 # Number of epocs\n", - "optimizer = optim.Adam(nn_3l_LR.parameters(), lr=0.001)\n", - "validation_loss = list()\n", - "train_loss = list()\n", - "# time0 = time()\n", - "for epoch in range(1, n_epochs + 1):\n", - "    train_model(nn_3l_LR, criterion, loader, optimizer)\n", - "    train_loss.append(test_model(nn_3l_LR, criterion, loader, optimizer, \"train\"))\n", - "    validation_loss.append(test_model(nn_3l_LR, criterion, loader_test, optimizer))\n", - "plt.plot(train_loss, \"b\", label=\"training loss\")\n", - "plt.plot(validation_loss, \"r\", label=\"validation loss\")\n", - 
"\n", - "plt.legend();" + "# n_epochs = 10 # Number of epocs\n", + "# optimizer = optim.Adam(nn_3l_LR.parameters(), lr=0.001)\n", + "# validation_loss = list()\n", + "# train_loss = list()\n", + "# # time0 = time()\n", + "# for epoch in range(1, n_epochs + 1):\n", + "# train_model(nn_3l_LR, criterion, loader, optimizer)\n", + "# train_loss.append(test_model(nn_3l_LR, criterion, loader, optimizer, \"train\"))\n", + "# validation_loss.append(test_model(nn_3l_LR, criterion, loader_test, optimizer))\n", + "# plt.plot(train_loss, \"b\", label=\"training loss\")\n", + "# plt.plot(validation_loss, \"r\", label=\"validation loss\")\n", + "\n", + "# plt.legend();" ] }, { diff --git a/notebooks/gradient_decent.ipynb b/notebooks/gradient_decent.ipynb index 60436dba..0a66abe2 100644 --- a/notebooks/gradient_decent.ipynb +++ b/notebooks/gradient_decent.ipynb @@ -91,9 +91,7 @@ "\n", "# from fastai.basics import *\n", "from matplotlib.animation import FuncAnimation\n", - "from torch import nn, optim\n", - "from torch.autograd import Variable\n", - "from torch_lr_finder import LRFinder\n", + "from torch import nn\n", "\n", "np.random.seed(42)" ]