From ce315798eceb1c7a14d35467bfc1bce36bfee27c Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Mon, 2 Nov 2020 12:14:51 -0800 Subject: [PATCH 01/55] added basic devcontainer --- .devcontainer/Dockerfile | 7 +++++++ .devcontainer/devcontainer.json | 20 +++++++++++++++++++ .devcontainer/requirements.txt | 30 ++++++++++++++++++++++++++++ .gitignore | 3 +++ beginner_source/pytorch_overview.rst | 3 +++ 5 files changed, 63 insertions(+) create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json create mode 100644 .devcontainer/requirements.txt create mode 100644 beginner_source/pytorch_overview.rst diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 00000000000..206113697da --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.6-slim + +COPY requirements.txt requirements.txt + +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && apt-get install git gcc -y \ + && pip install --no-cache-dir -r requirements.txt \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000000..429fd872121 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,20 @@ +{ + "name": "PyTorch", + "build": { + "context": "..", + "dockerfile": "Dockerfile", + "args": { } + }, + "settings": { + "terminal.integrated.shell.linux": "/bin/bash", + "workbench.startupEditor": "none", + "files.autoSave": "afterDelay", + "python.dataScience.enabled": true, + "python.dataScience.alwaysTrustNotebooks": true, + "python.insidersChannel": "weekly", + "python.showStartPage": false + }, + "extensions": [ + "ms-python.python" + ] +} \ No newline at end of file diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt new file mode 100644 index 00000000000..5e87cf36170 --- /dev/null +++ b/.devcontainer/requirements.txt @@ -0,0 +1,30 @@ +# Refer to ./jenkins/build.sh for tutorial build instructions + +sphinx==1.8.2 +sphinx-gallery==0.3.1 +tqdm +numpy +matplotlib +torch +torchvision +torchtext +torchaudio +PyHamcrest +bs4 +awscli==1.16.35 +flask +spacy +ray[tune] + +# PyTorch Theme +-e git+git://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme + +ipython + +# to run examples +pandas +scikit-image +# pillow >= 4.2 will throw error when trying to write mode RGBA as JPEG, +# this is a workaround to the issue. 
+pillow==4.1.1 +wget diff --git a/.gitignore b/.gitignore index 27c61631029..2834a874f07 100644 --- a/.gitignore +++ b/.gitignore @@ -121,3 +121,6 @@ cleanup.sh # PyTorch things *.pt + +# vscode things +.vscode/ diff --git a/beginner_source/pytorch_overview.rst b/beginner_source/pytorch_overview.rst new file mode 100644 index 00000000000..efdb708f171 --- /dev/null +++ b/beginner_source/pytorch_overview.rst @@ -0,0 +1,3 @@ +Deep Learning with PyTorch: A Fast Overview(TM) +----------------------------------------------- +Seth From a0ce8e165e45949f494a7195c4523b83a70ee44c Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Mon, 2 Nov 2020 13:59:02 -0800 Subject: [PATCH 02/55] added more complete devcontainer --- .devcontainer/Dockerfile | 2 +- .devcontainer/devcontainer.json | 3 ++- .devcontainer/requirements.txt | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 206113697da..3ca67455049 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,5 +3,5 @@ FROM python:3.6-slim COPY requirements.txt requirements.txt RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - && apt-get install git gcc -y \ + && apt-get install git gcc unzip make -y \ && pip install --no-cache-dir -r requirements.txt \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 429fd872121..a0212d506ff 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -15,6 +15,7 @@ "python.showStartPage": false }, "extensions": [ - "ms-python.python" + "ms-python.python", + "lextudio.restructuredtext" ] } \ No newline at end of file diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt index 5e87cf36170..0eb78001604 100644 --- a/.devcontainer/requirements.txt +++ b/.devcontainer/requirements.txt @@ -15,6 +15,7 @@ awscli==1.16.35 flask spacy ray[tune] +pylint # PyTorch Theme -e git+git://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme From 390e283819758339d8065b7a71a5180dcbd4fd63 Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Mon, 2 Nov 2020 14:43:58 -0800 Subject: [PATCH 03/55] added auto reload for in browser editing --- .devcontainer/requirements.txt | 1 + preview.sh | 1 + 2 files changed, 2 insertions(+) create mode 100644 preview.sh diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt index 0eb78001604..b5734aad709 100644 --- a/.devcontainer/requirements.txt +++ b/.devcontainer/requirements.txt @@ -16,6 +16,7 @@ flask spacy ray[tune] pylint +sphinx-autobuild # PyTorch Theme -e git+git://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme diff --git a/preview.sh b/preview.sh new file mode 100644 index 00000000000..6482ce9afcf --- /dev/null +++ b/preview.sh @@ -0,0 +1 @@ +sphinx-autobuild --ignore *.png -D plot_gallery=0 -b html "." 
"_build/html" \ No newline at end of file From 6271276e78fa01c4e8d5b7fa70ad2bdcc672c447 Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Mon, 2 Nov 2020 14:59:37 -0800 Subject: [PATCH 04/55] moved pip packages for codespaces to the end --- .devcontainer/requirements.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt index b5734aad709..cebbcfc763d 100644 --- a/.devcontainer/requirements.txt +++ b/.devcontainer/requirements.txt @@ -15,8 +15,6 @@ awscli==1.16.35 flask spacy ray[tune] -pylint -sphinx-autobuild # PyTorch Theme -e git+git://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme @@ -30,3 +28,7 @@ scikit-image # this is a workaround to the issue. pillow==4.1.1 wget + +# for codespaces env +pylint +sphinx-autobuild From 762ca9d05c67f2d2773f7b25f31b9f1fb0a0ba4f Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Mon, 2 Nov 2020 15:15:27 -0800 Subject: [PATCH 05/55] corrected auto preview --- .devcontainer/requirements.txt | 1 - preview.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt index cebbcfc763d..993febbb4c6 100644 --- a/.devcontainer/requirements.txt +++ b/.devcontainer/requirements.txt @@ -31,4 +31,3 @@ wget # for codespaces env pylint -sphinx-autobuild diff --git a/preview.sh b/preview.sh index 6482ce9afcf..b4f99cc1ecb 100644 --- a/preview.sh +++ b/preview.sh @@ -1 +1,2 @@ +pip install sphinx-autobuild sphinx-autobuild --ignore *.png -D plot_gallery=0 -b html "." "_build/html" \ No newline at end of file From 31cb40840e35eb190357a397968dbb84efab1b96 Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Mon, 2 Nov 2020 15:38:24 -0800 Subject: [PATCH 06/55] added better ignore criteria for watch --- preview.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/preview.sh b/preview.sh index b4f99cc1ecb..d4d4128c234 100644 --- a/preview.sh +++ b/preview.sh @@ -1,2 +1,8 @@ pip install sphinx-autobuild -sphinx-autobuild --ignore *.png -D plot_gallery=0 -b html "." "_build/html" \ No newline at end of file +sphinx-autobuild --ignore "*.png" \ + --ignore "advanced/*" \ + --ignore "beginner/*" \ + --ignore "intermediate/*" \ + --ignore "prototype/*" \ + --ignore "recipes/*" \ + -D plot_gallery=0 -b html "." 
"_build/html" \ No newline at end of file From 97300e32176ac604c6801b14f1f4ce3f813a24b7 Mon Sep 17 00:00:00 2001 From: Seth Juarez Date: Tue, 3 Nov 2020 01:51:30 +0000 Subject: [PATCH 07/55] added quickstart and folder for sub-docs --- beginner_source/pytorch_overview.rst | 3 - beginner_source/quickstart/README.txt | 7 + beginner_source/quickstart/data_tutorial.py | 9 + beginner_source/quickstart_tutorial.py | 193 ++++++++++++++++++++ index.rst | 1 + preview.sh | 2 + 6 files changed, 212 insertions(+), 3 deletions(-) delete mode 100644 beginner_source/pytorch_overview.rst create mode 100644 beginner_source/quickstart/README.txt create mode 100644 beginner_source/quickstart/data_tutorial.py create mode 100644 beginner_source/quickstart_tutorial.py mode change 100644 => 100755 preview.sh diff --git a/beginner_source/pytorch_overview.rst b/beginner_source/pytorch_overview.rst deleted file mode 100644 index efdb708f171..00000000000 --- a/beginner_source/pytorch_overview.rst +++ /dev/null @@ -1,3 +0,0 @@ -Deep Learning with PyTorch: A Fast Overview(TM) ------------------------------------------------ -Seth diff --git a/beginner_source/quickstart/README.txt b/beginner_source/quickstart/README.txt new file mode 100644 index 00000000000..0dcf2df4681 --- /dev/null +++ b/beginner_source/quickstart/README.txt @@ -0,0 +1,7 @@ +PyTorch Quickstart +---------------------------------- + +1. data_tutorial.py + Data Tutorial + https://pytorch.org/tutorials/beginner/quickstart/data_tutorial.html + diff --git a/beginner_source/quickstart/data_tutorial.py b/beginner_source/quickstart/data_tutorial.py new file mode 100644 index 00000000000..18024f126aa --- /dev/null +++ b/beginner_source/quickstart/data_tutorial.py @@ -0,0 +1,9 @@ +""" +Data Tutorial +=================== + +More to come + +""" + +x = 5 \ No newline at end of file diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py new file mode 100644 index 00000000000..2968ce35e15 --- /dev/null +++ b/beginner_source/quickstart_tutorial.py @@ -0,0 +1,193 @@ +""" +PyTorch Quickstart +=================== + +The basic machine learning concepts in any framework should include: + +1. Working with data +2. Creating models +3. Optimizing Parameters +4. Saving Models +5. Loading Models + +""" + +import torch +import torch.nn as nn +import torch.onnx as onnx +import matplotlib.pyplot as plt +from torch.utils.data import DataLoader +from torchvision import datasets, transforms + +###################################################################### +# Working with data +# ----------------- +# +# PyTorch has two basic data primitives: ``DataSet`` and ``DataLoader``. +# These ``DataSet`` objects include a ``transforms`` mechanism to +# modify data in-place. 
+
+
+# image classes
+classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
+
+# data used for training
+training_data = datasets.FashionMNIST('data', train=True, download=True,
+                    transform=transforms.Compose([transforms.ToTensor()]),
+                    target_transform=transforms.Compose([
+                        transforms.Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))
+                    ])
+)
+
+# data used for testing
+test_data = datasets.FashionMNIST('data', train=False, download=True,
+                    transform=transforms.Compose([transforms.ToTensor()]),
+                    target_transform=transforms.Compose([
+                        transforms.Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))
+                    ])
+)
+
+######################################################################
+# DataLoader
+
+# batch size
+batch_size = 64
+
+# loader
+train_dataloader = DataLoader(training_data, batch_size=batch_size, num_workers=0, pin_memory=True)
+test_dataloader = DataLoader(test_data, batch_size=batch_size, num_workers=0, pin_memory=True)
+
+
+######################################################################
+# More details `DataSet, DataLoader, and transforms `_
+#
+# Creating Models
+# ---------------
+#
+# There are two ways of creating models: in-line or as a class. This
+# quickstart will consider an in-line definition.

+# where to run
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+print('Using {} device'.format(device))
+
+# model
+model = nn.Sequential(
+            nn.Flatten(),
+            nn.Linear(28*28, 512),
+            nn.ReLU(),
+            nn.Linear(512, 512),
+            nn.ReLU(),
+            nn.Linear(512, len(classes)),
+            nn.Softmax(dim=1)
+        ).to(device)
+
+print(model)
+
+######################################################################
+# Optimizing Parameters
+# ---------------------
+#
+# Optimizing model parameters requires a loss function, an optimizer,
+# and an optimization loop.
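+#
+# An aside, not from the original tutorial: ``BCELoss`` is used below because
+# this model ends in ``Softmax`` and the targets are one-hot vectors. The more
+# common PyTorch pattern pairs raw logits with ``CrossEntropyLoss`` and integer
+# class labels; a minimal sketch:
+#
+# .. code:: python
+#
+#    logits = torch.randn(8, 10)           # a batch of 8 raw, unnormalized scores
+#    labels = torch.randint(0, 10, (8,))   # 8 integer class labels
+#    loss = torch.nn.CrossEntropyLoss()(logits, labels)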
+
+# cost function used to determine best parameters
+cost = torch.nn.BCELoss()
+
+# used to create optimal parameters
+learning_rate = 1e-3
+optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
+
+######################################################################
+# training function
+def train(dataloader, model, loss_fn, optimizer):
+    size = len(dataloader.dataset)
+    for batch, (X, Y) in enumerate(dataloader):
+        X, Y = X.to(device), Y.to(device)
+        optimizer.zero_grad()
+        pred = model(X)
+        loss = loss_fn(pred, Y)
+        loss.backward()
+        optimizer.step()
+
+        if batch % 100 == 0:
+            loss, current = loss.item(), batch * len(X)
+            print(f'loss: {loss:>7f} [{current:>5d}/{size:>5d}]')
+
+######################################################################
+# validation/test function
+def test(dataloader, model):
+    size = len(dataloader.dataset)
+    model.eval()
+    test_loss, correct = 0, 0
+
+    with torch.no_grad():
+        for batch, (X, Y) in enumerate(dataloader):
+            X, Y = X.to(device), Y.to(device)
+            pred = model(X)
+
+            test_loss += cost(pred, Y).item()
+            correct += (pred.argmax(1) == Y.argmax(1)).type(torch.float).sum().item()
+
+    test_loss /= size
+    correct /= size
+
+    print(f'\nTest Error:\nacc: {(100*correct):>0.1f}%, avg loss: {test_loss:>8f}\n')
+
+######################################################################
+# training loop
+epochs = 5
+
+for t in range(epochs):
+    print(f'Epoch {t+1}\n-------------------------------')
+    train(train_dataloader, model, cost, optimizer)
+    test(test_dataloader, model)
+print('Done!')
+
+######################################################################
+# Saving Models
+# -------------
+#
+# PyTorch can serialize the internal model state to a file. It also
+# has built-in ONNX support.
+
+# saving PyTorch Model Dictionary
+torch.save(model.state_dict(), 'model.pth')
+print('Saved PyTorch Model to model.pth')
+
+# create dummy variable to traverse graph
+x = torch.randint(255, (1, 28*28), dtype=torch.float).to(device) / 255
+onnx.export(model, x, 'model.onnx')
+print('Saved onnx model to model.onnx')
+
+######################################################################
+# Loading Models
+# ----------------------------
+#
+# Once a model has been serialized, the process for loading the
+# parameters includes re-creating the model shape and then loading
+# the state dictionary. 
Once loaded the model can be used for either +# retraining or inference purposes (in this example it is used for +# inference) + +loaded_model = nn.Sequential( + nn.Flatten(), + nn.Linear(28*28, 512), + nn.ReLU(), + nn.Linear(512, 512), + nn.ReLU(), + nn.Linear(512, len(classes)), + nn.Softmax(dim=1) + ) + +loaded_model.load_state_dict(torch.load('model.pth')) +loaded_model.eval() + +# inference +x, y = test_data[0][0], test_data[0][1] +with torch.no_grad(): + pred = loaded_model(x) + predicted, actual = classes[pred[0].argmax(0)], classes[y.argmax(0)] + print(f'Predicted: "{predicted}", Actual: "{actual}"') + + diff --git a/index.rst b/index.rst index c17172771e4..3f616b7800a 100644 --- a/index.rst +++ b/index.rst @@ -437,6 +437,7 @@ Additional Resources :includehidden: :caption: Learning PyTorch + beginner/quickstart_tutorial beginner/deep_learning_60min_blitz beginner/pytorch_with_examples beginner/nn_tutorial diff --git a/preview.sh b/preview.sh old mode 100644 new mode 100755 index d4d4128c234..2bb8f143681 --- a/preview.sh +++ b/preview.sh @@ -5,4 +5,6 @@ sphinx-autobuild --ignore "*.png" \ --ignore "intermediate/*" \ --ignore "prototype/*" \ --ignore "recipes/*" \ + --ignore "*.zip" \ + --ignore "*.ipynb" \ -D plot_gallery=0 -b html "." "_build/html" \ No newline at end of file From 5aa2c2480df9c119220b19d88a90eef8b78160c6 Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Wed, 4 Nov 2020 10:22:03 -0800 Subject: [PATCH 08/55] added staging build for reviews --- .github/workflows/staging.yml | 52 +++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 .github/workflows/staging.yml diff --git a/.github/workflows/staging.yml b/.github/workflows/staging.yml new file mode 100644 index 00000000000..2177440f263 --- /dev/null +++ b/.github/workflows/staging.yml @@ -0,0 +1,52 @@ +name: PyTorch Tutorial Staging + +on: + push: + branches: + - seth-blitz + pull_request: + branches: + - seth-blitz + +jobs: + hugo: + runs-on: ubuntu-latest + env: + WEB_PATH: $web + ACCOUNT: ${{ secrets.stagingaccount }} + KEY: ${{ secrets.stagingkey }} + SOURCEDIR: . 
+ BUILDDIR: _build/html + steps: + - + # checkout + name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 1 + - + # python + name: Python + uses: actions/checkout@v2 + with: + python-version: 3.6 + - + # install requirements + name: install requirements + run: | + pip install --no-cache-dir -r requirements.txt + - + # build site + name: sphinx build + run: | + sphinx-build -D plot_gallery=0 -b html "$SOURCEDIR" "$BUILDDIR" + - + # clear old site + name: clear old site + run : | + az storage blob delete-batch --source $WEB_PATH --account-name $ACCOUNT --account-key $KEY + - + # push to azure storage + name: push to azure storage + run : | + az storage blob upload-batch -s $BUILDDIR -d $WEB_PATH --account-name $ACCOUNT --account-key $KEY From 1a023b4c809b9881ed24288b8890fce504756bfe Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Wed, 4 Nov 2020 10:23:42 -0800 Subject: [PATCH 09/55] corrected yaml error --- .github/workflows/staging.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/staging.yml b/.github/workflows/staging.yml index 2177440f263..6fc46e181b2 100644 --- a/.github/workflows/staging.yml +++ b/.github/workflows/staging.yml @@ -23,7 +23,7 @@ jobs: name: Checkout uses: actions/checkout@v2 with: - fetch-depth: 1 + fetch-depth: '1' - # python name: Python From 35882094a9927643d0b59c826cfe425ccc7f9f10 Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Wed, 4 Nov 2020 10:26:04 -0800 Subject: [PATCH 10/55] another yaml correction --- .github/workflows/staging.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/staging.yml b/.github/workflows/staging.yml index 6fc46e181b2..30e48398899 100644 --- a/.github/workflows/staging.yml +++ b/.github/workflows/staging.yml @@ -20,14 +20,12 @@ jobs: steps: - # checkout - name: Checkout + name: Prep uses: actions/checkout@v2 - with: - fetch-depth: '1' - # python name: Python - uses: actions/checkout@v2 + uses: actions/setup-python@v2 with: python-version: 3.6 - From ebf5aa851aea30921ad799c9844342d2aa82c65e Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Wed, 4 Nov 2020 10:31:22 -0800 Subject: [PATCH 11/55] revised yaml --- .github/workflows/staging.yml | 52 ++++++++++++++++------------------- 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/.github/workflows/staging.yml b/.github/workflows/staging.yml index 30e48398899..07ccec9d6d0 100644 --- a/.github/workflows/staging.yml +++ b/.github/workflows/staging.yml @@ -18,33 +18,27 @@ jobs: SOURCEDIR: . 
BUILDDIR: _build/html steps: - - - # checkout - name: Prep - uses: actions/checkout@v2 + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: 3.6 - - # python - name: Python - uses: actions/setup-python@v2 - with: - python-version: 3.6 - - - # install requirements - name: install requirements - run: | - pip install --no-cache-dir -r requirements.txt - - - # build site - name: sphinx build - run: | - sphinx-build -D plot_gallery=0 -b html "$SOURCEDIR" "$BUILDDIR" - - - # clear old site - name: clear old site - run : | - az storage blob delete-batch --source $WEB_PATH --account-name $ACCOUNT --account-key $KEY - - - # push to azure storage - name: push to azure storage - run : | - az storage blob upload-batch -s $BUILDDIR -d $WEB_PATH --account-name $ACCOUNT --account-key $KEY + # install requirements + name: install requirements + run: | + pip install --no-cache-dir -r requirements.txt + - + # build site + name: sphinx build + run: | + sphinx-build -D plot_gallery=0 -b html "$SOURCEDIR" "$BUILDDIR" + - + # clear old site + name: clear old site + run : | + az storage blob delete-batch --source $WEB_PATH --account-name $ACCOUNT --account-key $KEY + - + # push to azure storage + name: push to azure storage + run : | + az storage blob upload-batch -s $BUILDDIR -d $WEB_PATH --account-name $ACCOUNT --account-key $KEY From fa26938ab25f113bcdcb14a113e34547471014c8 Mon Sep 17 00:00:00 2001 From: sethjuarez Date: Wed, 4 Nov 2020 10:37:29 -0800 Subject: [PATCH 12/55] changed job name --- .github/workflows/staging.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/staging.yml b/.github/workflows/staging.yml index 07ccec9d6d0..5ba350be56b 100644 --- a/.github/workflows/staging.yml +++ b/.github/workflows/staging.yml @@ -9,7 +9,7 @@ on: - seth-blitz jobs: - hugo: + tutorial-staging: runs-on: ubuntu-latest env: WEB_PATH: $web From 4cb5ce24a531cddeb1894c70b0a15c58000ce260 Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Wed, 4 Nov 2020 21:44:40 +0000 Subject: [PATCH 13/55] added tutorial files and work on data and tensors --- .../quickstart/autograd_tutorial.py | 3 + .../quickstart/build_model_tutorial.py | 9 + beginner_source/quickstart/data_tutorial.py | 197 +++++++++++++++++- .../quickstart/optimization_tutorial.py | 9 + .../quickstart/save_load_run_tutorial.py | 9 + beginner_source/quickstart/tensor_tutorial.py | 120 +++++++++++ .../quickstart/transforms_tutorial.py | 9 + beginner_source/quickstart_tutorial.py | 37 ++-- 8 files changed, 375 insertions(+), 18 deletions(-) create mode 100644 beginner_source/quickstart/autograd_tutorial.py create mode 100644 beginner_source/quickstart/build_model_tutorial.py create mode 100644 beginner_source/quickstart/optimization_tutorial.py create mode 100644 beginner_source/quickstart/save_load_run_tutorial.py create mode 100644 beginner_source/quickstart/tensor_tutorial.py create mode 100644 beginner_source/quickstart/transforms_tutorial.py diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py new file mode 100644 index 00000000000..2fb4fcb665e --- /dev/null +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -0,0 +1,3 @@ +""" +Autograd +=================== \ No newline at end of file diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py new file mode 100644 index 00000000000..d8d707d4638 --- /dev/null +++ 
b/beginner_source/quickstart/build_model_tutorial.py
@@ -0,0 +1,9 @@
+"""
+Build Model Tutorial
+===================
+
+More to come
+
+"""
+
+x = 5
\ No newline at end of file
diff --git a/beginner_source/quickstart/data_tutorial.py b/beginner_source/quickstart/data_tutorial.py
index 18024f126aa..f84ad4f1794 100644
--- a/beginner_source/quickstart/data_tutorial.py
+++ b/beginner_source/quickstart/data_tutorial.py
@@ -1,9 +1,194 @@
-"""
-Data Tutorial
-===================
+##############################################
+# Datasets & Dataloaders
+# ---------------------
+#
+# Getting Started With Data in PyTorch
+#
+# Before we can even think about building a model with PyTorch, we need to first learn how to load and process data. Data can be sourced from local files, cloud datastores and database queries. It comes in all sorts of forms and formats, from structured tables to image, audio, text and video files and more.
+#
+# ![types of data](../images/typesofdata.PNG)
+#
+# Different data types require different Python libraries to load and process them, such as [openCV](https://opencv.org/) and [PIL](https://pillow.readthedocs.io/en/stable/reference/Image.html) for images, [NLTK](https://www.nltk.org/) and [spaCy](https://spacy.io/) for text, and [Librosa](https://librosa.org/doc/latest/index.html) for audio.
+#
+# If not properly organized, code for processing data samples can quickly get messy and become hard to maintain. Since different model architectures can be applied to many data types, we ideally want our dataset code to be decoupled from our model training code. To this end, PyTorch provides a simple Datasets interface for loading and managing collections of data.
+#
+# A whole set of example datasets that implement this interface, such as Fashion MNIST, is built into PyTorch extension libraries. These are useful for benchmarking and testing your models before training on your own custom datasets.
+#
+# You can find some of them below.
+# - [Image Datasets](https://pytorch.org/docs/stable/torchvision/datasets.html)
+# - [Text Datasets](https://pytorch.org/text/datasets.html)
+# - [Audio Datasets](https://pytorch.org/audio/datasets.html)
+#
+# Iterating through a Dataset
+########
+#
+# Once we have a Dataset we can index it manually like a list: ```clothing[index]```.
+#
+# Here is an example of how to load the Fashion MNIST dataset from torchvision. 
+#
+#

-More to come
+import torch
+from torch.utils.data import Dataset, DataLoader
+import torchvision.datasets as datasets
+import matplotlib.pyplot as plt
+import numpy as np

-"""
+clothing = datasets.FashionMNIST('data', train=True, download=True)
+labels_map = {0 : 'T-Shirt', 1 : 'Trouser', 2 : 'Pullover', 3 : 'Dress', 4 : 'Coat', 5 : 'Sandal', 6 : 'Shirt',
+              7 : 'Sneaker', 8 : 'Bag', 9 : 'Ankle Boot'}
+figure = plt.figure(figsize=(8,8))
+cols, rows = 3, 3
+for i in range(1, cols*rows +1):
+    sample_idx = np.random.randint(len(clothing))
+    img = clothing[sample_idx][0][0,:,:]
+    figure.add_subplot(rows, cols, i)
+    plt.title(labels_map[clothing[sample_idx][1]])
+    plt.axis('off')
+    plt.imshow(img, cmap='gray')
+plt.show()

-x = 5
\ No newline at end of file
+######################################
+#
+# ![Fashion MNIST](../images/fashion_mnist.png)
+# Creating a Custom Dataset
+########
+#
+# To work with your own data let's look at a simple custom image Dataset implementation:
+
+import os
+import torch
+import pandas as pd
+from torch.utils.data import Dataset
+from torchvision import transforms, utils
+from torchvision.io import read_image
+
+class CustomImageDataset(Dataset):
+    """Custom Image PyTorch Dataset."""
+
+    def __init__(self, annotations_file, img_dir, transform=None):
+        """
+        Args:
+            annotations_file (string): Path to file containing image annotations.
+            img_dir (string): Directory with all the images. 
+            transform (callable, optional): Optional transform to be applied on a sample; see the next section for more information.
+        """
+        self.img_labels = pd.read_csv(annotations_file)
+        self.img_dir = img_dir
+        self.transform = transform
+
+    def __len__(self):
+        return len(self.img_labels)
+
+    def __getitem__(self, idx):
+        # Return a single transformed item of our dataset.
+        # This is where the processing logic for reading our images goes
+        if torch.is_tensor(idx):
+            idx = idx.tolist()
+
+        img_name = os.path.join(self.img_dir,
+                                self.img_labels.iloc[idx, 0])
+        image = read_image(img_name)
+        label = self.img_labels.iloc[idx, 1:]
+        sample = {'image': image, 'label': label}
+
+        if self.transform:
+            sample = self.transform(sample)
+
+        return sample
+
+###################################################
+# Let's break down what's happening in this sample implementation of a PyTorch Dataset, function by function.
+#
+# Imports
+########
+#
+# Import os for file handling, torch for PyTorch, [pandas](https://pandas.pydata.org/) for loading labels, [torchvision](https://pytorch.org/blog/pytorch-1.7-released/) to read image files, and Dataset to implement the Dataset interface.
+#
+# Example:
+import os
+import torch
+import pandas as pd
+from torchvision.io import read_image
+from torch.utils.data import Dataset
+
+###################################################
+# Init
+########
+#
+# The init function is used for all the first-time operations that run when our Dataset is loaded. In this case we use it to load our annotation labels into memory and to keep track of the directory of our image files. Note that different types of data can take different init inputs; you are not limited to an annotations file, a directory path and transforms, but for images this is a standard practice.
+#
+# Example:
+#

+def __init__(self, annotations_file, img_dir, transform=None):
+    """
+    Args:
+        annotations_file (string): Path to file containing image annotations.
+        img_dir (string): Directory with all the images.
+        transform (callable, optional): Optional transform to be applied on a sample; see the next section for more information.
+    """
+    self.img_labels = pd.read_csv(annotations_file)
+    self.img_dir = img_dir
+    self.transform = transform
+################################################
+# __len__
+# ########
+# The __len__ function is very simple: we just need to return the number of samples in our dataset.
+#
+# Example:

+    def __len__(self):
+        return len(self.img_labels)
+
+###################################################
+# __getitem__
+########
+# The __getitem__ function is the most important function in the Dataset interface. It takes a tensor or an index as input and returns a loaded sample from your dataset at the given indices.
+#
+# In this sample, if provided a tensor we convert the tensor to a list containing our index. We then load the file at the given index from our image directory, as well as the image label from our pandas annotations DataFrame. This image and label are then wrapped in a single sample dictionary, which we can apply a Transform to and return. To learn more about Transforms see the next section of the Blitz.
+#
+# Example:
+def __getitem__(self, idx):
+    # Return a single transformed item of our dataset.
+    # This is where the processing logic for reading our images goes
+    if torch.is_tensor(idx):
+        idx = idx.tolist()
+    img_name = os.path.join(self.img_dir,
+                            self.img_labels.iloc[idx, 0])
+    image = read_image(img_name)
+    label = self.img_labels.iloc[idx, 1:]
+    sample = {'image': image, 'label': label}
+    if self.transform:
+        sample = self.transform(sample)
+    return sample
+
+########################################################
+# Preparing your data for training with DataLoaders
+#########
+#
+# Now we have an organized mechanism for managing data, which is great, but there is still a lot of manual work we would have to do to train a model with our Dataset.
+#
+# For example we would have to manually maintain the code for:
+# - Batching
+# - Shuffling
+# - Parallel batch distribution
+#
+# The PyTorch Dataloader ```torch.utils.data.DataLoader``` is an iterator that handles all of this complexity for us, letting us load a dataset and focus on training our model; a minimal sketch of consuming it follows the links below.

+dataloader = DataLoader(clothing, batch_size=4,
+                        shuffle=True, num_workers=0)

+################################
+# With this we have all we need to know to load and process data of any kind in PyTorch to train deep learning models.
+#
+#
+# More help with the FashionMNIST Pytorch Blitz
+########
+# [Tensors]()
+# [DataSets and DataLoaders]()
+# [Transformations]()
+# [Choosing Model]()
+# [Optimization Loop]()
+# [AutoGrad]()
+# [Back to FashionMNIST main code base]()
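+
+#################################################
+# A minimal sketch of consuming the DataLoader above, assuming the dataset is
+# rebuilt with a ToTensor transform so that samples collate into batched
+# tensors (this illustration is not part of the original tutorial):
+
+from torch.utils.data import DataLoader
+from torchvision import datasets, transforms
+
+tensor_clothing = datasets.FashionMNIST('data', train=True, download=True,
+                                        transform=transforms.ToTensor())
+tensor_loader = DataLoader(tensor_clothing, batch_size=4, shuffle=True)
+images, labels = next(iter(tensor_loader))
+print(images.shape)  # torch.Size([4, 1, 28, 28])
+print(labels)        # a tensor of 4 integer class labels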
diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py
new file mode 100644
index 00000000000..6a89748cc25
--- /dev/null
+++ b/beginner_source/quickstart/optimization_tutorial.py
@@ -0,0 +1,9 @@
+"""
+Optimization Tutorial
+===================
+
+More to come
+
+"""
+
+x = 5
\ No newline at end of file
diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py
new file mode 100644
index 00000000000..3997ea65b29
--- /dev/null
+++ b/beginner_source/quickstart/save_load_run_tutorial.py
@@ -0,0 +1,9 @@
+"""
+Save Load Run Tutorial
+===================
+
+More to come
+
+"""
+
+x = 5
\ No newline at end of file
diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py
new file mode 100644
index 00000000000..ef25beb119d
--- /dev/null
+++ b/beginner_source/quickstart/tensor_tutorial.py
@@ -0,0 +1,120 @@
+"""
+Tensors and Operations
+===================
+
+When training neural network models for real world tasks, we need to be able to effectively represent different types of input data: sets of numerical features, images, videos, sounds, etc. All those different input types can be represented as multi-dimensional arrays of numbers that are called tensors.
+
+A tensor is the basic computational unit in PyTorch. It is very similar to a NumPy array, and supports similar operations. However, there are two very important features of Torch tensors that make them especially useful for training large-scale neural networks:
+
+ - Tensor operations can be performed on GPU using CUDA
+ - Tensor operations support automatic differentiation using `AutoGrad `_
+
+Conversion between Torch tensors and NumPy arrays can be done easily:
+"""
+
+import torch
+import numpy as np
+
+np_array = np.arange(10)
+tensor = torch.from_numpy(np_array)
+
+print(f"Tensor={tensor}, Array={tensor.numpy()}")
+
+#################################################################
+# .. code:: python
+#    Output: Tensor=tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=torch.int32), Array=[0 1 2 3 4 5 6 7 8 9]
+#
+# .. note:: When using CPU for computations, tensors converted from arrays share the same memory for data. Thus, changing the underlying array will also affect the tensor.
+#
+#
+# Creating Tensors
+# -------------
+# The fastest way to create a tensor is to define an uninitialized tensor; the values of this tensor are not set, and depend on whatever data was there in memory:
+#

+x = torch.empty(3,6)
+print(x)
+
+############################################################################
+# .. code:: python
+#    Output: tensor([[-1.3822e-06, 6.5301e-43, -1.3822e-06, 6.5301e-43, -1.4041e-06,
+#          6.5301e-43],
+#        [-1.3855e-06, 6.5301e-43, -2.9163e-07, 6.5301e-43, -2.9163e-07,
+#          6.5301e-43],
+#        [-1.4066e-06, 6.5301e-43, -1.3788e-06, 6.5301e-43, -2.9163e-07,
+#          6.5301e-43]])
+#
+#
+# In practice, we often want to create tensors initialized to some values, such as zeros, ones or random values. 
Note that you can also specify the type of elements using the dtype parameter, choosing one of the torch types:

+x = torch.randn(3,5)
+print(x)
+y = torch.zeros(3,5,dtype=torch.int)
+print(y)
+z = torch.ones(3,5,dtype=torch.double)
+print(z)
+
+######################################################################
+# Output:
+# tensor([[-1.0166, -0.6828,  1.8886, -1.2115,  0.0202],
+#        [-1.1278,  0.7447,  0.4260, -2.1909,  0.5653],
+#        [ 0.0562, -0.1393,  0.6145, -0.6181,  0.1879]])
+# tensor([[0, 0, 0, 0, 0],
+#        [0, 0, 0, 0, 0],
+#        [0, 0, 0, 0, 0]], dtype=torch.int32)
+# tensor([[1., 1., 1., 1., 1.],
+#        [1., 1., 1., 1., 1.],
+#        [1., 1., 1., 1., 1.]], dtype=torch.float64)
+#
+#
+# You can also create random tensors with values sampled from different distributions, as described `in documentation. `_
+#
+# Similarly to NumPy, you can use eye to create a diagonal identity matrix:

+print(torch.eye(10))
+
+################################################################
+# Output:
+# tensor([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+#        [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
+#        [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
+#        [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
+#        [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
+#        [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
+#        [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
+#        [0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
+#        [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
+#        [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
+#
+#
+# You can also create new tensors with the same properties or size as existing tensors:
+#

+print(z.new_ones(2,2))
+print(torch.zeros_like(x,dtype=torch.long))
+
+############################################################################
+# Tensor Operations
+# -------------
+# Tensors support all basic arithmetic operations, which can be specified in different ways:
+#
+# - Using operators, such as +, -, etc.
+# - Using functions such as add, mult, etc. Functions can either return values, or store them in the specified output variable (using the out= parameter)
+# - In-place operations, which modify one of the arguments. Those operations have _ appended to their name, e.g. add_.
+#
+# A complete reference to all tensor operations can be found in the documentation.
+#
+# Let us see examples of those operations on two tensors, x and y; a minimal sketch of the three styles:

+x = torch.ones(2,2)
+y = torch.rand(2,2)
+print(x + y)            # operator form
+print(torch.add(x, y))  # function form, returning a new tensor
+result = torch.empty(2,2)
+torch.add(x, y, out=result)  # function form, storing into the out= variable
+print(result)
+y.add_(x)               # in-place form, modifies y
+print(y)
\ No newline at end of file
diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py
new file mode 100644
index 00000000000..0f924fd7b0e
--- /dev/null
+++ b/beginner_source/quickstart/transforms_tutorial.py
@@ -0,0 +1,9 @@
+"""
+Transforms Tutorial
+===================
+
+More to come
+
+"""
+
+x = 5
\ No newline at end of file
diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py
index 2968ce35e15..a12b96ea601 100644
--- a/beginner_source/quickstart_tutorial.py
+++ b/beginner_source/quickstart_tutorial.py
@@ -2,13 +2,7 @@
 PyTorch Quickstart
 ===================
 
-The basic machine learning concepts in any framework should include:
-
-1. Working with data
-2. Creating models
-3. Optimizing Parameters
-4. Saving Models
-5. Loading Models
+The basic machine learning concepts in any framework should include: Working with data, Creating models, Optimizing Parameters, Saving and Loading Models
 
 """
 
@@ -27,11 +21,8 @@
 # These ``DataSet`` objects include a ``transforms`` mechanism to
 # modify data in-place. 
- -# image classes classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"] -# data used for training training_data = datasets.FashionMNIST('data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor()]), target_transform=transforms.Compose([ @@ -39,7 +30,6 @@ ]) ) -# data used for testing test_data = datasets.FashionMNIST('data', train=False, download=True, transform=transforms.Compose([transforms.ToTensor()]), target_transform=transforms.Compose([ @@ -59,7 +49,9 @@ ###################################################################### -# More details `DataSet, DataLoader, and transforms `_ +# More details `DataSet and DataLoader `_ +# +# More details `Tensors `_ # # Creating Models # --------------- @@ -85,6 +77,8 @@ print(model) ###################################################################### +# More details `on building the model `_ +# # Optimizing Parameters # --------------------- # @@ -145,6 +139,8 @@ def test(dataloader, model): print('Done!') ###################################################################### +# More details `optimization and training loops `_ +# # Saving Models # ------------- # @@ -161,6 +157,8 @@ def test(dataloader, model): print('Saved onnx model to model.onnx') ###################################################################### +# More details `Saving loading and running `_ +# # Loading Models # ---------------------------- # @@ -190,4 +188,19 @@ def test(dataloader, model): predicted, actual = classes[pred[0].argmax(0)], classes[y.argmax(0)] print(f'Predicted: "{predicted}", Actual: "{actual}"') +###################################################################### +# +# More help with the FashionMNIST Pytorch Blitz +# `Tensors `_ +# +# `DataSets and DataLoaders `_ +# +# `Transformations `_ +# +# `Building the Model `_ +# +# `Optimization Loop and AutoGrad `_ +# +# `Save, Load and Use Model `_ +# From f86760a80d50b1013353a03d9a45a1f7fe72f100 Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Thu, 5 Nov 2020 01:06:31 +0000 Subject: [PATCH 14/55] fixing syntax issues --- ...ial.py => autograd_quickstart_tutorial.py} | 0 beginner_source/quickstart/data_tutorial.py | 103 +++++++----------- ...orial.py => tensor_quickstart_tutorial.py} | 0 3 files changed, 42 insertions(+), 61 deletions(-) rename beginner_source/quickstart/{autograd_tutorial.py => autograd_quickstart_tutorial.py} (100%) rename beginner_source/quickstart/{tensor_tutorial.py => tensor_quickstart_tutorial.py} (100%) diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_quickstart_tutorial.py similarity index 100% rename from beginner_source/quickstart/autograd_tutorial.py rename to beginner_source/quickstart/autograd_quickstart_tutorial.py diff --git a/beginner_source/quickstart/data_tutorial.py b/beginner_source/quickstart/data_tutorial.py index f84ad4f1794..93b4371c499 100644 --- a/beginner_source/quickstart/data_tutorial.py +++ b/beginner_source/quickstart/data_tutorial.py @@ -1,26 +1,25 @@ -############################################## +################################################################# # Datasets & Dataloaders -# --------------------- -# -# Getting Started With Data in PyTorch +################################################################# +# Getting Started With Data in PyTorch # # Before we can even think about building a model with PyTorch, we need to first learn how to load and process data. 
Data can be sourced from local files, cloud datastores and database queries. It comes in all sorts of forms and formats, from structured tables to image, audio, text and video files and more.
 #
-# ![types of data](../images/typesofdata.PNG)
+# ADD IMAGE HERE /images/typesofdata.PNG
 #
-# Different data types require different Python libraries to load and process them, such as [openCV](https://opencv.org/) and [PIL](https://pillow.readthedocs.io/en/stable/reference/Image.html) for images, [NLTK](https://www.nltk.org/) and [spaCy](https://spacy.io/) for text, and [Librosa](https://librosa.org/doc/latest/index.html) for audio.
+# Different data types require different Python libraries to load and process them, such as `openCV `_ and `PIL `_ for images, `NLTK `_ and `spaCy `_ for text, and `Librosa `_ for audio.
 #
 # If not properly organized, code for processing data samples can quickly get messy and become hard to maintain. Since different model architectures can be applied to many data types, we ideally want our dataset code to be decoupled from our model training code. To this end, PyTorch provides a simple Datasets interface for loading and managing collections of data.
 #
 # A whole set of example datasets that implement this interface, such as Fashion MNIST, is built into PyTorch extension libraries. These are useful for benchmarking and testing your models before training on your own custom datasets.
 #
 # You can find some of them below.
-# - [Image Datasets](https://pytorch.org/docs/stable/torchvision/datasets.html)
-# - [Text Datasets](https://pytorch.org/text/datasets.html)
-# - [Audio Datasets](https://pytorch.org/audio/datasets.html)
+# * `Image Datasets `_
+# * `Text Datasets `_
+# * `Audio Datasets `_
 #
-# Iterating through a Dataset
-########
+# Iterating through a Dataset
+#################################################################
 #
 # Once we have a Dataset we can index it manually like a list: ```clothing[index]```.
 #
 # Here is an example of how to load the Fashion MNIST dataset from torchvision.
 #
 #

 import torch
 from torch.utils.data import Dataset, DataLoader
 import torchvision.datasets as datasets
 import matplotlib.pyplot as plt
 import numpy as np

 clothing = datasets.FashionMNIST('data', train=True, download=True)
-labels_map = {0 : 'T-Shirt', 1 : 'Trouser', 2 : 'Pullover', 3 : 'Dress', 4 : 'Coat', 5 : 'Sandal', 6 : 'Shirt',
-              7 : 'Sneaker', 8 : 'Bag', 9 : 'Ankle Boot'}
+labels_map = {0 : 'T-Shirt', 1 : 'Trouser', 2 : 'Pullover', 3 : 'Dress', 4 : 'Coat', 5 : 'Sandal', 6 : 'Shirt', 7 : 'Sneaker', 8 : 'Bag', 9 : 'Ankle Boot'}
 figure = plt.figure(figsize=(8,8))
 cols, rows = 3, 3
 for i in range(1, cols*rows +1):
     sample_idx = np.random.randint(len(clothing))
     img = clothing[sample_idx][0][0,:,:]
     figure.add_subplot(rows, cols, i)
     plt.title(labels_map[clothing[sample_idx][1]])
     plt.axis('off')
     plt.imshow(img, cmap='gray')
 plt.show()

-######################################
+#################################################################
 #
-# ![Fashion MNIST](../images/fashion_mnist.png)
+# Add Image Here
+#
 # Creating a Custom Dataset
-########
+#################################################################
 #
 # To work with your own data let's look at a simple custom image Dataset implementation:

 import os
 import torch
 import pandas as pd
 from torch.utils.data import Dataset
 from torchvision import transforms, utils
 from torchvision.io import read_image

 class CustomImageDataset(Dataset):
-    """Custom Image PyTorch Dataset."""
-
     def __init__(self, annotations_file, img_dir, transform=None):
-        """
-        Args:
-            annotations_file (string): Path to file containing image annotations.
-            img_dir (string): Directory with all the images.
-            transform (callable, optional): Optional transform to be applied on a sample; see the next section for more information. 
-        """
         self.img_labels = pd.read_csv(annotations_file)
         self.img_dir = img_dir
         self.transform = transform
 
     def __len__(self):
         return len(self.img_labels)
 
     def __getitem__(self, idx):
-        # Return a single transformed item of our dataset.
-        # This is where the processing logic for reading our images goes
         if torch.is_tensor(idx):
             idx = idx.tolist()
 
         img_name = os.path.join(self.img_dir,
                                 self.img_labels.iloc[idx, 0])
         image = read_image(img_name)
         label = self.img_labels.iloc[idx, 1:]
         sample = {'image': image, 'label': label}
 
         if self.transform:
             sample = self.transform(sample)
 
         return sample
 
-###################################################
+#################################################################
 # Let's break down what's happening in this sample implementation of a PyTorch Dataset, function by function.
 #
 # Imports
-########
+#################################################################
 #
 # Import os for file handling, torch for PyTorch, [pandas](https://pandas.pydata.org/) for loading labels, [torchvision](https://pytorch.org/blog/pytorch-1.7-released/) to read image files, and Dataset to implement the Dataset interface.
 #
 # Example:
 import os
 import torch
 import pandas as pd
 from torchvision.io import read_image
 from torch.utils.data import Dataset
 
-###################################################
+#################################################################
 # Init
-########
+#################################################################
 #
 # The init function is used for all the first-time operations that run when our Dataset is loaded. In this case we use it to load our annotation labels into memory and to keep track of the directory of our image files. Note that different types of data can take different init inputs; you are not limited to an annotations file, a directory path and transforms, but for images this is a standard practice.
 #
 # Example:
 #
 
 def __init__(self, annotations_file, img_dir, transform=None):
-    """
-    Args:
-        annotations_file (string): Path to file containing image annotations.
-        img_dir (string): Directory with all the images.
-        transform (callable, optional): Optional transform to be applied on a sample; see the next section for more information.
-    """
     self.img_labels = pd.read_csv(annotations_file)
     self.img_dir = img_dir
     self.transform = transform
+
+#################################################################
 # __len__
-# ########
+#################################################################
 # The __len__ function is very simple: we just need to return the number of samples in our dataset.
 #
 # Example:
 
-    def __len__(self):
-        return len(self.img_labels)
+def __len__(self):
+    return len(self.img_labels)
 
-###################################################
+#################################################################
 # __getitem__
-########
+#################################################################
 # The __getitem__ function is the most important function in the Dataset interface. It takes a tensor or an index as input and returns a loaded sample from your dataset at the given indices.
 #
 # In this sample, if provided a tensor we convert the tensor to a list containing our index. We then load the file at the given index from our image directory, as well as the image label from our pandas annotations DataFrame. This image and label are then wrapped in a single sample dictionary, which we can apply a Transform to and return. To learn more about Transforms see the next section of the Blitz.
 #
 # Example:
 def __getitem__(self, idx):
-    # Return a single transformed item of our dataset. 
-    # This is where the processing logic for reading our images goes
     if torch.is_tensor(idx):
         idx = idx.tolist()
     img_name = os.path.join(self.img_dir,
                             self.img_labels.iloc[idx, 0])
     image = read_image(img_name)
     label = self.img_labels.iloc[idx, 1:]
     sample = {'image': image, 'label': label}
     if self.transform:
         sample = self.transform(sample)
     return sample
 
-########################################################
+#################################################################
 # Preparing your data for training with DataLoaders
-#########
+#################################################################
 #
 # Now we have an organized mechanism for managing data, which is great, but there is still a lot of manual work we would have to do to train a model with our Dataset.
 #
 # For example we would have to manually maintain the code for:
-# - Batching
-# - Shuffling
-# - Parallel batch distribution
+# * Batching
+# * Shuffling
+# * Parallel batch distribution
 #
+# The PyTorch Dataloader ```torch.utils.data.DataLoader``` is an iterator that handles all of this complexity for us, letting us load a dataset and focus on training our model.
+#
+# dataloader = DataLoader(clothing, batch_size=4, shuffle=True, num_workers=0)
 
-dataloader = DataLoader(clothing, batch_size=4,
-                        shuffle=True, num_workers=0)
-
-################################
+#################################################################
 # With this we have all we need to know to load and process data of any kind in PyTorch to train deep learning models.
 #
 #
 # More help with the FashionMNIST Pytorch Blitz
-########
-# [Tensors]()
-# [DataSets and DataLoaders]()
-# [Transformations]()
-# [Choosing Model]()
-# [Optimization Loop]()
-# [AutoGrad]()
-# [Back to FashionMNIST main code base]()
+# `Tensors <>`_
+# `DataSets and DataLoaders <>`_
+# `Transformations <>`_
+# `Choosing Model <>`_
+# `Optimization Loop <>`_
+# `AutoGrad <>`_
+# `Back to FashionMNIST main code base <>`_
diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_quickstart_tutorial.py
similarity index 100%
rename from beginner_source/quickstart/tensor_tutorial.py
rename to beginner_source/quickstart/tensor_quickstart_tutorial.py

From fccac71e6b92bc7458012bd04a0c1aecdf9395c0 Mon Sep 17 00:00:00 2001
From: Cassie <46505951+cassieview@users.noreply.github.com>
Date: Thu, 5 Nov 2020 01:18:28 +0000
Subject: [PATCH 15/55] fix syntax

---
 beginner_source/quickstart/data_tutorial.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/beginner_source/quickstart/data_tutorial.py b/beginner_source/quickstart/data_tutorial.py
index 93b4371c499..53f2186a185 100644
--- a/beginner_source/quickstart/data_tutorial.py
+++ b/beginner_source/quickstart/data_tutorial.py
@@ -21,7 +21,7 @@
 # Iterating through a Dataset
 #################################################################
 #
-# Once we have a Dataset we can index it manually like a list ```clothing[index]```.
+# Once we have a Dataset we can index it manually like a list *clothing[index]*.
 #
 # Here is an example of how to load the Fashion MNIST dataset from torchvision.
 #
@@ -156,7 +156,7 @@ def __getitem__(self, idx):
 # * Batching
 # * Shuffling
 # * Parallel batch distribution
 #
-# The PyTorch Dataloader ```torch.utils.data.DataLoader``` is an iterator that handles all of this complexity for us, letting us load a dataset and focus on training our model.
+# The PyTorch Dataloader *torch.utils.data.DataLoader* is an iterator that handles all of this complexity for us, letting us load a dataset and focus on training our model. 
# # dataloader = DataLoader(clothing, batch_size=4, shuffle=True, num_workers=0) From d527c8c557d734d8cddbd52da63964e511575fa8 Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Thu, 5 Nov 2020 01:27:21 +0000 Subject: [PATCH 16/55] renamed file --- .../{data_tutorial.py => data_quickstart_tutorial.py} | 0 beginner_source/quickstart_tutorial.py | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename beginner_source/quickstart/{data_tutorial.py => data_quickstart_tutorial.py} (100%) diff --git a/beginner_source/quickstart/data_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py similarity index 100% rename from beginner_source/quickstart/data_tutorial.py rename to beginner_source/quickstart/data_quickstart_tutorial.py diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index a12b96ea601..24482b7ea5d 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -49,7 +49,7 @@ ###################################################################### -# More details `DataSet and DataLoader `_ +# More details `DataSet and DataLoader `_ # # More details `Tensors `_ # @@ -193,7 +193,7 @@ def test(dataloader, model): # More help with the FashionMNIST Pytorch Blitz # `Tensors `_ # -# `DataSets and DataLoaders `_ +# `DataSets and DataLoaders `_ # # `Transformations `_ # From 031b9df9b3aee0763954e91202d36b07c0041c9c Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Thu, 5 Nov 2020 02:20:28 +0000 Subject: [PATCH 17/55] syntax fix --- beginner_source/quickstart/data_quickstart_tutorial.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index 53f2186a185..7369e368181 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -1,5 +1,6 @@ -################################################################# -# Datasets & Dataloaders +""" +Datasets & Dataloaders +""" ################################################################# # Getting Started With Data in PyTorch # From bd782b0a62286af1d891d1c5e1b6e8bc8c6f001b Mon Sep 17 00:00:00 2001 From: Dmitri Soshnikov Date: Thu, 5 Nov 2020 16:02:27 +0300 Subject: [PATCH 18/55] Add tensors, autograd tutorials --- _static/img/quickstart/comp-graph.png | Bin 0 -> 14368 bytes .../autograd_quickstart_tutorial.py | 3 - .../quickstart/autograd_tutorial.py | 234 ++++++++++++++++ .../quickstart/tensor_quickstart_tutorial.py | 120 --------- beginner_source/quickstart/tensor_tutorial.py | 250 ++++++++++++++++++ beginner_source/quickstart_tutorial.py | 7 +- 6 files changed, 490 insertions(+), 124 deletions(-) create mode 100644 _static/img/quickstart/comp-graph.png delete mode 100644 beginner_source/quickstart/autograd_quickstart_tutorial.py create mode 100644 beginner_source/quickstart/autograd_tutorial.py delete mode 100644 beginner_source/quickstart/tensor_quickstart_tutorial.py create mode 100644 beginner_source/quickstart/tensor_tutorial.py diff --git a/_static/img/quickstart/comp-graph.png b/_static/img/quickstart/comp-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..cfa6163d58a85baea0c9d90ff583ec956be11ec1 GIT binary patch literal 14368 zcmeHuWmMbE)-O(r7T4kq#fwWR?nO#*DOMbcy9amo0s%^Kx1d3ayHg;z6bJ>1!wu~@ 
zJ7S9$>T&KZrO9$z_h?|(EkdX=F=w_1IHPrNF(|p5Q2}w^W&m**D-AfVuICLn^pnt9 zT5(7T34lw|8+?0V1zG$JxLjJ1Ec5J->k-o3Aqu4}JkF1QbkBku-l=tj z#KTyWDQJ_D^p#Qjswu!f2`y;k*q!mZw`RuR@J8r)|JkFH;b)r;NZeXsc&klntj<77 zOIOH5r|$6w9~k|0e!ZsV&3%Rs1~{Ez5}NR;>fitulQM^rV62;ZI$3 z#nr=czKlV6jXcQ$2j;ZELAD3AE2IvR{KAu>KGJbT>6xV9ZJ|=B{EU9f^K(h8fUMRm z?tIR(Z2MhXbn$HHR`jif|8ZrLE|%--dM5T2zM^?Owhseshg|=Qwg2Dm5`{`k=Z`2! X@LtY6@}p0G`__. +# + +print(z.grad_fn,loss.grad_fn,sep='\n') + +###################################################################### +# Computing Gradients +# ------------------- +# +# To optimize weights of parameters in the neural network, we need to +# compute the derivatives of our loss function with respect to parameters, +# namely, we need :math:`\frac{\partial loss}{\partial w}` and +# :math:`\frac{\partial loss}{\partial b}` under some fixed values of +# ``x`` and ``y``. To compute those derivatives, we call +# ``loss.backward()``, and then retrieve the values from ``w.grad`` and +# ``b.grad``: +# + +loss.backward() +print(w.grad) +print(b.grad) + + +###################################################################### +# **Notes:** \* We can only obtain the ``grad`` properties for the leaf +# nodes of the computational graph, which have ``requires_grad`` property +# set to ``True``. For all other nodes in our graph gradients will not be +# available. \* We can only perform gradient calculations using +# ``backward`` once on a given graph, for performance reasons. If we need +# to do several ``backward`` calls on the same graph, we need to pass +# ``retain_graph=True`` to the ``backward`` call. +# + + +###################################################################### +# Tensor Gradients and Jacobian Products +# -------------------------------------- +# +# In many cases, we have a scalar loss function, and we need to compute +# the gradient with respect to some parameters. However, there are cases +# when the output function is an arbitrary tensor. In this case, PyTorch +# allows you to compute so-called **Jacobian product**, and not the actual +# gradient. +# +# For a vector function :math:`\vec{y}=f(\vec{x})`, where +# :math:`\vec{x}=\langle x_1,\dots,x_n\rangle` and +# :math:`\vec{y}=\langle y_1,\dots,y_m\rangle`, a gradient of +# :math:`\vec{y}` with respect to :math:`\vec{x}` is given by **Jacobian +# matrix**: +# +# .. math:: +# +# +# \begin{align}J=\left(\begin{array}{ccc} +# \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{1}}{\partial x_{n}}\\ +# \vdots & \ddots & \vdots\\ +# \frac{\partial y_{m}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}} +# \end{array}\right)\end{align} +# +# Instead of computing the Jacobian matrix itself, PyTorch allows you to +# compute **Jacobian Product** :math:`v^T\cdot J` for a given input vector +# :math:`v=(v_1 \dots v_m)`. This is achieved by calling ``backward`` with +# :math:`v` as an argument. 
The size of :math:`v` should be the same as +# the size of the original tensor, with respect to which we want to +# compute the product: +# + +inp = torch.eye(5,requires_grad=True) +out = (inp+1).pow(2) +out.backward(torch.ones_like(inp),retain_graph=True) +print("First call\n",inp.grad) +out.backward(torch.ones_like(inp),retain_graph=True) +print("\nSecond call\n",inp.grad) +inp.grad.zero_() +out.backward(torch.ones_like(inp),retain_graph=True) +print("\nCall after zeroing gradients\n",inp.grad) + + +###################################################################### +# Notice that when we call ``backward`` for the second time with the same +# argument, the value of the gradient is different. This happens because +# when doing ``backward`` propagation, PyTorch **accumulates the +# gradients**, i.e. the value of computed gradients is added to the +# ``grad`` property of all leaf nodes of computational graph. If you want +# to compute the proper gradients, you need to zero out the ``grad`` +# property before. In real-life training an *optimizer* helps us to do +# this. +# +# **Note:** Previously we were calling ``backward()`` function without +# parameters. This is essentially equivalent to calling +# ``backward(torch.tensor(1.0))``, which is a useful way to compute the +# gradients in case of a scalar-valued function, such as loss during +# neural network training. +# + + +###################################################################### +# Disabling Gradient Tracking +# --------------------------- +# +# By default, all tensors with ``requires_grad=True`` are tracking their +# computational history and support gradient computation. However, there +# are some cases when we do not need to do that, for example, when we have +# trained the model and just want to apply it to some input data, i.e. we +# only want to do *forward* computations through the network. We can stop +# tracking computations by surrounding our computation code with +# ``with torch.no_grad()`` block: +# + +z = torch.matmul(x,w)+b +print(z.requires_grad) + +with torch.no_grad(): + z = torch.matmul(x,w)+b +print(z.requires_grad) + + +###################################################################### +# Another way to achieve the same result is to use the ``detach()`` method +# on the tensor: +# + +z = torch.matmul(x,w)+b +z_det = z.detach() +print(z_det.requires_grad) + + +###################################################################### +# All forward-pass computations on tensors that do not track gradients +# would be more efficient. +# + + +###################################################################### +# Example of Gradient Descent +# --------------------------- +# +# Let's use the AutoGrad functionality to minimize a simple function of +# two variables :math:`f(x_1,x_2)=(x_1-3)^2+(x_2+2)^2`. We will use the +# ``x`` tensor to represent the coordinates of a point. To do the gradient +# descent, we start with some initial value :math:`x^{(0)}=(0,0)`, and +# compute each consecutive step using: +# +# .. math:: +# +# +# x^{(n+1)} = x^{(n)} - \eta\nabla f +# +# Here :math:`\eta` is so-called **learning rate** (we will call it ``lr`` +# in our code), and +# :math:`\nabla f = (\frac{\partial f}{\partial x_1},\frac{\partial f}{\partial x_2})` +# is the gradient of :math:`f`. 
+# +# We will start by defining the initial value of ``x`` and the function +# ``f``: +# + +x = torch.zeros(2,requires_grad=True) +f = lambda x : (x-torch.tensor([3,-2])).pow(2).sum() +lr = 0.1 + + +###################################################################### +# For the gradient descent, let's do 15 iterations. On each iteration, we +# will update the coordinate tensor ``x`` and print its coordinates to +# make sure that we are approaching the minimum: +# + +for i in range(15): + y = f(x) + y.backward() + gr = x.grad + x.data.add_(-lr*gr) + x.grad.zero_() + print("Step {}: x[0]={}, x[1]={}".format(i,x[0],x[1])) + + +###################################################################### +# As you can see, we have obtained the values close to the optimal point +# :math:`(3,-2)`. Training a neural network is in fact a very similar +# process, we will need to do a number of iterations to minimize the value +# of **loss function**. +# + diff --git a/beginner_source/quickstart/tensor_quickstart_tutorial.py b/beginner_source/quickstart/tensor_quickstart_tutorial.py deleted file mode 100644 index ef25beb119d..00000000000 --- a/beginner_source/quickstart/tensor_quickstart_tutorial.py +++ /dev/null @@ -1,120 +0,0 @@ -""" -Tensors and Operations -=================== - -Tensors and Operations -When training neural network models for real world tasks, we need to be able to effectively represent different types of input data: sets of numerical features, images, videos, sounds, etc. All those different input types can be represented as multi-dimensional arrays of numbers that are called tensors. - -Tensor is the basic computational unit in PyTorch. It is very similar to NumPy array, and supports similar operations. However, there are two very important features of Torch tensors that make the especially useful for training large-scale neural networks: - - - Tensor operations can be performed on GPU using CUDA - - Tensor operations support automatic differentiation using `AutoGrad `_ - -Conversion between Torch tensors and NumPy arrays can be done easily: -""" - -import torch -import numpy as np - -np_array = np.arange(10) -tensor = torch.from_numpy(np_array) - -print(f"Tensor={tensor}, Array={tensor.numpy()}") - -################################################################# -# .. code:: python -# Output: Tensor=tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=torch.int32), Array=[0 1 2 3 4 5 6 7 8 9] -# -# .. note:: When using CPU for computations, tensors converted from arrays share the same memory for data. Thus, changing the underlying array will also affect the tensor. -# -# -# Creating Tensors -# ------------- -# The fastest way to create a tensor is to define an uninitialized tensor - the values of this tensor are not set, and depend on the whatever data was there in memory: -# - -x = torch.empty(3,6) -print(x) - -############################################################################ -# .. code:: python -# Output: tensor([[-1.3822e-06, 6.5301e-43, -1.3822e-06, 6.5301e-43, -1.4041e-06, -# 6.5301e-43], -# [-1.3855e-06, 6.5301e-43, -2.9163e-07, 6.5301e-43, -2.9163e-07, -# 6.5301e-43], -# [-1.4066e-06, 6.5301e-43, -1.3788e-06, 6.5301e-43, -2.9163e-07, -# 6.5301e-43]]) -# -# -# In practice, we ofter want to create tensors initialized to some values, such as zeros, ones or random values. 
Note that you can also specify the type of elements using dtype parameter, and chosing one of torch types: - - -x = torch.randn(3,5) -print(x) -y = torch.zeros(3,5,dtype=torch.int) -print(y) -z = torch.ones(3,5,dtype=torch.double) -print(z) - -###################################################################### -# Output: -# tensor([[-1.0166, -0.6828, 1.8886, -1.2115, 0.0202], -# [-1.1278, 0.7447, 0.4260, -2.1909, 0.5653], -# [ 0.0562, -0.1393, 0.6145, -0.6181, 0.1879]]) -# tensor([[0, 0, 0, 0, 0], -# [0, 0, 0, 0, 0], -# [0, 0, 0, 0, 0]], dtype=torch.int32) -# tensor([[1., 1., 1., 1., 1.], -# [1., 1., 1., 1., 1.], -# [1., 1., 1., 1., 1.]], dtype=torch.float64) -# -# -# You can also create random tensors with values sampled from different distributions, as described `in documentation. `_ -# -#Similarly to NumPy, you can use eye to create a diagonal identity matrix: - -print(torch.eye(10)) - -################################################################ -# Output: -# tensor([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], -# [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], -# [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], -# [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.], -# [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], -# [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], -# [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], -# [0., 0., 0., 0., 0., 0., 0., 1., 0., 0.], -# [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.], -# [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) -# -# -# You can also create new tensors with the same properties or size as existing tensors: -# - -print(z.new_ones(2,2)) -print(torch.zeros_like(x,dtype=torch.long)) - -############################################################################ -# Tensor Operations -# ------------- -# Tensors support all basic arithmetic operations, which can be specified in different ways: -# -# - Using operators, such as +, -, etc. -# - Using functions such as add, mult, etc. Functions can either return values, or store them in the specified ouput variable (using out= parameter) -# - In-place operations, which modify one of the arguments. Those operations have _ appended to their name, eg. add_. -# -# Complete reference to all tensor operations can be found in documentation. -# -# Let us see examples of those operations on two tensors, x and y. -# -# -# -# -# -# -# -# -# -# -# \ No newline at end of file diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py new file mode 100644 index 00000000000..b37ff5a2e4a --- /dev/null +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -0,0 +1,250 @@ +""" +Tensors and Operations +---------------------- + +**Tensor** is the basic computational unit in PyTorch. It is very +similar to **NumPy array**, and supports similar operations. However, +there are two very important features of Torch tensors that make them +especially useful for training large-scale neural networks: + +- Tensor operations can be performed on GPU using CUDA +- Tensor operations support automatic differentiation using + `AutoGrad `__ + +Conversion between Torch tensors and NumPy arrays can be done easily: + +""" + +import torch +import numpy as np + +np_array = np.arange(10) +tensor = torch.from_numpy(np_array) + +print(f"Tensor={tensor}, Array={tensor.numpy()}") + + +###################################################################### +# **Note:** When using CPU for computations, tensors converted from arrays +# share the same memory for data. Thus, changing the underlying array will +# also affect the tensor. 
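+
+######################################################################
+# For example (a quick illustration of this sharing, reusing the ``np_array``
+# and ``tensor`` objects defined above):
+#
+
+np_array[0] = 100
+print(tensor[0])  # the change made through NumPy is visible in the tensor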
+#
+
+
+######################################################################
+# Creating Tensors
+# ~~~~~~~~~~~~~~~~
+#
+# The fastest way to create a tensor is to define an *uninitialized*
+# tensor - the values of this tensor are not set, and depend on
+# whatever data was there in memory:
+#
+
+x = torch.empty(3,6)
+
+
+######################################################################
+# In practice, we often want to create tensors initialized to some values,
+# such as zeros, ones or random values. Note that you can also specify the
+# type of elements using the ``dtype`` parameter, choosing one of the ``torch``
+# types:
+#
+
+x = torch.randn(3,5)
+y = torch.zeros(3,5,dtype=torch.int)
+z = torch.ones(3,5,dtype=torch.double)
+
+######################################################################
+# You can also create random tensors with values sampled from different
+# distributions, as described `in
+# documentation `__.
+#
+# Similarly to NumPy, you can use ``eye`` to create a diagonal identity
+# matrix:
+#
+
+I = torch.eye(10)
+
+
+######################################################################
+# You can also create new tensors with the same properties or size as
+# existing tensors:
+#
+
+print(z.new_ones(2,2)) # new_ method allows specifying new size
+print(torch.zeros_like(x,dtype=torch.long)) # _like method supports overriding dtype
+
+
+######################################################################
+# The size of a tensor can be obtained using the ``.size()`` method, which
+# returns a tuple-like object:
+#
+
+print(z.size()) # Prints torch.Size([3, 5])
+
+
+######################################################################
+# Tensor Operations
+# ~~~~~~~~~~~~~~~~~
+#
+# Tensors support all basic arithmetic operations, which can be specified
+# in different ways:
+#
+# - Using operators, such as ``+``, ``-``, etc.
+# - Using functions such as ``add``, ``mul``, etc. Functions can either
+#   return values, or store them in the specified output variable (using
+#   the ``out=`` parameter)
+# - In-place operations, which modify one of the
+#   arguments. Those operations have ``_`` appended to their name, e.g.
+#   ``add_``.
+#
+# A complete reference to all tensor operations can be found `in
+# documentation `__.
+#
+# Let us see examples of those operations on two tensors, ``x`` and ``y``.
+#
+
+x = torch.randn(3,5)
+y = torch.randn(3,5)
+
+
+######################################################################
+# Using operator notation
+# ^^^^^^^^^^^^^^^^^^^^^^^
+#
+# We can use overloaded arithmetic operators, such as ``+`` and ``*``:
+#
+
+z = x*y
+
+
+######################################################################
+# Note that ``*`` means elementwise product, and not the matrix product.
+# To compute the matrix product, we need to use the ``matmul`` function, as shown
+# below.
+#
+# Using functions
+# ^^^^^^^^^^^^^^^
+#
+# While only some operations are available as Python operators, `many more
+# functions `__
+# can be specified using the full name. In the example below, ``t``
+# transposes the matrix, and ``matmul`` means matrix multiplication:
+#
+
+z = torch.matmul(x,y.t())
+
+
+######################################################################
+# Simple operations (addition, multiplication, etc.)
also have
+# corresponding functions, and can be called either as methods, or as
+# functions:
+#
+
+z = x.add(y)
+z = torch.add(x,y)
+
+
+######################################################################
+# Sometimes it may be more convenient to store the result into a specified
+# variable, instead of returning it from a function. In this case you can
+# use the ``out=`` parameter:
+#
+
+torch.add(x,y,out=z)
+
+
+######################################################################
+# In-place operations
+# ^^^^^^^^^^^^^^^^^^^
+#
+# When training neural networks, you often need to **modify** the weights,
+# i.e. perform some operation and then store the result into the original
+# variable. Those operations are called **in-place operations**, and they
+# are marked by the ``_`` symbol at the end of their name:
+#
+
+x.add_(y) # x will be modified
+
+
+######################################################################
+# Resizing and Indexing
+# ~~~~~~~~~~~~~~~~~~~~~
+#
+# Very often you need to change the shape of the tensor without modifying
+# its values, e.g. to add an extra dimension. To do that, you can use the
+# ``view`` method, which provides a **view** to the same in-memory values
+# using different dimensions:
+#
+
+print(x.size()) # original size of x is 3x5
+print(x.view(5,3,1).size()) # will give size 5x3x1
+print(x.view(5,-1)) # will result in size 5x3
+
+
+######################################################################
+# Note that the number of elements in a view should be the same as in the
+# original tensor, and that you can use ``-1`` in one of the dimensions to
+# figure out this dimension automatically.
+#
+
+
+######################################################################
+# **Note:** ``view`` is similar to the ``reshape`` operation in NumPy. There
+# is also a ``reshape`` method available in PyTorch, and it is more
+# powerful than ``view``, because it can also reshape non-contiguous
+# arrays by copying them to the new shape. However, in the vast majority of
+# cases you can use ``view``, be sure that no data copying occurs,
+# and the operation is always efficient.
+#
+
+
+######################################################################
+# Tensors support all slicing operations that exist in NumPy:
+#
+
+print(x.size()) # original size of x is 3x5
+print(x[0].size(), x[:,0].size(), x[...,1].size()) # will give 5, 3, 3
+
+
+######################################################################
+# If you have a one-element tensor, for example, after aggregating all
+# values of the tensor into one value, you can convert it to a Python
+# numerical value using ``item()``:
+#
+
+print(x.sum().item()) # will print the sum as a Python float
+
+
+######################################################################
+# GPU Computations
+# ~~~~~~~~~~~~~~~~
+#
+# One of the major benefits of using PyTorch is the ability to perform
+# tensor operations on GPU. To do that, we need to explicitly **move**
+# tensors to GPU using the ``.to`` method.
+#
+# In most of the cases, we check for the availability of GPU on our
+# machine, and define the ``device`` object accordingly.
Then we move all +# tensors to that device before performing the computations: +# + +if torch.cuda.is_available(): + device = torch.device("cuda") +else: + device = torch.device("cpu") + +print("Doing computations on {}".format(device)) + +x = torch.randn(3,5,device=device) +y = torch.ones_like(x) +y = y.to(device) +z = x+y # this is performed on GPU if it is available +print(z) +print(z.to("cpu",torch.double)) + + +###################################################################### +# In the last operation, when we move the tensor back to the CPU, we can +# also change the ``dtype``. This does not result in additional +# computational time, because we need to copy and transform the data when +# moving it from GPU anyway. +# + diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index 24482b7ea5d..915ca01e4ba 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -141,6 +141,9 @@ def test(dataloader, model): ###################################################################### # More details `optimization and training loops `_ # +# More details `automatic differentiation and AutoGrad `_ +# +# # Saving Models # ------------- # @@ -199,7 +202,9 @@ def test(dataloader, model): # # `Building the Model `_ # -# `Optimization Loop and AutoGrad `_ +# `Automatic Differentiation and AutoGrad `_ +# +# `Optimization Loop `_ # # `Save, Load and Use Model `_ # From 40a261cae7dfaa5b20b40979635c1115401d17d3 Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Thu, 5 Nov 2020 13:58:41 +0000 Subject: [PATCH 19/55] more work on quickstart --- .../autograd_quickstart_tutorial.py | 15 +- .../quickstart/build_model_tutorial.py | 14 +- .../quickstart/data_quickstart_tutorial.py | 32 +++-- .../quickstart/optimization_tutorial.py | 13 +- .../quickstart/save_load_run_tutorial.py | 14 +- .../quickstart/tensor_quickstart_tutorial.py | 18 +-- .../quickstart/transforms_tutorial.py | 134 +++++++++++++++++- 7 files changed, 210 insertions(+), 30 deletions(-) diff --git a/beginner_source/quickstart/autograd_quickstart_tutorial.py b/beginner_source/quickstart/autograd_quickstart_tutorial.py index 2fb4fcb665e..be7b2c4fb63 100644 --- a/beginner_source/quickstart/autograd_quickstart_tutorial.py +++ b/beginner_source/quickstart/autograd_quickstart_tutorial.py @@ -1,3 +1,16 @@ """ Autograd -=================== \ No newline at end of file +=================== +""" + +################################################################## +# More help with the FashionMNIST Pytorch Blitz +################################################################## +# `Tensors `_ +# `DataSets and DataLoaders `_ +# `Transformations `_ +# `Build Model `_ +# `Optimization Loop `_ +# `AutoGrad `_ +# `Back to FashionMNIST main code base <>`_ +# \ No newline at end of file diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index d8d707d4638..e3fa50a2dae 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -6,4 +6,16 @@ """ -x = 5 \ No newline at end of file +x = 5 + + +################################################################## +# More help with the FashionMNIST Pytorch Blitz +################################################################## +# `Tensors `_ +# `DataSets and DataLoaders `_ +# `Transformations `_ +# `Build Model `_ +# `Optimization Loop `_ +# `AutoGrad `_ +# `Back to 
FashionMNIST main code base <>`_ diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index 7369e368181..e2d4e0144be 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -1,9 +1,11 @@ """ Datasets & Dataloaders +=================== """ ################################################################# # Getting Started With Data in PyTorch -# +################################################################# +# # Before we can even think about building a model with PyTorch, we need to first learn how to load and process data. Data can be sourced from local files, cloud datastores and database queries. It comes in all sorts of forms and formats from structured tables to image, audio, text, video files and more. # # ADD IMAGE HERE /images/typesofdata.PNG @@ -18,7 +20,8 @@ # * `Image Datasets _` # * `Text Datasets `_ # * `Audio Datasets `_ -# +# +################################################################# # Iterating through a Dataset ################################################################# # @@ -48,9 +51,9 @@ plt.show() ################################################################# -# # Add Image Here # +################################################################# # Creating a Custom Dataset ################################################################# # @@ -88,8 +91,6 @@ def __getitem__(self, idx): return sample ################################################################# -# Lets break down whats happening in this sample implementaion of a PyTorch Dataset function by function. -# # Imports ################################################################# # @@ -101,6 +102,7 @@ def __getitem__(self, idx): import pandas as pd from torchvision.io import read_image from torch.utils.data import Dataset +from torch.utils.data import DataLoader ################################################################# # Init @@ -158,19 +160,19 @@ def __getitem__(self, idx): # * Parallel batch distribution # # The PyTorch Dataloader *torch.utils.data.DataLoader* is an iterator that handles all of this complexity for us enabling us to load a dataset and focusing on train our model. -# -# dataloader = DataLoader(clothing, batch_size=4, shuffle=True, num_workers=0) + +dataloader = DataLoader(clothing, batch_size=4, shuffle=True, num_workers=0) ################################################################# # With this we have all we need to know to load an process data of any kind in PyTorch to train deep learning models. 
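+
+#################################################################
+# For example, we can pull a single shuffled batch from the loader like this
+# (a sketch: the ``'image'`` and ``'label'`` keys are assumed from the sample
+# dictionary built in ``__getitem__`` above, and the default batching assumes
+# the loaded images all have the same size):
+
+batch = next(iter(dataloader))
+print(len(batch['image']))  # 4 - one sample per batch slot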
# -# +################################################################## # More help with the FashionMNIST Pytorch Blitz -################################################################# -# `Tensors <>`_ -# `DataSets and DataLoaders <>`_ -# `Transformations <>`_ -# `Choosing Model <>`_ -# `Optimization Loop <>`_ -# `AutoGrad <>`_ +################################################################## +# `Tensors `_ +# `DataSets and DataLoaders `_ +# `Transformations `_ +# `Build Model `_ +# `Optimization Loop `_ +# `AutoGrad `_ # `Back to FashionMNIST main code base <>`_ diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 6a89748cc25..eda7d4c7e6d 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -6,4 +6,15 @@ """ -x = 5 \ No newline at end of file +x = 5 + +################################################################## +# More help with the FashionMNIST Pytorch Blitz +################################################################## +# `Tensors `_ +# `DataSets and DataLoaders `_ +# `Transformations `_ +# `Build Model `_ +# `Optimization Loop `_ +# `AutoGrad `_ +# `Back to FashionMNIST main code base <>`_ diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py index 3997ea65b29..7abcd11a6e3 100644 --- a/beginner_source/quickstart/save_load_run_tutorial.py +++ b/beginner_source/quickstart/save_load_run_tutorial.py @@ -6,4 +6,16 @@ """ -x = 5 \ No newline at end of file +x = 5 + + +################################################################## +# More help with the FashionMNIST Pytorch Blitz +################################################################## +# `Tensors `_ +# `DataSets and DataLoaders `_ +# `Transformations `_ +# `Build Model `_ +# `Optimization Loop `_ +# `AutoGrad `_ +# `Back to FashionMNIST main code base <>`_ diff --git a/beginner_source/quickstart/tensor_quickstart_tutorial.py b/beginner_source/quickstart/tensor_quickstart_tutorial.py index ef25beb119d..6d017b69b38 100644 --- a/beginner_source/quickstart/tensor_quickstart_tutorial.py +++ b/beginner_source/quickstart/tensor_quickstart_tutorial.py @@ -110,11 +110,13 @@ # # # -# -# -# -# -# -# -# -# \ No newline at end of file +################################################################## +# More help with the FashionMNIST Pytorch Blitz +################################################################## +# `Tensors `_ +# `DataSets and DataLoaders `_ +# `Transformations `_ +# `Build Model `_ +# `Optimization Loop `_ +# `AutoGrad `_ +# `Back to FashionMNIST main code base <>`_ diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index 0f924fd7b0e..6431c506e73 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -1,9 +1,137 @@ """ -Transforms Tutorial +Transforms =================== -More to come +Data does not come ready to be processed in the machine learning algorithm. We need to do different data manipulations or transforms to prepare it for training. There are many types of transformations and it depends on the type of model you are building and the state of your data as to which ones you should use. +In the below example, for our FashionMNIT image dataset, we are taking our image features (x), turning it into a tensor and normalizing it. 
Then we take the labels (y) and turn them into one-hot encoded tensors. We will break down each of these steps, and the reasons for them, below.
+
+Full Section Example:
+"""
+import os
+import torch
+import torch.nn as nn
+import torch.onnx as onnx
+import matplotlib.pyplot as plt
+from torch.utils.data import DataLoader
+from torchvision import datasets, transforms
+
+# image classes
+classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
+
+# data used for training
+training_data = datasets.FashionMNIST('data', train=True, download=True,
+                    transform=transforms.Compose([transforms.ToTensor()]),
+                    target_transform=transforms.Compose([
+                        transforms.Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))
+                    ])
+                )
+
+# data used for testing
+test_data = datasets.FashionMNIST('data', train=False, download=True,
+                    transform=transforms.Compose([transforms.ToTensor()]),
+                    target_transform=transforms.Compose([
+                        transforms.Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))
+                    ])
+                )
+
+##############################################
+# Pytorch Datasets
+##############################################
+#
+# We are using the built-in open FashionMNIST datasets from the PyTorch library. For more info on the Datasets and Loaders check out [this]() resource. `train=True` indicates we want to download the training dataset from the built-in datasets, and `train=False` indicates we want the testing dataset. This way we have data partitioned out for training and testing within the provided PyTorch datasets. We will apply the same transforms to both the training and testing datasets.
+#
+# From the docs:
+#
+# ```torchvision.datasets.FashionMNIST(root, train=True, transform=None, target_transform=None, download=False)```
+
+##############################################
+# Transform: Features
+##############################################
+# Example:
+#
+
+transform=transforms.Compose([transforms.ToTensor()])
+
+# *Compose*
+# `transforms.Compose` allows us to string together different transformation steps in a sequential order. This allows us to add an array of transforms for both the features and labels when preparing our data for training.
+#
+# *ToTensor()*
+# For the feature transforms we have an array of transforms to process our image data for training. The first transform in the array is `transforms.ToTensor()`, from the [torchvision.transforms.ToTensor](https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.ToTensor) class. We need to take our images and turn them into tensors. (To learn more about Tensors check out [this]() resource.) The ToTensor() transformation is doing more than converting our image into a tensor. It's also normalizing our data for us by scaling the images to be between 0 and 1.
+#
+#
+# .. note:: ToTensor only normalizes image data that is in a PIL mode of (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8. In the other cases, tensors are returned without scaling.
+#
+
+##############################################
+# Target_Transform: Labels
+##############################################
+#
+# Example:
+
+target_transform = transforms.Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(dim=0, index=torch.tensor(y), value=1))
+
+# This function takes the y input and creates a tensor of size 10 with a float datatype. It then calls [torch.Tensor.scatter_](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.scatter_) to write ``value=1`` into the zeros tensor at the index given by the label, producing a one-hot encoding:
+# * *dim=0* - scatter along the row-wise index dimension
+# * *index = torch.tensor(y)* - the index of the element to scatter to
+# * *value = 1* - the source element to write
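+
+##############################################
+# For example, applying the same transform by hand to the (arbitrary) example
+# label ``3`` produces a one-hot vector:
+
+y = 3
+print(torch.zeros(10, dtype=torch.float).scatter_(dim=0, index=torch.tensor(y), value=1))
+# tensor([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])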
+
+##############################################
+# Using your own data
+##############################################
+# Below is an example of processing image data using a dataset from a local directory.
+#
+# Example:
+
+data_dir='data'
+batch_size=4
+
+data_transforms = {
+    'train': transforms.Compose([
+        transforms.RandomResizedCrop(224),
+        transforms.RandomHorizontalFlip(),
+        transforms.ToTensor(),
+        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+    ]),
+    'val': transforms.Compose([
+        transforms.Resize(256),
+        transforms.CenterCrop(224),
+        transforms.ToTensor(),
+        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+    ]),
+}
+image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
+                                          data_transforms[x])
+                  for x in ['train', 'val']}
+
+dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],
+                                              batch_size=batch_size,
+                                              shuffle=True, num_workers=4)
+               for x in ['train', 'val']}
+
+dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
+
+class_names = image_datasets['train'].classes
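+
+# A quick way to sanity-check the result (this assumes the ``data/train`` and
+# ``data/val`` folders actually exist on disk):
+
+inputs, labels = next(iter(dataloaders['train']))
+print(inputs.size())  # torch.Size([4, 3, 224, 224]) with the settings above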
+# +################################################## +# Resources +################################################## +#Check out the other TorchVision Transforms available: https://pytorch.org/docs/stable/torchvision/transforms.html +# +# +################################################################## +# More help with the FashionMNIST Pytorch Blitz +################################################################## +# `Tensors `_ +# `DataSets and DataLoaders `_ +# `Transformations `_ +# `Build Model `_ +# `Optimization Loop `_ +# `AutoGrad `_ +# `Back to FashionMNIST main code base <>`_ + + -x = 5 \ No newline at end of file From 1fff4d10b4248accc7cb6ea054da20eebc292562 Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Thu, 5 Nov 2020 14:20:28 +0000 Subject: [PATCH 20/55] updated optimization tutorial --- .../quickstart/optimization_tutorial.py | 97 ++++++++++++++++++- 1 file changed, 94 insertions(+), 3 deletions(-) diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index eda7d4c7e6d..5999e429e2b 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -1,12 +1,103 @@ """ -Optimization Tutorial +Optimizing Model Parameters =================== +Now that we have a model and data it's time to train, validate and test our model by optimizating it's paramerters on our data! + +To do this we need to understand a how to handle 5 core deep learning concepts in PyTorch +1. Hyperparameters (learning rates, batch sizes, epochs etc) +2. Optimization Loops +3. Loss +4. AutoGrad +5. Optimizers + +Let's dissect these concepts one by one and look at some code at the end we'll see how it all fits together. -More to come """ -x = 5 +###################################################### +# Hyperparameters +###################################################### +# +#Hyperparameters are adjustable parameters that let you control the model optimization process. For example, with neural networks, you can configure: +# +# - **Number of Epochs**- the number times iterate over the dataset to update model parameters +# - **Batch Size** - the number of samples in the dataset to evaluate before you update model parameters +# - **Cost Function** - the method used to decide how to evaluate the model on a data sample to update the model parameters +# - **Learning Rate** - how much to update models parameters at each batch/epoch set this to large and you won't update optimally if you set it to small you will learn really slowly + +learning_rate = 1e-3 +batch_size = 64 +epochs = 5 + +###################################################### +# Optimizaton Loops +###################################################### +# Once we set our hyperparameters we can then optimize our our model with optimization loops. +# +# The optimziation loop is comprized of three main subloops in PyTorch. +# ![](../images/optimization_loops.PNG) +# +# 1. The Train Loop - Core loop iterates over all the epochs +# 2. The Validation Loop - Validate loss after each weight parameter update and can be used to gauge hyper parameter performance and update them for the next batch. +# 3. The Test Loop - is used to evaluate our models performance after each epoch on traditional metrics to show how much our model is generalizing from the train and validation dataset to the test dataset it's never seen before. 
+# + +for epoch in range(num_epochs): # Optimization Loop + # Train loop over batches + model.train() # set model to train + # Model Update Code + model.eval() # After exiting batch loop set model to eval to speed up evaluation and not track gradients (this is explained below) + # Validation Loop + # - Put sample validation metric logging and hyperparameter update code here + # After exiting train loop set model to eval to speed up evaluation and not track gradients (this is explained below) + # Test Loop + # - Put sample test metric logging and hyperparameter update code here + +###################################################### +# Loss +# ###################################################### +#The loss is the value used to update our parameters. To calculate the loss we make a prediction using the inputs of our given data sample. +# + +preds = model(inputs) +loss = cost_function(preds, labels) + +###################################################### +# AutoGrad and Optimizer (We might want to split this when we go more in depth on autograd ) +###################################################### +# By default each tensor maintains a graph of every operation applied on it unless otherwise specified using the torch.no_grad() command. +# +# `Autograd graph `_ +# +# PyTorch uses this graph to automatically update parameters with respect to our models loss during training. This is done with one line loss.backwards(). Once we have our gradients the optimizer is used to propgate the gradients from the backwards command to update all the parameters in our model. + +optimizer.zero_grad() # make sure previous gradients are cleared +loss.backward() # calculates gradients with respect to loss +optimizer.step() + +###################################################### +# The standard method for optimization is called Stochastic Gradient Descent, to learn more check out this awesome video by `3blue1brown `_. 
 
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
 ##################################################################
 # `Tensors `_
 # `DataSets and DataLoaders `_
 # `Transformations `_
 # `Build Model `_
 # `Optimization Loop `_
 # `AutoGrad `_
 # `Back to FashionMNIST main code base <>`_

From 41e866ae42cce8e5a0a30280898ecf3b02b4f398 Mon Sep 17 00:00:00 2001
From: Cassie <46505951+cassieview@users.noreply.github.com>
Date: Thu, 5 Nov 2020 14:33:10 +0000
Subject: [PATCH 21/55] added more links to quickstart main page

---
 beginner_source/quickstart_tutorial.py | 28 +++++++++++---------------
 1 file changed, 12 insertions(+), 16 deletions(-)

diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py
index 24482b7ea5d..e874e7f8718 100644
--- a/beginner_source/quickstart_tutorial.py
+++ b/beginner_source/quickstart_tutorial.py
@@ -50,9 +50,10 @@
 
 ######################################################################
 # More details `DataSet and DataLoader `_
+# More details `Tensors `_
+# More details `Transformations `_
+#
 #
-# More details `Tensors `_
-#
 # Creating Models
 # ---------------
 #
@@ -140,6 +141,7 @@ def test(dataloader, model):
 
 ######################################################################
 # More details `optimization and training loops `_
+# More details `AutoGrad `_
 #
 # Saving Models
 # -------------
@@ -188,19 +190,13 @@ def test(dataloader, model):
 
 predicted, actual = classes[pred[0].argmax(0)], classes[y.argmax(0)]
 print(f'Predicted: "{predicted}", Actual: "{actual}"')
 
-######################################################################
-#
-# More help with the FashionMNIST Pytorch Blitz
-# `Tensors `_
-#
-# `DataSets and DataLoaders `_
-#
-# `Transformations `_
-#
-# `Building the Model `_
-#
-# `Optimization Loop and AutoGrad `_
-#
-# `Save, Load and Use Model `_
-#
+##################################################################
+# More help with the FashionMNIST Pytorch Blitz
+##################################################################
+# `Tensors `_
+# `DataSets and DataLoaders `_
+# `Transformations `_
+# `Build Model `_
+# `Optimization Loop `_
+# `AutoGrad `_

From ad410b686a7c1b43baa4bf3f9aa8a8eabc44a274 Mon Sep 17 00:00:00 2001
From: Cassie <46505951+cassieview@users.noreply.github.com>
Date: Thu, 5 Nov 2020 15:21:24 +0000
Subject: [PATCH 22/55] removed links at bottom of page causing error

---
 beginner_source/quickstart_tutorial.py | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py
index e874e7f8718..05a0a0a265b 100644
--- a/beginner_source/quickstart_tutorial.py
+++ b/beginner_source/quickstart_tutorial.py
@@ -190,13 +190,3 @@ def test(dataloader, model):
 
 predicted, actual = classes[pred[0].argmax(0)], classes[y.argmax(0)]
 print(f'Predicted: "{predicted}", Actual: "{actual}"')
-################################################################## -# More help with the FashionMNIST Pytorch Blitz -################################################################## -# `Tensors `_ -# `DataSets and DataLoaders `_ -# `Transformations `_ -# `Build Model `_ -# `Optimization Loop `_ -# `AutoGrad `_ - From c7d4159c180dd853d42c2b5b85bbc17c9b53b703 Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Fri, 6 Nov 2020 19:38:53 +0000 Subject: [PATCH 23/55] fix data formatting --- .../quickstart/data_quickstart_tutorial.py | 37 ++++++++++--------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index e2d4e0144be..0b205af9e90 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -4,7 +4,7 @@ """ ################################################################# # Getting Started With Data in PyTorch -################################################################# +# ----------------- # # Before we can even think about building a model with PyTorch, we need to first learn how to load and process data. Data can be sourced from local files, cloud datastores and database queries. It comes in all sorts of forms and formats from structured tables to image, audio, text, video files and more. # @@ -23,7 +23,7 @@ # ################################################################# # Iterating through a Dataset -################################################################# +# ----------------- # # Once we have a Dataset we can index it manually like a list *clothing[index]*. # @@ -55,7 +55,7 @@ # ################################################################# # Creating a Custom Dataset -################################################################# +# ----------------- # # To work with your own data lets look at the a simple custom image Dataset implementation: @@ -92,7 +92,7 @@ def __getitem__(self, idx): ################################################################# # Imports -################################################################# +# ----------------- # # Import os for file handling, torch for PyTorch, [pandas](https://pandas.pydata.org/) for loading labels, [torch vision](https://pytorch.org/blog/pytorch-1.7-released/) to read image files, and Dataset to implement the Dataset interface. # @@ -106,8 +106,8 @@ def __getitem__(self, idx): ################################################################# # Init -################################################################# -# +# ----------------- +## # The init function is used for all the first time operations when our Dataset is loaded. In this case we use it to load our annotation labels to memory and the keep track of directory of our image file. Note that different types of data can take different init inputs you are not limited to just an annotations file, directory_path and transforms but for images this is a standard practice. # # Example: @@ -120,7 +120,8 @@ def __init__(self, annotations_file, img_dir, transform=None): ################################################################# # __len__ -################################################################# +# ----------------- +# # The __len__ function is very simple here we just need to return the number of samples in our dataset. 
 #
 # Example:
 
 def __len__(self):
     return len(self.img_labels)
 
 #################################################################
 # __getitem__
-#################################################################
+# -----------------
+#
 # The __getitem__ function is the most important function in the Dataset interface. It takes a tensor or an index as input and returns a loaded sample from your dataset at the given indices.
 #
 # In this sample, if provided a tensor, we convert the tensor to a list containing our index. We then load the file at the given index from our image directory, as well as the image label from our pandas annotations DataFrame. This image and label are then wrapped in a single sample dictionary, which we can apply a Transform on and return. To learn more about Transforms see the next section of the Blitz.
@@ -150,7 +152,7 @@ def __getitem__(self, idx):
 
 #################################################################
 # Preparing your data for training with DataLoaders
-#################################################################
+# -----------------
 #
 # Now we have an organized mechanism for managing data, which is great, but there is still a lot of manual work we would have to do to train a model with our Dataset.
@@ -168,11 +170,12 @@ def __getitem__(self, idx):
 #
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
-##################################################################
-# `Tensors `_
-# `DataSets and DataLoaders `_
-# `Transformations `_
-# `Build Model `_
-# `Optimization Loop `_
-# `AutoGrad `_
-# `Back to FashionMNIST main code base <>`_
+# -----------------
+#
+#| `Tensors `_
+#| `DataSets and DataLoaders `_
+#| `Transformations `_
+#| `Build Model `_
+#| `Optimization Loop `_
+#| `AutoGrad `_
+#| `Back to FashionMNIST main code base <>`_

From 506eac2f6acf5a4d67ec714450aafeeb4f7e23e8 Mon Sep 17 00:00:00 2001
From: Cassie <46505951+cassieview@users.noreply.github.com>
Date: Fri, 6 Nov 2020 19:42:37 +0000
Subject: [PATCH 24/55] fix on optimization formatting

---
 beginner_source/quickstart/optimization_tutorial.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py
index 5999e429e2b..a2b7311b306 100644
--- a/beginner_source/quickstart/optimization_tutorial.py
+++ b/beginner_source/quickstart/optimization_tutorial.py
@@ -17,7 +17,7 @@
 
 ######################################################
 # Hyperparameters
-######################################################
+# -----------------
 #
 # Hyperparameters are adjustable parameters that let you control the model optimization process. For example, with neural networks, you can configure:
@@ -32,7 +32,7 @@
 
 ######################################################
 # Optimization Loops
-######################################################
+# -----------------
 # Once we set our hyperparameters, we can then optimize our model with optimization loops.
@@ -56,7 +56,7 @@
 
 ######################################################
 # Loss
-# ######################################################
+# -----------------
 # The loss is the value used to update our parameters. To calculate the loss, we make a prediction using the inputs of our given data sample.
 #
@@ -65,7 +65,7 @@
 
 ######################################################
 # AutoGrad and Optimizer (We might want to split this when we go more in depth on autograd)
-######################################################
+# -----------------
 # By default each tensor maintains a graph of every operation applied to it, unless otherwise specified using the torch.no_grad() command.
 #
 # `Autograd graph `_
@@ -81,7 +81,7 @@
 
 ######################################################
 # Putting it all together, let's look at a basic optimization loop
-######################################################
+# -----------------
 #
 #
 #
@@ -101,7 +101,7 @@
 
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
-##################################################################
+# -----------------
 # `Tensors `_
 # `DataSets and DataLoaders `_
 # `Transformations `_

From edffc4a920d6d78d748135d1d44a5d9cef7598c5 Mon Sep 17 00:00:00 2001
From: Cassie <46505951+cassieview@users.noreply.github.com>
Date: Fri, 6 Nov 2020 20:19:03 +0000
Subject: [PATCH 25/55] more updates on sections and formatting

---
 .../autograd_quickstart_tutorial.py           |   2 +-
 .../quickstart/build_model_tutorial.py        | 130 +++++++++++++++++-
 .../quickstart/tensor_quickstart_tutorial.py  |   2 +-
 .../quickstart/transforms_tutorial.py         |  16 +--
 4 files changed, 136 insertions(+), 14 deletions(-)

diff --git a/beginner_source/quickstart/autograd_quickstart_tutorial.py b/beginner_source/quickstart/autograd_quickstart_tutorial.py
index be7b2c4fb63..49713300413 100644
--- a/beginner_source/quickstart/autograd_quickstart_tutorial.py
+++ b/beginner_source/quickstart/autograd_quickstart_tutorial.py
@@ -5,7 +5,7 @@
 
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
-##################################################################
+# ----------------------
 # `Tensors `_
 # `DataSets and DataLoaders `_
 # `Transformations `_
diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py
index e3fa50a2dae..d0e012640cc 100644
--- a/beginner_source/quickstart/build_model_tutorial.py
+++ b/beginner_source/quickstart/build_model_tutorial.py
@@ -2,16 +2,140 @@
 Build Model Tutorial
 ===================
 
-More to come
+The data has been loaded and transformed; we can now build the model. We will leverage the predefined layers in `torch.nn `_ that can both simplify our code and make it faster.
 
+In the example below, for our FashionMNIST image dataset, we are using a `Sequential` container from the class `torch.nn.Sequential `_ that allows us to define the model layers inline. The neural network module layers will be added to it in the order they are passed in.
 
+Another way this model could be built is with a class using `nn.Module `_. We will break down each step of the model below.
+
+Inline nn.Sequential Example:
+"""
+import os
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.onnx as onnx
+from torch.utils.data import DataLoader
+from torchvision import datasets, transforms
+
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+print('Using {} device'.format(device))
+
+# model (``classes`` is the list of label names defined earlier in the tutorial)
+model = nn.Sequential(
+    nn.Flatten(),
+    nn.Linear(28*28, 512),
+    nn.ReLU(),
+    nn.Linear(512, 512),
+    nn.ReLU(),
+    nn.Linear(512, len(classes)),
+    nn.Softmax(dim=1)
+).to(device)
+
+print(model)
+
+"""
+Class nn.Module Example:
 """
+class Model(nn.Module):
+    def __init__(self):
+        super(Model, self).__init__()
+        self.layer1 = nn.Linear(28*28, 512)
+        self.layer2 = nn.Linear(512, 512)
+        self.output = nn.Linear(512, 10)
+
+    def forward(self, x):
+        x = F.relu(self.layer1(x))
+        x = F.relu(self.layer2(x))
+        x = self.output(x)
+        return F.softmax(x, dim=1)
+
+#############################################
+# Get Device for Training
+# -----------------------
+# Here we check to see if `torch.cuda `_ is available so we can use the GPU; otherwise we will use the CPU.
+#
+# Example:
 
-x = 5
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+print('Using {} device'.format(device))
 
+##############################################
+# The Model Module Layers
+# -------------------------
+#
+# Let's break down each model layer in the FashionMNIST model.
+#
+##################################################
+# [nn.Flatten](https://pytorch.org/docs/stable/generated/torch.nn.Flatten.html) to reduce tensor dimensions to one.
+#
+# From the docs:
+# ```
+# torch.nn.Flatten(start_dim: int = 1, end_dim: int = -1)
+# ```
+#
+# Here is an example using one of the training_data set items:
+tensor = training_data[0][0]
+print(tensor.size())
+
+# Output: torch.Size([1, 28, 28])
+
+model = nn.Sequential(
+    nn.Flatten()
+)
+flattened_tensor = model(tensor)
+flattened_tensor.size()
+
+# Output: torch.Size([1, 784])
+
+##############################################
+# [nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html) to add a linear layer to the model.
+#
+# Now that we have flattened our tensor dimensions, we will apply a linear layer transform that will calculate/learn the weights and the bias.
+#
+# From the docs:
+# ```
+# torch.nn.Linear(in_features: int, out_features: int, bias: bool = True)
+#
+# in_features – size of each input sample
+#
+# out_features – size of each output sample
+#
+# bias – If set to False, the layer will not learn an additive bias. Default: True
+# ```
+#
+# Let's take a look at the resulting data example with the flatten layer and linear layer added:
 
+input = training_data[0][0]
+print(input.size())
+model = nn.Sequential(
+    nn.Flatten(),
+    nn.Linear(28*28, 512),
+)
+output = model(input)
+output.size()
+
+
+# Output:
+# torch.Size([1, 28, 28])
+# torch.Size([1, 512])
+
+#################################################
+# Activation Functions
+#
+# - [nn.ReLU](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html) Activation:
+#   "Applies the rectified linear unit function element-wise"
+# - [nn.Softmax]() Activation:
+#   "Applies the Softmax function to an n-dimensional input Tensor rescaling them so that the elements of the n-dimensional output Tensor lie in the range [0,1] and sum to 1."
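#################################################
# A quick sketch of what these two activations do to a small tensor
# (illustrative values, not from the tutorial):

import torch
import torch.nn as nn

t = torch.tensor([[-1.0, 0.0, 2.0]])
print(nn.ReLU()(t))          # negative entries are clamped to 0
print(nn.Softmax(dim=1)(t))  # each row is rescaled into [0, 1] and sums to 1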
+
+######################################################
+# Resources
+#
+# `torch.nn `_
 
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
-##################################################################
+# -------------------------
 # `Tensors `_
 # `DataSets and DataLoaders `_
 # `Transformations `_
diff --git a/beginner_source/quickstart/tensor_quickstart_tutorial.py b/beginner_source/quickstart/tensor_quickstart_tutorial.py
index 6d017b69b38..c37dd9eacd6 100644
--- a/beginner_source/quickstart/tensor_quickstart_tutorial.py
+++ b/beginner_source/quickstart/tensor_quickstart_tutorial.py
@@ -112,7 +112,7 @@
 #
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
-##################################################################
+# ----------------------------------
 # `Tensors `_
 # `DataSets and DataLoaders `_
 # `Transformations `_
diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py
index 6431c506e73..2dbfad4abf6 100644
--- a/beginner_source/quickstart/transforms_tutorial.py
+++ b/beginner_source/quickstart/transforms_tutorial.py
@@ -37,7 +37,7 @@
 
 ##############################################
 # Pytorch Datasets
-##############################################
+# --------------------------
 #
 # We are using the built-in open FashionMNIST datasets from the PyTorch library. For more info on the Datasets and Loaders check out [this]() resource. `train=True` indicates we want to download the training dataset from the built-in datasets, and `train=False` indicates we want the testing dataset. This way we have data partitioned out for training and testing within the provided PyTorch datasets. We will apply the same transforms to both the training and testing datasets.
 #
@@ -47,7 +47,7 @@
 
 ##############################################
 # Transform: Features
-##############################################
+# ---------------------------
 # Example:
 #
@@ -65,7 +65,7 @@
 
 ##############################################
 # Target_Transform: Labels
-##############################################
+# -------------------------------
 #
 # Example:
@@ -78,7 +78,7 @@
 
 ##############################################
 # Using your own data
-##############################################
+# --------------------------------------
 # Below is an example of processing image data using a dataset from a local directory.
 #
 # Example:
@@ -113,18 +113,16 @@
 
 class_names = image_datasets['train'].classes
 
-##################################################
-# Full Source for this examples: [FoodAI](https://github.com/sethjuarez/FoodAI)
-#
+
 ##################################################
 # Resources
-##################################################
+#-------------------------------------------
 # Check out the other TorchVision Transforms available: https://pytorch.org/docs/stable/torchvision/transforms.html
 #
 #
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
-##################################################################
+# ----------------------------------------
 # `Tensors `_
 # `DataSets and DataLoaders `_
 # `Transformations `_
 # `Build Model `_
 # `Optimization Loop `_
 # `AutoGrad `_
 # `Back to FashionMNIST main code base <>`_

From 72f58484fbd44e05a691f274ddbf8a4030cfed97 Mon Sep 17 00:00:00 2001
From: Cassie <46505951+cassieview@users.noreply.github.com>
Date: Fri, 6 Nov 2020 20:24:57 +0000
Subject: [PATCH 26/55] work on autograd

---
 .../quickstart/autograd_quickstart_tutorial.py  | 16 ----------------
 beginner_source/quickstart/autograd_tutorial.py | 11 +++++++++++
 2 files changed, 11 insertions(+), 16 deletions(-)
 delete mode 100644 beginner_source/quickstart/autograd_quickstart_tutorial.py

diff --git a/beginner_source/quickstart/autograd_quickstart_tutorial.py b/beginner_source/quickstart/autograd_quickstart_tutorial.py
deleted file mode 100644
index 49713300413..00000000000
--- a/beginner_source/quickstart/autograd_quickstart_tutorial.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""
-Autograd
-===================
-"""
-
-##################################################################
-# More help with the FashionMNIST Pytorch Blitz
-# ----------------------
-# `Tensors `_
-# `DataSets and DataLoaders `_
-# `Transformations `_
-# `Build Model `_
-# `Optimization Loop `_
-# `AutoGrad `_
-# `Back to FashionMNIST main code base <>`_
-# 
\ No newline at end of file
diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py
index 10a002f00f2..f016719181d 100644
--- a/beginner_source/quickstart/autograd_tutorial.py
+++ b/beginner_source/quickstart/autograd_tutorial.py
@@ -232,3 +232,14 @@
 # of **loss function**.
 #
 
+##################################################################
+# More help with the FashionMNIST Pytorch Blitz
+# ----------------------
+# `Tensors `_
+# `DataSets and DataLoaders `_
+# `Transformations `_
+# `Build Model `_
+# `Optimization Loop `_
+# `AutoGrad `_
+# `Back to FashionMNIST main code base <>`_
+# 
\ No newline at end of file

From 486bf5b57c450b8fc28ec6e4ed0749a17a1c643f Mon Sep 17 00:00:00 2001
From: Cassie <46505951+cassieview@users.noreply.github.com>
Date: Fri, 6 Nov 2020 20:48:30 +0000
Subject: [PATCH 27/55] more format work

---
 .../quickstart/save_load_run_tutorial.py      | 103 ++++++++++++++++--
 .../quickstart/transforms_tutorial.py         |  14 +--
 beginner_source/quickstart_tutorial.py        |  16 +--
 3 files changed, 107 insertions(+), 26 deletions(-)

diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py
index 7abcd11a6e3..cfa64d380c5 100644
--- a/beginner_source/quickstart/save_load_run_tutorial.py
+++ b/beginner_source/quickstart/save_load_run_tutorial.py
@@ -1,21 +1,102 @@
 """
-Save Load Run Tutorial
+Save, Load and Use the Model
 ===================
 
-More to come
+We have trained the model! Now let's take a look at how to save, load and use the model we created.
+
+Full Section Example:
 """
+import os
+import torch
+import torch.nn as nn
+import torch.onnx as onnx
 
-x = 5
+# create a dummy input variable to traverse the graph
+x = torch.randint(255, (1, 28*28), dtype=torch.float).to(device) / 255
+onnx.export(model, x, 'model.onnx')
+print('Saved onnx model to model.onnx')
+
+# save the PyTorch model state dictionary
+torch.save(model.state_dict(), 'model.pth')
+print('Saved PyTorch Model to model.pth')
+
+# ``draw_clothes`` is a plotting helper defined elsewhere in the tutorial
+draw_clothes(test_data)
+
+# rehydrate the model structure before loading the weights
+loaded_model = nn.Sequential(
+    nn.Flatten(),
+    nn.Linear(28*28, 512),
+    nn.ReLU(),
+    nn.Linear(512, 512),
+    nn.ReLU(),
+    nn.Linear(512, len(classes)),
+    nn.Softmax(dim=1)
+)
+
+# load the saved weights into the graph
+loaded_model.load_state_dict(torch.load('model.pth'))
+loaded_model.eval()
+
+x, y = test_data[0][0], test_data[0][1]
+with torch.no_grad():
+    pred = loaded_model(x)
+    predicted, actual = classes[pred[0].argmax(0)], classes[y.argmax(0)]
+    print(f'Predicted: "{predicted}", Actual: "{actual}"')
+
+
+######################################################
+# Save the Model
+# -----------------------
+# Example:
+
+# create a dummy input variable to traverse the graph
+x = torch.randint(255, (1, 28*28), dtype=torch.float).to(device) / 255
+onnx.export(model, x, 'model.onnx')
+print('Saved onnx model to model.onnx')
+
+# save the PyTorch model state dictionary
+torch.save(model.state_dict(), 'model.pth')
+print('Saved PyTorch Model to model.pth')
+
+
+#######################################################################
+# Load the Model
+# ---------------------------
+# Example:
+
+draw_clothes(test_data)
+
+loaded_model = nn.Sequential(
+    nn.Flatten(),
+    nn.Linear(28*28, 512),
+    nn.ReLU(),
+    nn.Linear(512, 512),
+    nn.ReLU(),
+    nn.Linear(512, len(classes)),
+    nn.Softmax(dim=1)
+)
+loaded_model.load_state_dict(torch.load('model.pth'))
+loaded_model.eval()
+
+######################################################################
+# Test the Model
+# ----------------------------------
+# Example:
+
+x, y = test_data[0][0], test_data[0][1]
+with torch.no_grad():
+    pred = loaded_model(x)
+    predicted, actual = classes[pred[0].argmax(0)], classes[y.argmax(0)]
+    print(f'Predicted: "{predicted}", Actual: "{actual}"')
 
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
-##################################################################
-# `Tensors `_
-# `DataSets and DataLoaders `_
-# `Transformations `_
-# `Build Model `_
-# `Optimization Loop `_
-# `AutoGrad `_
-# `Back to FashionMNIST main code base <>`_
+# ----------------------------------------
+# | `Tensors `_
+# | `DataSets and DataLoaders `_
+# | `Transformations `_
+# | `Build Model `_
+# | `Optimization Loop `_
+# | `AutoGrad `_
+# | `Back to FashionMNIST main code base <>`_
\ No newline at end of file
diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py
index 2dbfad4abf6..5b4ceb95f03 100644
--- a/beginner_source/quickstart/transforms_tutorial.py
+++ b/beginner_source/quickstart/transforms_tutorial.py
@@ -123,13 +123,13 @@
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
 # ----------------------------------------
-# `Tensors `_
-# `DataSets and DataLoaders `_
-# `Transformations `_
-# `Build Model `_
-# `Optimization Loop `_
-# `AutoGrad `_
-# `Back to FashionMNIST main code base <>`_
+# | `Tensors `_
+# | `DataSets and DataLoaders `_
+# | `Transformations `_
+# | `Build Model `_
+# | `Optimization Loop `_
+# | `AutoGrad `_
+# | `Back to FashionMNIST main code base <>`_
\ No newline at end of file
diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py
index 14ceef61aae..b812dd59cc8 100644
--- a/beginner_source/quickstart_tutorial.py
+++ b/beginner_source/quickstart_tutorial.py
@@ -195,11 +195,11 @@ def test(dataloader, model):
 
 ##################################################################
 # More help with the FashionMNIST Pytorch Blitz
-##################################################################
-# `Tensors `_
-# `DataSets and DataLoaders `_
-# `Transformations `_
-# `Building the Model `_
-# `Automatic Differentiation and AutoGrad `_
-# `Optimization Loop `_
-# `Save, Load and Use Model `_
+# ----------------------------------------
+# | `Tensors `_
+# | `DataSets and DataLoaders `_
+# | `Transformations `_
+# | `Build Model `_
+# | `Optimization Loop `_
+# | `AutoGrad `_
+# | `Back to FashionMNIST main code base <>`_
\ No newline at end of file

From 2c7288370748fa8b442b436f1e359f9ffb342498 Mon Sep 17 00:00:00 2001
From: Cassie Breviu
Date: Fri, 6 Nov 2020 15:55:41 -0600
Subject: [PATCH 28/55] add images

---
 .../quickstart/images/fashion_mnist.png       | Bin 0 -> 33424 bytes
 .../quickstart/images/optimization_loops.PNG  | Bin 0 -> 60630 bytes
 .../quickstart/images/typesofdata.PNG         | Bin 0 -> 14723 bytes
 3 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 beginner_source/quickstart/images/fashion_mnist.png
 create mode 100644 beginner_source/quickstart/images/optimization_loops.PNG
 create mode 100644 beginner_source/quickstart/images/typesofdata.PNG

diff --git a/beginner_source/quickstart/images/fashion_mnist.png b/beginner_source/quickstart/images/fashion_mnist.png
new file mode 100644
index 0000000000000000000000000000000000000000..213b1e1f17b764182a251c3ed3b700978864b46c
GIT binary patch
[base85-encoded binary patch data for the three image files omitted]
zP-Rbp{Bqg=<`VoiONCVHF&o{;@)K$MMxovJywNCPs3Y7IA}9!7tL?GDP6- zsm-q=iDJMk;QenV^n{@xG?r@>jFU~C4^W8I{1BlMAw7EeErY>q6Ud=$*{}hMP7FNtv0u8?bSS(ZX(h9Fn9^|ha~4ehHJS0 zo0jS*Rb9&W8HbMnKT@J2A(aYeoF91IKXUOR3j3e6Xi(MWObs%W3wt!|1E|I0+qIMO zxqH96rdgJ~Sq;bCpee3)7fa1>FJKrv5j-WxGQv za<@-HyAGjX-n-{f)&Q*8IX|i}<6+d(1y+~84-;B!rPzrSoL(BI?JsCFyoTa+{Zj*- z%}09nk#0S8^QREwBIpqdfSkTZVD~aRJO_ZgtW16c3xuXLfi&?l zZplmGNL7wh@ZyED6nI{g{BfP1!9&7}upj}?dzeMvs>h5BGrLOCZBhkgRP%Z(dLBu1 z+8GU7n-ZP3%=vzTM7W=^BVYDJvFdX&hXUF%G{bT1q3k6K32?^zxoFK?*g69q4P8Cl zGO`DoOb|C$AX3I&uteyRUQD)lDNDLRzVCB;dGknkihYlCUcdUWGIQ}w+gVrWvr9_P zDTn7CQ27r3_nzq$@J<|+e7FZ%&E!|tA4P~^vwP6y(V2UO@Z4gBVj|<~DFUZ6T64&1 zK{{(ORx<5eK(U@x*vy~Nn-c9(ZYaZ0`({qKlaSxAB=hi->$1ycUzY;^n%q%0 zE6B5K7=Tt$Iyli&`BFITs9c{*z_#{$FHtpb*B2JK?M3Qnk8*0xP^M$^6Msqj;@{v( ztS=?gL?G|zd85pTeSK$-lmh)DbnX6>Z;5W{`$E|+_>+xx=4Me&e=3AIC*#&VdUb6P z6Sg}gJ+zD3u)FK`n}vYGFz*Pfh1pvj66Wn`zD>~SHT z-p#p7BL{LvxH6N*sd|#2Zx|jtA)nwup7XJC;lMigL`s<2xK`^*uH;FFCx%iNA2}+l z4ulY#^?vMtp70K-GXuu!WE%v&ZkPU^zEg1d41`DSeA+HR7>GLs2Bo?OB~3pm3{6N$ zBqLczx?#pO%zm=j77{Nr;;S6r#lL@&AIv)Vkx^Ns1{o-E7d2#}keJQO7>UxK5rj1wK=}%+JR&pI#^88xUks7 zq$2YeW6s0GPvSxy6)p9#=dxwxb!anpevwT)9i-9eo-kc~0~?i+i_RVhfX4a(C^Xhe z{`al(X)``v@oa-eZ{-eB$2lA&qhM@(zx_7w5gW+2KT=!mVEG8lZyP_8?q1UGkh(lt z-Uzpc06XCl3PW@~pkuF}P_!r@QMkyS*h0^5o`Q~8chGioPK~E8sa$$%@o;APB`nj5 zN@-9T&nti8={PcZo6D}gdz&Kj{`Ar#d>_mf?s`RX^L`0PuuKt**q;HG!EY9?`X@9P zsmz9Drsw3kw2t^537)feAzA+@WFqHiZj=x=K`jZVa6@^1tS(=Ph~qZ0k4Z8MPY?0b z63+?f3MRwdwYRqe56&M=z|!{lL=4k@uy5*83LMB1M#4wr1L*i=-YdQ6D8?@&uB43~ zDGd=kUe|`iy24fcU6>3P_*BRUGFU0xI5249_)TLuqT<<+;`foi_h%*DYX@$EK$Irh z-WiV8Cc6ra!ix53b$s5U&GEHrM5Mg-b5&t$vEI;VCO=T0(^*6F6Ur<6iF0PkvjHz6 zj{n0y2SnNHSkAzv6bOxS;?Ok0aZ2ah2h}&wtory)r26TSlj{6yA<`~MmS<-v6BRbjJC_P}AV{zUlY?VYk5>$WkcVEsfb5;OQRRz9YO@ zB|@87&Pq77m!`X{(wA0Tgi8IV`FfT@q$AZ-*ATI zJPGMPaAT~T4rA&}WmH})y!mpR?0H-lrKGx%-s}R$I@OFx_-I13khTcfkw2cO8g5gP zn^zK^2$tM4`mRDe(I0TEA5)?OVSWjH*8ze@3Q8KcB<&a8_~{Mbd)JR?8xW@#sa2NL z-FW_3@#!MPe4M5YtkXz^LLd6WXTAU#yhnoAowCB0StR|09xm_oqHm13=r2+8 zU1qrUdo*(AUM&@yMfX(fu54ld9i|vpk@lscvNxxMdlKz@L_lThL5})$ zld1e^ZivNDzUkRiZ+YtyaxuDh8J&LeLU!h4i9!!2g-~vE6xqN|f0Nb6^@1)s7Y9ls z2y{bkH)~#u$h$?mpU1>b6y8eRU56D^4dgt_&5Y)7r;~MBOAWUiI4Wjy6SbiKOQczt z*QHo{NF~oTrPncq>`5h2(-n4%?93SKCI>j#_h*XfJN$F-4hu}m#zK;BnvHyKK<*hy#&@WB^GArI-r&;lLKdd8dG> zg}UgRDI6RYr%`_W*9;~V)>GFbl6+EN$&7I%m&kqUbf$ywMj2+ky=f(iM8MPqh5LeM zLij6?QA@KG56)^o7m`gg+Kk0+ORdqA^itLcv8E4!?1SvTy4F%S4x-IDn$u5IQ&854y^W_-bnqrP~oIKZa$TfLy;P;*uw_}5+ zT&YF#7r3KUuCCw5!w~oF(m?j@?QQOR`2oXqPM8O0e>sf^{Ov@8Q^pDA5k?7au<$JE1ZP+w{ntVxma%7-WwS{gLC{cnc~K2zSGsSd~Z zbyF=L-KcifZK64p;EU80o03kV%%Oa{mVl0f(`FgreC&RblTegx6Ed@_tDf1#QHlIy z50cg&p4;v4V-w_tDHTN7d$b9%#K%hQm@y{5`&AE9O3eUQ*EQZ`kEIv(4=>&hU59NY zNP{1t6MiC9X68TI)iXA8%Z!f|)=leglGwRL$Xm}|iIwNF`PZc%mtj6qR=sgAlpLoiT?mq3%284Cm zKG^_KUdE%eBOq^K4dxK{yY8%ON%XQAB5McXUGNSv<1WEKk%6R^PokHq_`5ei;0gin^I?CW0-G|7Zx<3K|vbI2hfZA1J4G zrMrbgb~fJjs&9-RlDIaBbjsXHI;@CCAe}4#nVlgfZ($pnyXHM|gcKOgz3!AV`o%hJ ztfoSUw-n8o7IgF+c+{<-aPFlT&7!M0s^jdh(e*uzC-a4`>94-QN~&78WtqeeD&&)K zT%tHgxR(1}OHgw&LQYnXAQ+&8_8hcs6zEYxM;`r3+VLC7p|}>%$9^%rbtOFJW_Vnu z3PeXGs?b4f7k@3O@Pqe~?C<0$zC1b>kos!@*=L5JL4d;Ou2EutTm$dU7jgpWO4uuV zUstZ)FbhgD+AZ#tZ5$QnjEp1N%_2n(<@Sh9W%}8EXi5WbvO~(%ZgR0L-AK;aZ;8LM z_XqKxK*t`4co5tgf^SgJ_Vne@N|Vy)3yQ7j`T-{>ax5S9f^qY9F=PPsSWn;NM#kz0XGxFM|jm`obSCJIDU9eo4jMRkM4BC;@1!`QLjf$Fs(B5H54|%(?D?PFD~a zMb!B@8XmWD3$L|Mx=()$#_Q!qq>P%*c)bNkM@`jow z94R4lj-5*@(wW%}ar+0AjxdCl;aB#;gB^_{b=pJN(0c?{Oj8X{@3+a8BgfXT#CH&{ zl`}oKSoqK8wGIT^@>2tft%d z7&BDOP2bRF)a$>{&y_-50xOmbgc+QRsZ`Imoe&2J#oj8(rcWFCC}oNQTs|tJZlr 
zUD9Rt_=u&CsO~sBc_c9tbF(LD5ov^M3Sb3@%KB!7L9|;>Z7T`Wt$LwsE{%)6pIGcD zeztC9#n&+Ljmcf%Uun;eXSvfS)KNteQ*#;VePk#Z_nk>OU2`#Zm1Ewf z*BLv4&&KlXjj_%#n(=Zx394q}N-riPbVRYL&zAkc{dX}WMacOHvmNRekVKcH&CPb} zV24j42m6;&=x081Ns_mDs)|yib_7wK4T^;e8GXHz!r{P3)4!)f#@hY-lp+ zE$W;*X&+tmd!;x~fg9FLWK3JDvUX>MdqZTRHcPS;(?2GV-l`;M!j5m9ZTcFb0jI7Q zSDAY*fj-_K1U!-rS)TG;qSmQC7A3LBN)Xih=tF`uY)cW6uP*n3W(;eSwXKKVtMRYP)sncE`TM92z8Uy+wjUk4_yR zHhGcSh2Era+=f_%(_aeJ&fhuMP@7Y)nyH06xAdC3_@0cI7zvd%%$c1 z#S&2&klz#3WzqujC%6Edr3?{grLCBNWCW$#YwSR7!5LW!A@4wkwnL)*r%lJj6eIPgy^J^L6FYxJ9f_hhyy1_o=Gw^MJ;KJCQ+Cmpo za_-9h#4OdH;UsR+qtwi+RpB&Wk*MvlC{4EerBeDgGOcquVl}yFlg@y?I&DEn zIF?7?Y(I_HfyBd{3xWHwPjvBYi?5?so4 z0+;%06OGH|k#ZxL7{Td6N3cSXOabI$Js7F*T7;{xm-_pd@v;g!)daqYvu+BxOrQeB z;edN&kAR4)hw!!vW&t25*>oR_WW*{Pn_C-1I@D&VY#gPXiqB)S*3cOgg8ZI+7;LwE17a74g=1EmQ;+t(Nhszi_zk<>I!`N|hE{bMl`Nw*iwsCaAag#!X2;9Ew6y+nBa?ovJmb zCs`w(!GdI71D?Lh5MGyx1?ExTCjL?jxzWRGXAZOS#@4YOq)D7RtZjY~ld)0hc-+8H`4H~zn4{uD~$ZPejJ-xh5 zxqu{but0q2SkUK; z@ogHno!KpCt~lNM@E#=4DWi_x!lrs1hk-X=)}Q#(NF-lw#VCc5(P*-YiDyEJB5O2G zjikiCcT3gs$Wl#3nmhvz_E`Us_scQ~oJ@OJ&st!s;*1X^XN)>F(=Mt8lR%Hpt|v*r zEot2C#6?vq3RG^v-wPOuq}+}Co*u-2m#clZ3fC?41&O>CFDKcddV zk;=G#{~6hPo~(@H;2?Xi9D7FyiHz*6j8Mk0_nz6xN>&_^%wum-k}Vn>BcY++=k9r) z@9!_*KKJ;%$926f8P{zapNp+DuD}TIs}a5`u9(O1UU$y0Qbo|309K{-;QocIlh&e*Eip zASiJ|UT}Gs{_X(%WF1R+-*v_poa(W%eUv&6z|blV=8G=OmvAl!GBsHLbo%D+>p@Z6 zo4*%L=t7Xu)LH7Irp>l-OKd$Rf{e=*U;`&S@7#e6pG^vit_Wn9>S1c2`ooyTbe1PH zN_5HIj95<$Ie+6`y0qCyoQ4SySo8oA&S!}Az6kbo$FEWn;L80i&!HS*0Mcp`cDRO} zN;0})+hnA;Fq{~5=>2G@GUJfvm`Jgv27j>60?dOaLf!?|SEnbY4~&9!<*j`(v>&tUj@s?v45zBI$TJcsGs%)nrAKNgXWRb$xwsE}EH7%oIvZud zBIlZ5;3o2Fm%CHz-0#3g1Y~Y{G`~~YvLLx@YF;g^|LRcu2t43jH%;;TTXf4z{(aLo zR9OwVO%yM~hPWE&-+l?Xsw>qO5p|h5V}Lw83m8hm7s>F)fhyzHeLQN174>SmO=_%h zP_-D<6%YyC2_`U}Nuvq_3JHfe(E7YvwLpIbD-+NDjVtTB%dz(UVo46%FeR|^Mi7$D z=>K;n0$)DpnG2q&IcoqK(Z9Uy!vGHKfBP2409}Tt$tPAE^J?&AP3O2%Qk@{64TJ8N z!I|`XM|xa0zJpn&Nu6nMt&^F6wX5&;b}{p9%JMH}Wig2%5!Gr#w&U)k4yfze)>7*s z7i&?}`t{&^xVucg^bE2i)pCV!hk^S_OO3h%4G@B-XskkUs&d%yGr*_v19D<<5hJeI zQVulh^=j*`pr-9E)g%Ek3SI$OhuRWQ)ByDoxDL@5 zFo=6L-w*~1alw#=gS?|BfE0fq;HnHr>HZ2#C(ynqA6Ob(311JvInDo(UFIq!X_D{42{@*=(t^Pryw>~<`i3OSF4!OkPw|N&&g#9^FAqwW z|5H)fktAC*JH|sSOnTFfoiw*`4o0yb3>^xj<#x}L3d!=~0 z2oOVO>9lV!0yW>i5w&(7<=XRJb=k?Sc(^Ovx$Z3)`%a7&Dc~D-84X*iS&IdSBt-D5 ze*SE~L4qT*k-=0y20vHFb3tMhW1UKtAYaZ>pkag5o=PYPiwo--L$)A zagsGM>j;Xfw!>uzT=r&4BeU``(kaOmmgy_B;y;=yq5S+4s*DDBS0Npfi{v_yb`L9G zztaA~z(9+*F{L-fKI>s$w|_hfJrp|8RNYP~I>M9B#yc7ID}jYWC63$|m637X*`V2F z#f*re+n+QkFu#^0D?5o`YHzJKO4#dF9!boNm|?Yx3b*SQM12QPJR=U&7MBvt>=Baa z^Z1|8JpR$}YgI29MeRB3KRZf1BE&X{ajRAn%-N;IU@W*xBXO(V!fruJheDrsZ1+Ls zJs}G5T!;yk>`r3r%m#C26I8DNE)8h*UhbpD`;;d|w%kRQZi>n*Ry*$pD4n<48Yq@q z{sHJ>an|L4pFi|IoQnz_2Q6BO8vhmK9E!;>{abX_n7Mz%j)gWJ6@EMl=v|T_YyLPD^c!>&g^}jDAya&oBM~6;` za)&w~UF2&AMrQx|cad00)Nhb`2qYHujw?Ia-$!{iNCV2h9O#vBq#P|D7%bWJ^ z&uC?XoJgIK+m}CuRN`N6I>mFy&d`i>GtEZk1Fw)~M&&@ae;i9g*MRC9a2j_gF~l4r zd?oCuJA6ZbC9$lSfsns9q`cYTKF^@_GLgD3f)_*YKk;uGGCGytoRM6 zPer|*m{fS*lB5F<_J6L3Rafk$UmA07ps+E|;sk{Kpnuw`z@exJF|nGuyHix$>h0OU zWL)}PmAtVv$%*}j_Kyo$4*t-RF10x8Y?3+E*3UqaN7)cIl*%%7H9l`7k)F7(3*+Rs z!Kr@!Xz zjH?{?wv-TrbcMv5nhaif^KlMZarznwW|WrqEV` zcdafCGB@dn{bF+mS~DnuG5`vk4O}(e{Md6Zr!u7~PCfdr3RKuW(JG=5Sg}>7P;?%3 zZG>SmjSvlsl!6*r>N{+61u&k_^s(5GD}3#A*iLor1u(|V|BzVi3|%O@{u)W<;8-iz zax|g8nIyFpUqnAjJWQm?t~8D=M2V*K3&v{PNP7HCN|T}7^1q*DV+<*0LorSzVME97fwuMDh^Jn(e^nH4%NRX60*7f> zjJpu2pTPU%yNpGT1rCDu=~pzse9YT-3a{)7l+}^Vh8oq4UM4~hzV!OI)cLnstOj8z zR?bl@gSO6AP}Q{HS)}qLFTWaTxlF2Cq8v7^rWI9&g#@Xqsw($^s?lnayvs6dD!2oy zo==kuCPlYhG^^;pkx@H@w(&47j0X`St#Q7AW@ 
zm+wI1_uUQMS%sA3TYBAC0k_bc%3jPuE)s^a^0=c|-REumkz!1OP9ZD>B7y5H~uT z1TGn>EC#@XMFITE9XZ3M_2k#mvSusQTX7ec&4K27g$7N%-musCG!#3KxZ?!mPF{e; zzYj8Cn&anz+{4>O;2U!)%cG;m#ai~!0!@&^__&$t{cV9LbDK}V^IsTp4yCemM$WE> zlJp9koWXPoMh4aJZS8ZRs8*nSz zrt3}a9CbK#i!|i3DTJn4OT@?YFOG-wolYsO7l7RG*Bb?4zb2fwewV>Ri3=kPgXbl1#8kWUf0c_nx^LXXDq+gSvuaX z75Ludqo$U-#u2I`8+QpEU(C)&F^?Xhmc*m?Vev0C!7T|qUG0_B8?9jy=cTqf?olNfc-7m1QvQRG-Jk^c#xw{!XhDuIEeXa&kG?e_9Lz%;e|GL zAVLzgU!SQqOn?4xBj!Mz(i>(jo3Fi*Jou7mar|$7sfn_a18!Wz_6visiVA9)1}WSEiz*wZy7S!(Xs~YjOYKd3!wQF$5;YQG-c3@VWC8F!sfh zY7F@|nXW6Qq!a(^U7)qJ4T-S%t!nRJ)ephd8lT$P_DCh2IZRXJ^H+LCt6HM`a-~#5{$Hk zoU*%|)I|qq{KhItJAx0A%JB4$l>Eb@8{W5jf{iA5D!ckkgJD)+mnyd`V;`b#0@5Mg zkOHjO54}#q5_3ZiNeG_%pKKU=GMQz#xrfgxFB?8jYsGtE4090711PG!Dw3s-R^IgF z&A)B&=`ZyLSmICVs?J|Kv2Lto(n0rc41NKNorx!|57(l_9k3L~9Pa_luxsarr&`b5 zwA8T>*+YLT0>5H|uCFeHiAr+|;ww#@Q|7$g&c-1CR_YYhXmj+2*02*GDH+%kv)$sh zP01b8<2Ol+$^ZWkr<; ztlkKcpwR{CB$y0N3uEDg%K7aWgQ|O5I(_}gWTshp%*pX+))|jAC2ekd?B#41XG;$Z zXn7|po>@g%a+OO0$Hj=ZVonWB0;k@QH?|4_KPFEdShmrk2A+M6X6@6rJ3zJ4$7RuR6S2Dl$!j6;_5 z;Skx+X?|Cq4IBy@$ZjPN(HwpPff+9yhxJ8~VH1+0LHPTinM5q5U9zS>z=COv3lmh$ z#BU#N)T@(R<7!Y9-vCj|5`cZ{rUvG$Nr^xkAlweP3-or?EV_fHC^{g0bQB!$fy^1U zXwgLDyr_S=N9Xr3%QwG8g>ii{pAE5&d-$v-x)4%HQUX3FqIW>`msh0F9W@MOidhCw zuVvihzrU7cjNhcWJAyS~4gnEY^s-?0SHrP`pQXApK(n<6#U=U6JeRf&JISU*TS{?R z23EcHey74aC{O~i_&?}+N~sRmajIUH8pY5*HFJyqZj$VGKoS{r(>Uk#(xQ+XNIUw1 zdMD^9>&b$BlZ|O>ZhSR~RFE4uh^bL8iGOEgQwRxnkW_EWOBlHI!Eh`o79_li31;;K z#Ub;@%XV8}o5U*QS4v2KVj#Kn9$4WwfUa&I^r20Ia%K|@eMZ2MNih#?V+)S-`qw*2 zo4+SxuRP#LeWtk%^o@;0f&*Xt!=78c^aC=j2P?UP4Kqz*jT+XK1q)!f?lJWZk`^oY z?X4Bird1;~9aI;nCZ%?h#-xa(mlgKy0AF`}j0lgQ?B$X=pD*Z9|9%11&ujE%(CU!% z)!cBpMgzCH9D+4VhcAo9yFpSA9sdn%GhWd=9LC2l7wRyl>1Rx_u*3{OqGH`>_}f#Z zH#$idQQaioZiqg*4RxmKz;z;HR%#qTo-LuQo*6#zupT|*d#JR{!m$i`kk+PaINaIc zrf`vNUmeY3a9df;9}QEA1CmX__6rwH9<5Q3Z{=bqlZ-Fe{`h>%R-a;P2`IjIAb}Ya z_=G5!&m8&$emQ?IWO@K(=no*By}>}UC~TWemf2i$*V^)9<&kMdVEhPjArm$~=jtyq zVsKp+c8YmB?Q}Pk8XRMoj-Nc&csrcTOK8HOOrhglJ35_;u@kr18Eb7!4O^F9P{WK3 z_?btQKz`*XfnWe2`Vf*drhIOG_7OyQ*q5v;-MWarzjon|J1qJJXbyvZ{WDCPe;$H2 z$7R9BF$R3eEfl%0@_TN8j3y6N;fEL5qF4@)_BC2F|Wh1faS=OE(6 zQk$py;hTC%np~qAcch(YO&+ijDpgog<*pa_>g-s{IiqMgVxUBlhIsc)mJaiVeD|Hd z%jEl{r=LJ*w_kq@r9a5L4*+36rH}+){G)Lu+m-Ztpe2KJlzzE34{HoyXBRGt0O#I5 zDRul>LV!>Gk}ug5JF*^%rIMEA!et_sCKAzwAPUa#9anLBS0MTuciukO>mbq`Fkg=q zmIRk7z+l|e^V0mTs4vixg*;EL6febf7`W*t09A_jP+6EEGL$%xo(`vO@Z5aABCg|lPz1ip{#b@}w$D2`PNM-t#U>=)GYcE?&?U&Wh z`cT%p%?_5 z%^**^fkf?+KHgdV-N1Gj8@*cc_xFyp4Sdng#MnIUA+W(yp~PTYCQf+~EH+%tTQQwS z-`=-)ZXZEq@UqZ^?4_rfT!HwDsoL5@YLT`?dB3;*{x0rP32i9IE=OKoq}eGrS11`n z@tc}poLKa`wU+4VZfU*9SCrEyB&l;pyEA zrdBgLOFcdkL3RM~2$nhDf54)-y8u9i6XfEqFlmn7ppK1>e$Kobceyfd7y(Q4$?i0rZ{ww_4o4`bHhj+X8M85l2jrHl+RSQR#}mz2++3r+G!i@ z0dQ6UU{xUPt=*?O(!skCK@8$}ZA7yOs8$zS6+QEQr!U6f;-Y1i-oX#LL?aMe%py9d zC(I%?MZfB3wvE9kuefI`yX*#vEuxUr5lFtqS&5ANbVDG=D{zVquMC0y;q6bUr2in` z%_f~`1`bjCph=Hc2U)J`^1o8s=(}*On^|;xxC*3K0stE))0l-bUN`r!9CSEI=@qI= zL(|#a&(X^fYFa>mzJs@3z(;oG9siowC&pOD>fwjNHLjM~=DvSuEFH_nm!@ zB)=y5B4<}FCOI&Fns49wpJh@ zJbELljR13jrTRWjA5JgPxa;5SH?ny#e?zI`OhQFSZiE`4FK1Zf<5F?y)0Z0ZXY*l; zh}z z3=q(3Bulw#^cmy>#o+9$ilt$7IK{7!jW%9dXGy<{GvAsXX%xh^6K|pQQmmGhEFBF- z{%nO!@4@$>Yw}zpc}g_p-g5t0YNI!L$!|ra%}!q2zSt(*0asv{424_Jz80{nk`b((+g6`0X)hdsXh8wZEV|ok{n8_4#QI zLkc<(g2KfQK|3AD&E{+`uIbzfXgY~w{vJp%cdo3p)R*5-4X68n z=`#e&IkX@W?TANo8bora#3>7@0e<`u<=XnS!Lng3ft7L?h{A!n)!Q|3AJhP9xR*yu zk{SA?@RaaHxdm|fI3U$NTA5bnlHJre1tBooDM##N5x>2*iwDV>kf6j;asLIPw`yi` zOs-I*7d_$^D3Xac8tGRp1pc1~Vj%dH671>@mN)UnLl`Dh8JR54Wuk8%B$V~w+qi5Y z^jWdl-=l|;LyAR@x5|}h{Tv0T0-kvaFw?7=s5x}-zS|JKvycf&-G5U@%w9w 
zDwb;%_w(v6*nxZKQ%zrK-qzH8pN0C_=)CY)QVyx0oW`Y@m+rR&{SD$5Lz8uCnBeUW z(Z8TF*YA&MSOpurSZkpuX+vW$?9(Iv)eh*PG7S|Ow0^kB!cCR!A%B1OE&&rFw%+1q zcixVWYo9?nm|Ij*+mqGx#+8xuZIh51i_orw9hEd14X~0*ulB;z4yvI|6Cxaqvs{WH z(JIknd{Y@^idrT4pdqUj?)(oK+bV=gG9<^%_%_-68@yY;oCPUX_gN+ zu($IszebhibhRY)3Xr8s4rS9=Ld(#5*UdQlBIEXy0KHv!a^;lHZ1ms@Cl?!+_P`Hf zcJrFULEpP2_sip?(FC+dYJQkj!bD8W=?pLk-?ei@8#gW893iD&O!}onPtaGKSKqX; zuh6aw%hV_qEEYjfqn#yn2gTMw#Qk<@Iji8HesLcE`TlL|ioJ@;Gl;S8$-R;@LFFly zNYXkLR7;4jy`cH1=ZQa(C^6upf!jkS@t*Ye7 z{G+a*Q#LTuWGdEp1XHhrs}Ei!Yy?8KULtb+aemv8FW1%$x{qsH*TdB5u6JFg`En4z zZLKE>us%p)`p^~kC2@7{)i6P)8PW-86bKeTYFn>s(LOfR(@xEPQ(@PwsaGpnlYgWs zd(T%J$7j;c{ovW9jnQXi_1ffJj*}cI@ae~=$u0q@f2|LeUiKXL*(D_gO5y81UsK`} z;dD@0(Od1X7xFu7*f{6==RA(%p6)ev#cwx29CyL8tqk<-(Izt;>C2R-fBg77^p$BI z?+T5)kJ2`Cd0AnunMcQ^Q6D66rs=4=ff&=Ju_PzAWWxj?(ErXf015nBwyN z9JJ<=HY*E?ULjBwigAf;X=<}9jr>5Gh7cZVq z=Cy$v0nDHvO zM~V~;2c=yAF(?#q&BmiU#3F%f3z9v2>WDZ?bXlR8esRMt^?)Dw)yz*d>N4Tc^xEa0 z-hY31cS!J4tFtbJ*5aF-%mgZex6Ys$zU;%+*Ao`Wt9e_qEZL2hJ0&tVq+3Jlr=QE$FCMpXE@eDMvlZ8WHuRHZ!~@o)pY|oa{yx4Fr@2;c=>rWq?$GTyWD_5brX! z_*03R0o-(#2=MgQUv-_&w(XN7)hQjdu{sqg z5qn=Hg4GL;@XJWESkh*aw0Kwg=760V032rFdLZ%^)wOtZBGwuY;uIPzdG-9xwvUUS zs#JJaG&^_O(|&&p*D%deUEsjQTqG5wXyxSs>g^=n%%HXV0Ll*r|&reG?BSRu0hO6tvu9&V^208tU zZ+c7EFFf3gL*G4bCtTiPfklBZ0}?Z96*+&4^iMlb0$wEJW9PK-*JR#bKRmW}*e)Cq z+@Q2K3t9lL4u>}U&Ql&k9z5T(F0lsQ6pi-dJ%xtDt{ZLsTf^y0WZV!1Y9TiMH*op7 zda7C#XLpUMmh1mW4b-I*Nf(MZQ(Yl%NVdMKQ#>adQ>|Hu?M$z<=O%DRZce|MBR)@I zl>YZ709K|pL=^fEtUiV3K;a7|HbKEEh~QTwo$$b`P=Om23nLu2z+dcxT`#+DO5}DmG~M-^qNup9v8J z>P?Y#S(wsU)tj&5dihmGj8TeW>=RwV$IDQ-q97H7ig{2;!FfE$GbAky<0;d_5Fg_FNPOcQv!)%TN;I<^ zgJE((kPe$Z9ng*PePZu2XR#T{`wkSw>nIF4_3?iJkkp0P5ej<7qi5eho4@kQGNp+n zRyJ8XHGG-hE~!3lE|v2U!;yuK+jq;*t3qx<03>jjfH0#Ywd?QC7{p%{>jy~pN*yLN zD-+_7P@MrNjv;!eW#j`z+rGp**_>UEm#~nI|DD)xuZ@Y=>%Ye(rs^|a#E1scG434a zRALAnyc^`=7acg4IfKmP;il##*breL5*6CgUO_LK;o1_oW;tc02x3v{{%r z{{_N9DKVlzD>Md(QHO|l&WTi@v70$xswOADb>PeONKD4)wkgbX>jyHww+!%(z z#O>GsUrW*Z3kCH12IO=J!0YZ^%E?(YxJxpf^-)9Aso6L&`mFXzpul&1nhuYM!GWg&k@(X<*ico@24JihDUYVmoX zMl(&aU=v3I zsRomJxiTLQRQlM&+3&#Jz))&Q)iTW7#dU1X+HyMtUP9LQ0}8s=?HhNa@f4)V|GarX zRXKHh$K5K%rSDRfa~`}($9&w?;qdOg^$LbmyJ0&*MB>F2qoxvqtY zs46>%rraVtcOjALd!4nAP9L4e`+Ekpz62hME$8KzTfp#D0JPf*c4-@ny&Hc`uDy$YlMUH zz;RDsa|S)RF)x56u2U%<^x$lJFj&qss@QBrdB-lH+HL&Q4}9#jL-**ecQZ{61r`HK z(6xrKpm)P|YJLBBQ}Q^SQ`8~2$W-(H>MiYugHT z`XTkGf1fts^6Oi?qO}zgdq@H=%53tG;vC}gv=b9yVj4flLhOZ8V|zff<#^}ShD2Yt z4j(W^wa}yY)InN#T9| zH$C&ZIJ2O-8?`0-%w&KM^#yjuNfze;hWtN7hj&)e74UGV)jtMqTI>(9viG$ZYMv%O zsNwvvVK7Q4m|Pn*m~@dC&(tA^0h6*yl6Kx(EPo!*ru7y#qF?D5QHHul1#C(Ej>^xW>US$8q6IyDdXC zhUecy=z)*HMHh5~+m8+tZL_R@urhlgZ_*$#9dG0#Bw?7|qh@h!1BJhro(QfWw){qP zhbil7vQiw=Y`r-0?6G82a-kMiYNPMc$2ccPkQ3xV6Vo<7+tGpI5cPT)`g5yzuX0&B zuV%dXaZ&=_Qi}+ps6pA^7Jk$X~0u%E0){>Q5V3LYhuTtXH9ey^unKPuRrb)H?Y7;^_ocT-&z{ z??o;~>xEclQldE#85I0(!Zzw$w4za$%x}}ohwj)mtW$+R{Mq1u3|{BU-2~LZY)v-#3_cXE3>sz+hz4#Zh*~i@@9u;RB{@LNIY4#vR>TA#tN9I5iC(3Tr#-(sAeN{iq5l1^0x~2b!UTrq{(6T(sDaWguBerIqy2ut!8jR6 zYG-h1^iXo+l@2;~hbk)OVg5IhFHBRc1FWkLG?JSOl^%X?)G=9B&4q}`&BXTjp5z>C z*<`Hc$=I)zwr?IW?OTz*ko`%4YC8y3Z55D_(HGFv7O@G}|2C`pJ=I6;vkiDk^}xk} zDdFC&d9~b~qM~4+#vcuPB?_l}7RLRT@w|g9B35DDI8JO(owe&xq;*2l&PHS`JJ;Sh z_N4V*P}#pE7GZQXkT%9Wy)+Xuz6uvh{Hi=rN-aV)n(>x3PG5fs6vQ`7j%(7jtH3! 
zOfw=kVnA?aAHC{ziLGu!O~$x=dRzXSkCdMy6wP!tDePIn&fSG)bjf^Tj{5*prUYhy z%_#F|)}@xxAXPCJzUuk`g@`qIk9VTS>Zd1SK{#XISoaiq4h?(1z7@TyydmHznsI@S z47@1s(H9(H-)C}e^_=#^CX513qkM0XuKVe?*`NBvOA%liCWmMkJXAM4Ov;E z(yN!Qnh9Inl_0tXFMb5df_nK|N~pNYj4&UmA9oz)^TOX&$S&?`V@9rRmDu^IRh0|3 zz3U5E&16}X>v$EUVu|P6R5nXPr|Xqp*YIkTPV82BLe3P8Crv`}NaK)ioggY^AWs?L<@#sHL7;PRH!G_CG$665kgE&(b8yAE$c~YP*0XOT!~{Gy zLMHC(M0PQ3kIu0giaW|dDzsV$PS7f-ikhU&PAHjJ!Qm??jF(gT(dlf7zI_!WAD^=K zI6SpJ%&9eDpc=VlraKa^0l^R@73=vMESmZrfB>{#rCOBj6BmDeb%Mx{ogX*eOsCI& z7Jq-e^x%$&CUN4sUQXpPo#4&B$`fFW7c6MaC8e`d8?=A<70)~ic0K)DrcNyGA;IP= zOG>qGw9o~%+2L^x3u=T|jVOw4Ekjue!kiEevZj4ZKa0d4;xbNbrs-ti6lbLs6CPb0 zmwY(3jPI=i=eq_t3JAA*j$c~B;XD4uF2@I&*g0%FJ2t2s%;a*erhPFE;$@=uJX7E} z&>6A``f}q2x9#W0sA||wH5n|XzLCO$~a37vK^qX zc}PS9LopHr$}el;Ek0J%)t(%$rLi%EMfoQ6l4E;{bc_oGyI)2&@}Zs@0!5<5dbnu@ zXKoB+=WeFP9|d45&#Js>g4I7pVGD5q;CNudGBpm zQn)D}Gw*qC=uo+chR2I%2ambP{@pYEaa!R7%8fjJv2`UVSNp~X?X-_q+wa?)0X@Sz z`vjdxoltSX26z!qeXVgR63?uO1h4cJN#rKnO@f2Ne5(8TkSej+Xyvdn?L}-a`#MDx{DqPI z_va*^6<2`)(|mA=&HOEG4D@UoL*y?!8@{B|BSx>girRi_B5!_+w=V>you&qcUp83c zUTv#_6{ELO2NL-xhezT9A9P#GC;f53Wn8~KD4QN?%^(A~!3_Oy>!mB9ccEzL8-3Xgispk!)z-Sj-^$4`;Bq5Ksmy+Xw)57ZC{GuB zq%#1lL9I^sy1tHHDQYfA;S<7>UN~!P@?(Kpx}z0Elr7{+>FvuW6O#+w zc#OXJqz-+_FZ8Gm`=vJy(0Txjn z_){MgiFubzsWKSn-ZXC97Wy1!vKb5tfU0QWMi}jryMqx6!(ekbmyxs)wyR*cgbk+8 ziQiRU!RW?vf;3@EC`25`o?U!oRt*0v3QX^&KMnzG(qa@3OR-JTjSidLU#rQ(_VF(c z$OZs{!^-C6y-836(IxN5Q8{QYE%zv-f6aw%Y+nD&8?hls3avRWKthkn`^9lkJ>$C` zud4q7rKSEll0W*kBR%Yy!O`cHkPw!fiGRTw}<8h zqU(HDB_q_?Hwh0JH3tRREmCk~`5p~$-jJ~5en$A=DeV!>VxA&xL(ixz!57_rLdKa4 zHWqNDEvH2ruv4qPWbH&is=y0cWO~Rq{7l0wB2@I6Q(0`978Am`szA?R=kd237)Isw}j*0Uh9AF)=rb7;moa8(`q-a4IX|N)hVru4l3fjEE&LqdDB$Ne2MEoneK=NT3 z{v61GDQ;gvY~mrpf_Qh1H8q8vO3HGT`~KqhLvlA)%H?md#Zt0<8n)b3K-5r^BtPCt z*RqfQa880|MWr4G3yw6~Z5j91K@glE;rQ+A%FeHcshX>lhd%;5!uh-((qw)nLAp)qY>vlZ_|^7`j7$fJEQ785C0jVCh=qKI3{%<<{Jl*u>) zh8k`4*8Ju7E%-6pIk%UAy&->iu7Y7x7OkA_OpKd#v0$U8GA|jFC@5 z6-|+M&-lPKnO?@p=dsi(Q-HyO{aUE3lZ>IVT`#c`(jM{;WHOP&_U|_#0;H?;{Z6Y+5!!?h4^|irVEVmn24Zs5yTMsbaT`mdFC1=;C zNg?(tlQ_zZgQyEiH_|r``~g}VixqMrgRR!P+aFGsi8ZYiNPov&xn;7yl=0Lj>hL4a ziZN}Js?f-i#x}`Ot?@!K*^THvl37W_BO&EAyTPqg>F`j`IM^W5wOQ$|*V<&d{Fmb| z1Stvbca2)yd2=)>XdtP_Qwe}d|85taY3vfnemMRi7S#M5Iu=_Q>sJ$vGh%IA4)Zq}kBFb8Ogk zfGx#w9x_VA$)kIj(rSf)fYi|L8(4kA*aENE6a zze>tDQ~6hImBP!vcePZ+Prc|~8UjS9@SRul=F}S7cbic`G*O|otsr7b_sX9|sYPoC zlu(of;>N<{SWl(vw|7lH>Z_D|k0V?pK=4pqO0JXazZMki7{PfeNA9o;!W0(ea(iI( zwt%DEmS*XpN<77Ow6Rtb^^B4msgpFvsfg!%7;(xrMYbwOtoh*=C~T`fxsJ+lj&9lw zJUYMjFM{$XK2|1KXbG_IW*VpEplWh?Hq*JU2y|s=be}P9-jaF;H z2j<~le>vHNTvtK1awTczSUq?#g&`&4iNuUQ=^HRsX!YgMOlqVVw{JB1$++}dSBUCg zdJ2%Wv`>38!TWaDV40RnN|S0(P-3!+g-^aim1X|gz2@K8(Iw!@n_#q%D|XWZ)$5Nz zbi`R`x3}M=Z_Q)Zq)D(6?m*Y>uIsR!^Odji8^}y*EfR9=CEJ4*n5wetaospxPlwsT z@DyCZh`~#B)})>I5bcx*Ny0T~83PbsW8JLXJl%rb5*HKxiQ4L`DOy~Sl#7L*P@q)! 
zw=lkB$2i5H=tcgLKB#=NS8Mx~+g9yS1Wn5I4`4BhlOxhByqCUrWTt&Z`zIX3d>ZBk_cG5h_r;1c6>VanmA9$l%(X~3x2QSy4tN^eZa zM2OTa=ZhZr!a!DPLb-CO+rfOZ;~w|$S%g2RH!?D$)`?W5s&ml%Cf}a)6-K+6#wnr2 z%*7uw_L0X=&?XE6?tKaH#ctl=l@lL%r=u^Z{-6J^zNnc@JJNiW;9BO$MK6ldN6o3Z zR@KPUE;8;Jp5Px4k<-2YBt92$epCB?o~ECkdE3Z>A1EWA7AVRHYI@~EM_jE1Z#0dd zpZEQ}rG>t8Hvu8>D~>dwK=H}{9R){#Fv^~*7YD>4*hI6E%{yC)mZqF|^7!smoX@DZ zDNyouPX~XZ{DNsGqf1Tee6DfT0y5K!h-)J> z#*ecyrD3eNd^3(4{g9MSoyOR>V}6WypkU*&dcv(6t=!2jl4Qd1+$^O;(|0S3mGeA{ zvn9H#aK%=65)z$H{)$+ZUH_18GnwDA_}*PO#dwR1PG-09K3BEEyV~`WKauq64Y5o$ zZkKB5*d0su?cLlafqq|m*-_wo*sae~Xi|EQdQWT0PN@ePN_PR@d5J3^0u0{f0FS~7EAQ?3T-`*CY`M>&r7GFu#x6VM+=qdGN&-%6C2t#KWZ$2;HUxZ~9Uzpd^*ryE z_w(jv@vn8+x*H@eM%v4zYIEdK*My{)x=KrXY_b6I;vVypFLKvuIvCS|<_;=k9C+F2(yhU%-nTaZ!tN1Eq&Fvbu@MWNb zt*q$29n-M+QcqjviS`m_ZW=~4Hg|Ye1RPv#cT=vatYqipSOt`D`UwL=wq~bnU44CP zjm)dNYOwpv&E4HA1sd%q_&ISR1$-1)vYp_N_KY|LV@XYwviV)jNSj1Sxo;p<$LQf9 zqu;LF*ADlkj11|TVii;SDmvAZ_p{v7r+M#5veCPWHFtczU!ZDHM$7PBIZ)K}KLIY4 z=Z3zzOuF6GGRYZpuU7H^fp#xp=4JcJ+mqOUFEtFh71*$nikH*=v*%h>R=Txwb3`36 z1C)M|hWkr~HR(gITG|8ltlT5DX*!rdpVyKok&TeFO&joCWUHCmu*vkui^=_XgH9`v zP*@cBSFhY2$K597ZN0p z-;_$-c}nj`8_#;_5`MX|%q#aA0j+LkXKw#8jqD5|DJd!Ga%n27`(0oYYo1V{FyB2y zDRy*x{QPtZ7)k8NXH07dJr`=t-kxe@+Mxuf=ly3cHx{$f#Dd3R!h3nwlh@l+^EpR4 zfai@pJTD(@pO>_WV)(usZmqFFbSXT%`f3k2%{=CtafA88*1PzhAyFe@G_qZIKSPSh>IhSVr z&Ua>7E}P612Q0%~Mk`0tO<&bL33yn%bdnM8pUt7RD!4a4bB#4y_z|Fq%t#RUG5@>v zqd6lLO9!o!&Dp_b+~tB{4;Rx0reow@e4U4G$yZRzH?i|^Ws|<7xC2M#mnlmYaL^M= zq0DsFKz(iT#94!bgS|k_>;=9l0nGALUuY{dW&pOw z<5nupSB->M0?N{0VPUcVd|qDczp${d57f_%8h80Sld}||sLDO0$($qSwLK}{%u)yQ zk+-~hIqxKOm26LTXT3UP>EwR0xfwQ0)sUJYdBT7OanD-suhQCM?d40CXiArstOWB} zBwc9!c;UxEFtdGFwk(j&bN&>l@&cBuwi`38Sv;Qa^7p5=a?^RVmSC&0l2THZq_f{Y z&)orGu&==bIlW;?j!j4)Yk%2ZKNNzry}g|?eg({01qB3}Ug(h; zN3Xnn^ZD<0#0j}CxO zUvxr|JRo>w2T<4Plx}j`unYTcR`{3EmFV_B!D9gkw|dMZ7uoCQo3`2N>zVL0G&J5a zYb1LvB&tuaFU0bysH|SVH>ktBAIwy$@EI1l1m-NA%r@ydRRAd(S=hoO93nzE?r9<(0{)0&;t<;X4qB`V|iAdGNIUnU_4qPjCii zVu-o}s_-t>WU08v`1=*!ykFp>ZodN;h1y;;oy$4HqhCy~gwBu1bc%a>gVx8s(9^rO z%nmL&)p0df%TNyy*0@Nmqo9#E;!IbPL8Mk%Maq_0&k0S6!Jo54C0DZ2JS8-Cv$lGX%{ndTFxPxfq%&V)DNuUy>0Hx0dqc&1)VdAyDH+@-r zTJ74=$26zRzRA;|f3vCD@H;W%(DR;KlGlFY3Mc}8NDADY;b#hKpreHI!!-Awz6sJIkvBFWcn=@yNxz(y1|@F zSeM?O-R4q}tGzrFPC7{qMIKT{Um!z)7 zBRj?l%fBa$U)hv!*7=o85orZ<09Y;GZpDnv zV~#w01D6@?$L7GlSFq#^cI1JZ@ZT{;g+S0v|MAGv1N2qjLqVCXt?@PV6?j$YaUn*TWX7;W|dw4|- zmdC6AJ%+<{RD+VK7M`5We;B_!qHAD{ZRpP57g&NVMB2e5Kpe z?)>uyW0&G~==rF>HtqV_sPpE>B_`zdifeoPS5&9xeNx6}N^6gnkQGRmelgp1^P^p| z1~$v}8l3B|njfOG{B!`zs-I;mFrxfpxPb&9-ssrk--uU0-cbRNk6g0J1~P8!5@RqY zp9Bu2+VbGR-@}?4%t1n8N+9|0O%rJ7H9<~3^Yd08z0oa$bxsLWehY^yGk)mAbNp&* zYj2RZQ3O%spBCXwo@FCZ=5M#~Vf_(MVzu1#)J$y?G=BDJaL|ZYNC9=HI7Fqba<>lQ z+lTGBK(Mro@vPG~`s7JA5>@FB^5)o(q34qc+RHL;?8&)dx3X)`p};YT;s=+DkdHoM zwx`0iC-Xk1iTqXCaRZ0ghc%(%0nY8)Vnh*PBTwaJNuh7C8Kh;KfsM;??E+@&)#a6y zm8HJoI`|dsvw}4wQ&P7L&#t@>j{v>5W?`Urx5emV*}mVwUwIoB?uUbdVc+naVxw4V z3h{yns>gP-A7-a`#SYN;8oxBvm2ii2@w}3B^HcrlDzYwk%(2NMyBv=b=W7}p2RhA$ z_G`&?B;+b#y+||+!LJDTN>;}cJf6vO{vb0)9GU?5lU?$qdKZLOSz|N*V*)P@tAj3y z)?Hm)9k4OoMfhbOh;QE^UlzR@~g`IbqgQ4JvuIu53Zo07E(y{hM*1OZck*>PnEr!o2%9gV=p3_Yf{o& zFdJS{LM-|bQBDMvPS4?Z*S*wyVtwdAA46DPZZ>;z-1BZ?FHKRJWgU;`v4MH3KJKAo zG0+LQ?euioQKY53*Ln9pRw1X&^C3-YTK*YP$v6RVxnw%SQ;G5`8FO6f;wV15SG{kh z<>L42ss%P`TYTLdS7sj7chuwcy(Fz26bA^M&SW*%bvrx~oe6~pt*nG+yP@Fn zw{*rd@60N6ow8BA4ccTDjn<_BFGN+f(qYJb4mnnxHB$dT8COm}cC5fWRv(8Om0n&S zF$U_lp_75wKLdWb18C9arEaGe;;)u6DxOp(x@)(BgGt9bQuta$oyDmgtu}pwQvd-< zK>YCQknoh*`A(_b2iC*0klh!*U7CK;B+uDJdU}<-`MWhs!A_=jZ zR`L0kcy!<@{?%$>VAKEfaVgLXD literal 0 HcmV?d00001 diff --git 
a/beginner_source/quickstart/images/typesofdata.PNG b/beginner_source/quickstart/images/typesofdata.PNG new file mode 100644 index 0000000000000000000000000000000000000000..5d0e0291eefdedb8ca4b5e00223cca926ffacc11 GIT binary patch literal 14723 zcmeIZc{J4h|36HMvR>CGOJr%aBx{z4QG~2xmz}I*NoCKV5{e=_BZjO)sO;+`7s8BT z%n(9_8M_$U7{l+S>-#ysbKmFw_xHzr?%O$*;dsBEukHDKEYEpqVyw%-D!|IXz`&t* z=e8LG!!acIdG91M_)0du`W1X*3^3ExWT+YtTmUbQd)_j-#lY|}iEZEI1bBVw!5y0b z1_sW~qff>j|1viQ29+~qWjeOm$9Kg{w6$mah!2(+%oNA{#(hE zlP|s>pR?yJG`;=h^>^n>&u*Rjdj7|!{QS_H$L`qQ<#g5LJs$JHcXos!KK8E{LxG{; zONSAPZN%Z8z|l?2ek6_pnN}XQjIam_X)`Cwp8}yU+`J@v6gKqu6j@$n&l+o05`i2BrrUDo%BU@&!f$CfRmEEr~A7 z4Ebqs#!sp5b&{!GY#7ggGMGJVESAbGpKrx9&ONKaz<#V)Esz!Cso0m5r1TlC5QjH` zjb%`|qXjP>r*bhwAC2;^oN7_ygw|B7bx^#uHd%U0J_b=Xj94J6WLe+4W5&$d_(O=ipiT5eZmYsY`Y{vIXG5(rXF|w$$rr+} ziFM46#TzTS6P8~+8_3yr9BPW5Igl%U^NzOnU%NP7{VP^Bpb%ZJk&DD@(XPrrqJP*t z>5!VpqaT8NbE_`GJ7+_4)SX<`dR28zb=2o!8Yxx?t-m+FpSBiXx*IE`5nc%i<}9F9 zo$TuGKfJ1{oa>r#D=*IE;B3$~8$&81>9lGLs~L}><%ee`CvuZmRkAs+UC`dVW)#dW z;da4|v(9Lp=Es$rP-U~e!Je|68DbwHA}`{x3OAoi^}O9@<_6hws2<$*o%#mZipU!q zQHdL9S>4*cVZ~D$C9(Dx%Mj=7?3#@$$5~~Q3y@Sy;dCUUv;1ecX6c7x^3X7yTixap zl?)BXCaYtvXgMh^xM}|UFO!AmS|7n&yDPMHkq;PhKp;svDReS^b1VO z(=4zLlrZN;D`?te)JjnD%aQhk5932uRmHkSx2(0-KT(moxtGAnZzO~r@q;?0gObwgt%9{dX?QtsIi z@xEENsz$wU2cf-?1s>}c(hkPdP9|sKG8&o_%f%hDT@zYob}q^okG1v{4m2@*-u*nx zm3wbcQ%}(#-pQ@pA{24TZ1?>qZl3yRUzq@`hsme;8(ljeUDHh+C?S{ZNqG*A! z%f64#-s4Iq)M~N3_}edh1xo{@^uetw?Zu`%+%mPes~KE2T9YFblMpn6EdTFKtAhwBH^%HUw?{SP_g{VZrFNsKAqEdz;Gze@~r}y{3RgeEr z*k7*HOo!PWxclG{?T*DTaR;rtepLe+Ns-KBRVb%@HSX$pwRD!S1=pT|&Q`ulp7z~yFK z^x#fdLzw%YHPW6hf^q0Q-}6E+O<+Uk)(( zm?475zS#QyFIw`s?`{*9 zh)t3!`yx`TK0P}-rq6##ByN`;a?q>SYO#k?y5xeTk)u1JNv(EWm>N>_33o&gBZO~n zHwPYMctnFd*)5V6InOPB>Pp&PuZ%BNe3qxm!^`5Yvscbx%e-<97HfFE?MWxpyYI|g z`x@+!BxJ7hmDNLUG_!JOYp6-mP&nQP=0+nNqz_nPEHg}+x3xs?{G>S~te7)vQy0j| zzn0!oHpa>xwRyzuZ;m_+{H_Xf-AQ+-5SpkG<7y#eiYz-cLY(#-$!6VyBVoeN=aPlY zM^~J*Ch5mM?{*Gz$-2QQDpm6W?#6pRbD#_iXPf})(DL|>E%p8!7*QUv%tAWNaFouD zv1dP2MvkPvE0Z`2aEdIyGlVe~)0hL@+8S2meM&tJj`z{Ob~Ar{bctvzL)Y@Qr;C(Gqs+B*Vi-%D|0O{%anW`mfd)mb+&< zD2+YpmrCssiTWPH@I>AjQowUSLm%EyS^m(OlG!?Y*<8k(&}!unM4a8|mkB{l2}M%- zokV+tLhLs>xFTi8z#~j9t+Y}GdL6@8(y!kA@OkdILKjwf`fcm_Ykzh~9E)&hKHCWf zcTg!M%RHnvQ*^LPA(7;e6v?W_FU4@>>*Tiq&&ct=ymEs^3RD_zRkdx8&vG69=8A?% zRetKbQlxzIge-FKI)#jb<`nK{Qr75GYnQ=4-`hK;8x4!$vAPqv)DT%A76Fjs80rhO zZt8$q&}bVn86>C0As4dxeKx%s?k<~a6t$@vmA2m#wI7$j$exDX=pJn z$nLo-=Zn+v?<)i&Hiy(@NIU35dhcY&bgtUwpw>cDVMkH;4**jQ*wi#~!JpPz1kXUC zwydKRgB+{Fus(RvpX%G+E{;X)OqwQ)2aP%m9}o{WWDb8lI>!8y&1qM21E1MmOIBe! 
zMLA{ix*!{x{rA?XiO`wX!;wCO`1DM?KCHf1G^Yqwc1zvi7n8{DXM#Z(buJUS+P34^ zZ~`$~R+zXb!Em@PayaD24{4`<4;(g1B_pGz{21pUhdWV+nQ!46y|7H)L;n8Lq|^BV zLs+tFl6pq)K;&4zBQN}gX^?~zv1emQS0o6fW^XYJBbRXQLDkluPaHFP_f+Q~V$hO8 z6^q&sTVPOlV)`TMfE<;g#ibsIs*<8VryeiUv7m%a_%&SiscQ20{Zl*IXNzbJ)pg@M zI}L9?*s6vKNA9{D1==TBz1$mx5&P`CKN>~1{&aXi+flQ2|MLh13hKbiKeY4gGcIXw zWABvAUJ=p72~Tq2{bP%zC$RzGG;PhQgXkd|hg94!s$<`?{5{Av#25DLv z5?V)WeJW?gG^j5P=(}DQ0&nnPJj3nb>s{*9L=b0OGqYa}R!JRa&f@~Pr_&CwVXL~dbKK92nIzwh;IZ2f6v;Kxk z-<+dorToJT#zh3X((w^E^&P{=?HXj0VD?o7^B1DCJIKN}kzk88a=Z1Tv?k<`U1h}{ zW0-xMcvXho{ifO$D?YDDxJM3i&EAa0!OZNt1ZjPzV;XxyNhu-+zvIf1(scAI*koE| z!zek^f`;b9R5Ehy(Rt}C-1S${)bIBaq#jo>a_D9{K1TH?_uevi!uzr1-(0Ku|x{kULNZ> z{>I8N%$@W^@QKL&50(W3nY4O*J8gAIv-<>vKklI{axJvy#-9;AbAvL#h&TJ)GhoT# zU6PK;RSy8FtuwO|w|AhRV7UX=XI|Cv+A=-z#AL|@wLbhcV&`vk@Pq5%Tvo7n72(li zkcS%I@(WiYpj%Skyelf7ojNBJfRU5FM3{SVG4W(cWsz;M%jVi=c(==U44IVUpEn{4 zv*ah_4XE#YpRFA>HfSN;9j}m?D$1umBu4aBsY4%!^cD&DW#P=B>OrUo`nLaJ@J2Qc zUUDshg7`5peGt6Sf~K_?I7r6JOm;?rZCQKd5nAj0=ME}|k#9`%8!{{`R#XR{FOVK< z5{~d|{22=q{)hQ!?$?k})Xb=b_L%x)Q?n?j0ZmU1NzM&%M9s+)ix5rl zCnqW3Q3qVpL9`cM?lku^PQ=l>$*_)B{r6`kX%87c0^FPWbMU~hyjUoXR}TG|pk@6SsBpcowyNoI4* z<)Xzoc$u`Sc3GqhD@Emc7Ooc*ub}MX6fpe|kXlea%VJU$sNZz2PPq58sL7pzh`n*I z!q@l{j5_J^By+`KM6l_??tEd=yLhu^*k=z@4h<4T+XceHLQ!b~rAvC|KEyIw#b@L_ zYH6wTs1}B;Hj(Wy@NrqB=HxNRMsm{eouwsL5n5J)+~YJw@@NtGttlwWAGOaC9IW%)o*P4`v$M)*>gxm>!yJ-%nN0*pXl$#~3IQZc zvOOygg`Wje%0wB1qrrdEo8&?gVlkBZSfmX{cKA{sfwC_vnzmV~zjSl`u>>Dzi#KLSeS<>6R%c*mxTYM+&Q` zgq;BLfDtWo8kG7b+_NP74A`ZJ-*;2I!ziMiYZn~kkRb2EK2b?IrPKl&=5BBf|7T}& zG-IZ$?EHwSj-4qRD8r{|?s#k|Yq@gUf|2<;4L0*y*?6^iNt&;QrbfYBTTr2m{s`gc zPXo7-=Rjby2CpxxEQYWF;YX=}zkXn(z@ z<;`R%8_9evU#kAw1@#OM)o=G(R#I-PHg@_6amgY%F`hJM$kO^S@*ogZykcQqGW6TK znZA*UZcW?tp?>O@HaIHDd7&i^A(M)J$Jo2lt6DeuYBQwILXCWbvfL;rHT>tl6C_bK zTa#;%A)mm36g}(vp~Gy+qrdE634Ks8q1A8-OKTkWu257O3gyF;HveaLbToEPD$^#t zoMc`D^KD)z>G%yoa&_@*oiZ&S;_AwD^G*=$USoBTMcS_DOF19N5f?Xyk|GttW|MX8 z%Z8XK3*z!fL-8BeAK%r2NxDENHjNKlptgDy)$NaeDu_OQS_z^w=t_5!P53IHqZeDt znd%K$1Ep$K5kNYNR_peSbQlMM6jEEFFgKhVGL^jy=Bh5^nvJij<_=iz;wz;;S$P`~ z0m^D%0ki8jlDQ4Ry$B*|RESgXfM`#HYZ@O5Bj2V6BCQ)-nln9sGuUO ztlAsHNl8WSUs|aah?iXGA&L(MQDFEa^IWZTs#D8x(y*x;#I)rLoJ2uEBg(}pFI5Jc z-7X`UnW5|Cqir`IQD&qXzCY>(sF+-D<@z6NDJI(~XEP*fC*-~XrW9++NLly{=d{kk$v!^SMqlw?)01pFV_9MK&-R4>^l99s8q0qw z8>SEl7L6(7!2+_ml9+v)vT(K*z{;KoyH|;8XQQwu#xGk$`;bLZW8e0e`mz>n>ix>c z&ZbG{KzZ`y?aHYfdOEF6j1{o>@%%(8HzL4_EEMm#x9q90q$%^7L|?W}6b>A`CG8f% zuMM{HW*11D4e8^XCE)D+yH>AAESSXS(!<;pw)L$rmjR7KINuc!tX$@6<*%D6pyarJ z*+0mO6Nzk`Fnh)N;E*Lw$2Q~yeSV$A1eHXdNzz!Ym+q)uC#vtxrb3&gw&p)kYZ0)? 
zYXS_v3>@qTnD74Qpre@5!6dezkEgoQH#_gpohe_p?Y8SciVbN``X!zNP4$o-e8=Tr z>~oY2RqS0LofxDBS}gYx_R#A2o<}TV(Bf{nzvJKX)?Y04ET1;r_@s9mjw(fd!2>R6m4#xrYh{~17=Zzm8*BhLrAQ)|cXWG{p( zkn=^LGB4#z6qCT==@VnCrdsuL3gWpqvjw__og##lZ0Bz(P6w-}czirr@Zq3{0@88j zgc%pcGl*FciP-KvWK2tEzUckN?8UoAhqWCQY0_{JzeJ)6DQm!JB9W#IPI)7{7Rl^|!grz| z>RUXU!+KY)y_k7^+W793yrS?{@!YKx{H33ftvptD3r0R6x-TK1MisMXp9Bo3^!WAa zc4M6+?Qn*CIxQ6<*@t=fN7bsMYPAf{Vud6N2zxeZl3mCg&+trX z@b-AYjNZ&Wi2LBaVpWB{qRzn&4kO5lzjc5|HcS!n`zhDvKi49bT}1-UZw$%bAM4P` z@%^N?@r6yM@vozsf!Z`{wh^3kQl1P z>w#)|xzwQ3^|#POaX0(%o5LTTCyKpJpe~2JcArh+v??g|cofxw@K4Z@FUL7M7Py%I z+SID69}8_+sn40PJvtJaGRD)#cO?bK`kf?&df&r%_@naUm_27XMuxc3Qf46#%pyAhgGv@dS%p zQ672R>*msjKS^%*3w%V(=Tmv#8V;q2#k^22Ix0JpBk)a4)6r8==#-J@QNa{#!61Su z%rl7pvfhdJ2vZgpGi>R}prdq+ka><{3f0Cc= zkotyj&x{wn;2=3oaDe=WVv}2fDJ30X>YrsPq;^DeLFP5L-xXVGJ-k#m9~5>aUr!b( z+7@T5LEi)pM2#~<2%tR|$kN4`l5u^Wddnh&J`by{{;)}9>T1$bH5x($l8Lo<(LB-G zX)?85R53Rr`}tkigSEDulEO=u=^FE@Us8P`ia{v|90haJYB48$6ltueQ?6b6;mRfS z?--fJN^DAHo7thOgMm%pT3Kw@uL`FCAz365)(0=IzV9d*KeCCV$xT!zq_ab(oQtJp z^}tOdR$AOI>$<>u1bzc-vv7M}4_;U?niZJq8Y$>fDOdN7WTe7&d${NrTwrmn2_P2| zuW$K^#WhcKhl=OhhuvoDwP^Y1?DBJzrP1!i@qPhbtq8|DM8-iAR^Zc)J<5`%u_ zZMFFB^gRI0Q7Rl=rs%Em9Wz7GA-H~@jMODF55`o{|88QU5@Ng?YG;1B0!uhSX*y1C zG&wXTUODJ;+SgW%NFG@o6}RB9T2g*y56kd$L%e4s7Oh8WK!bjk-N0s$qHF12HwpY( ziPZPTsU&lHn-(p3gG7VR7sNfX);IIpm8A*r%Avr-MOcoA2n(X%9s7UhKBNsR^RUzK zGm74x5Il|{JD15uO-xKF1;P1GR$eNEG;QZP5~2l6CN;vBKlTtH#A&GH6hYfT2~JTA zS+#E{28e!90R{=Gj@!P7$}v5;wYKGX=XbHf@P#uDN7z-*AZStdk)81Iqbg|3Mp0>2 z+yC9U5VQ-J?CtqtRgf*VOetH&H&zsad=-<%%5}ZBH))pTgM$COb)RNC5)wh?-1?iC ze`Ufc*L3aQ;gYWZdhsK&sC6O34MKOn8zF&hZeC3M4Bz%He66Gp_@8iy2lh`IFO8uJ zI&9|eEqI$9svE6cI~uRID*Y(+6Z~wRv6rFW7JDd`nKP9`Q0=DpYZ4R%aruOLsJg3V z4&~@1;o;?Wcf2(4E^$I6&8>GL9f%A`Ze5Lx@#a;qfzncyZwLzido>M&!ASUc?asGT z$0;^kMm95}zW3|LJUX&j+-^psniHRtBgx}F)#^M>xG(4WQF~C@ zVz`L9+%P~}?-5x8gF$i;RZ`rZN0J~GW)0Z4!6+((QxPfkjQWD7fvkntD+&Mchw ze9U3{PMiN?=pq^Q_qsMFN7)|*$rn%A{4am0@LE?lk!s#+>@yXogC_P?Yc#?vvp>+f*t;k8yL-j>u@B?zo;p;voL#vx7TGevv=!qv_iVZKs+301YV&+3OJ?R?H$EJpb95 zjza$#aBk>|wfw8#tBTi(XC@Enh%FXvgU<1f7GKj({F@l} z$g$h!7p6?dpMO0F-Pk%3%0rBHrDb1|0_a<#P^>dXaYqjDDsnBDj;UJPZ_B^B-egpp zqpuuZjs%!^s|cm%Bg^Hw;wXagREhi@Epkb+x^;u_@Q)A=aQg$hiUI+X>D{XUU7L3s zNjmf%|E6TrRjlTHdW)-e2DQ+(e0%0-v0Wfr_s7D>xzV&Yo30nQ($5!MH=8<1tpeGP z!r>pPHNN6;X7bz-$NM)H1nPmcpE)~9)PN$kCkX%Dp$_vNf7m1B@Z#rF>Q$Y(oXDqz zLth@>;?gWT<(S%y3?7G>PwUzGq5Kq{^f`U1S^jUK-5Z*bx%Y-BQTp~5s0;^v0JX4p zCo|jBo?}jRhHM#UfnS)EPIn%mPEfq8W>BSh`cu2z%G>r(_7xx&tg>f(043=}=bX`T zq0<;oIor|6Z5bhud+zr;+du5$QD?SbO4;{_aG=OUuCwtqtJemWjU?93EFJwWnFlDU-NVt0*m$FqWv2#|#1 zF=QZXO7VTNQTP}zADWUe?9=}S#-KT$m4dJxzbR8ZxL#zcg!Dcb>Hmh#JF`(bWa{!u zd33LN_g8{L8Cav`B)qQBf#0FLM;O^O&X=Q1PvXu@D``>1a6>In2-7-GtQ+6kv?t)u+ znoy|gNl|KNMhP-pwU>Ov6KfYra+*yh-M?c>r=)BP<`+tl!QpGnSy0_%>g^TpG%Pt4 z3dtT3?w##QNhF8#nyi!{^9~e)Tr@o~o@aPYp7n2CCqxo2MS^S4Qi&~;Xm~GzL|D2O zo?4-Ihyx8UcOr$0RHGW51+D8wIfVgM*W0r-z*Z31AL-~$?3RCRbWIv46)9~yQ#W!+ z?-TwYi=W;E7D0|2El!mpF?FK}-x?#^A6#cKWPBNpxI9)f@!#g%>+6 z`HW)0ZPgJgKB?LXyiHOr&G#Yx0J3dPUhe#15MWdOGAs>%8`hE98wH^yKnEtqI0^(o zmps3>1F^07&7`~Fzglv;D-qXR@oxw~(L8hZexnQYG2-5e^ zCe!8S3?}vVx>i^VeIeF%es!}6PW$(oh6E?d8@bv#5ouNJ5t zU|t^F*wU%Xx`GdHZvd@_uu=eI9{}8bvQlhBq#p8J@lwbg(4Y@%G?{dsps2iNJpe|c z;fg_kQ=(>oTdMuzg<|#5YL%N508b^fL<@%$EU}(wcLk%xiizrq*vR?VUwY+q`gQ|) zMKd_ZR352{1A=6gKCV-&Tt1((qj9NhmzqBX9y+@)=?xr&HZ!KNO=OOGFU9CpYUEaSlk294+ z+Fo>uZSSY6(f|?w_#TKeSI2y++fsmOS11RIT0A4DnpY(7)FF86axO`N8(kInJyM1h zyEd{ZSEmu>M}7~mf(k$@;duO|i^TMHjV#<{_-X($XFRTm=Kn2Kor|q z@JLai5lV*>r(xspQab_SvmjB*JLV4a^%heT$AjAmx{h@8W8;Iy@13oQWY>MtMZWJ;@bnc;b 
zRv-msD}~-RWIEoA7YVsLZ$qJ(krdlPI^YLDq0;~3*o^cJ6mp=x6ujgqU1YzGr_Y}? z!JbBKO(b}ea4O`td34V@;0m_9DZB-gzLJG;;6M+%!q(qt&}uXcXxmAr{|ln-KLP|m zRD}9f7f;l^gk3>xP1eOHgGd~5Sd&g0Uq7F2m1V}L`YJFFXpB`{2lMya9`wIozIJNY zoaS3cwCXtU0qUS8x#R#@USMhD$OF5Xo~IFIL|22I>N^s4gzAKYFr}eDU&eoNti#(X zHNF6ShL>`u!~zBN6zQs&rtg3-Y7gYL1^wr?XEZe;{}pIeq(EI(V6~E&2Xw8P2Q|1+ zn%}$4_x@sKui7g}ETAgHY3wgO7NNkwublt!5|EnlS=az_otjr@^F2t;O`nOlk#9JC zNq4sOzEZ!)1omv%@?TmDN8t4^XE5f^l8Nmt}*#wzewnz#?DZmo1V| z+*hc$h@Der7bU_MyHF&9gwja+Lit5y<~_y+Ap9B?L(8Im0_0R6AjKhCx73?D#f0d6 z$0>&?HMDiCNGMX=&zaZ!>nz3-;HiI0e^=SI$;CjrkC*L@;`N!)ii0mYdgWV%H8WAc4obrW#z9G7M@MQJ1uq1;vST`2VYLsm5v1&p; zN!+rkl<=Q`d*6Bv$enUyRI10%GupDqhyUd|yoK$ih?G8bY)SF4@xdoNmC-`1)>i#e z4fcEPAM+4!W{^(QO_ zx9qf9jKILeO&>DR4O@z*Jz?U!!x#U}uVK>UDWR15D?uh0rHs9v11(tPKkHXJMA+I& z^=JtFA16y53Cf0%>a?2|TC~kvH|*#0Po#i1j4mJCT$q5N=L`WMJug}KM722oPWqIp z%$63Wu$-zuaN#VGuXv*9x;x2?3P;^*L8 zdeNz`w93HYS}LLGNZ;gI#T9j!Dt~_vogmkoCWlP%xHy=0x2#zqvcqXb3YvxE1C$ID z(^anbTQzXMy>PC%MTPenT0f-W8Df-OKJBtab(iOk|AFNL-T)>vK z0rTjs|Ks7D6e@+>a@W9X9PT}bYW2VaMOM`>LxfKjIr;bh)ouiKLDvaBdz0(U_GTKY zQJX<$nG$Q2B7&cMjXirmi}M`6SS{R)}H#~?E#}U2l^0Do05_t z{5h!nThKZZ=?zibQ{yhFqQ2#}Ahoaf`LmS>#aC>N*vW)U&1C^ky9=PF=u_uluSvi% z!o>J;8X5hqnsIc89n=nEy(FpZpWwOeHJ| z6x^1Dd9x8M`kpVwl(B^V1tSV5x;{_5TnQj+Z&K9$jPv(fMKj)&KL+KBfh<#%yfh3+ zvpNFyDIvfV?QqTB{J_NoG91WB18-HXsQ@;R3L09t>%^rZ%0)_SgNN~vfC4rG?32O) znlP?NEuI8D9kxIJ^acb~hdan}n6}{WG3y_?)W*hw7K`#j{UK}#=cr_5t=T7F!Iu=W zvkc`-?~~elhVS%PTG%cwC&_%L`xc8(+H&Je;zC69Fa3oB zu$%RDk-rPO0TG*^%rpV`^QQ?XMm4xX;`-)~cch|KHi2{%!>PlA9cV6q?CR1@vnhG4 zBHENwgNlx!KVxQRL=Jb&DE{03b|V47-`+{!_vy!>LqC97SeG*4kDe^v{{0(Ux;G60 z$m%|uLT_sr61u}~We9-TyD1V$s$P?)R;2x6)Ru~hOmySj`=)Js*bL}0pQJ{Ze;!=B}e+Q=MD8b*Y`0yRjJ82e%e7&Q-H@7Yx>*cU&DxN#k z;jSXHv&H;#>hpEYR3+N!yM^0zN768X<%v=w@UHN@rGu26mr(eZm}CF;?MXNOcvh<< zz4=ZDtM4OPp(MyKu07c`1ulfKdiYWL`~=z#iVTF9Ziaz=I)S5py86lBiNGMS>8k}E zMeXPsPn@y!-%L-g|M8RYUC4vY($piGv_LRqFahsgsqH&zbeKJ$twC8jy%dxL?1A!b zK7PC``y%Lk)cHU9{+8D-6_B8M-^s8knJSiXY`y-v0u2UbFnOH};B#fYtv7QGekTlc ztBRgg?|jS!T3jh4tbqp>F$cMo^U4V8$z%S4dOQnoUxt~uS2KXQu0o6o*GQP`)PrhI z4Q?G!)8$4;!-gfjAJ&4H#+42|2-wn`!9gP*<@?*$x5v`xQ;9C?jLy6B*$NcciPXIC z@I>wHnu3t%2&A9O`ixrxtx_Q@Muc5tl8Ez z8Zs>QQ#^Guso+@GeS5Z^*J^^8%{U9v<+LuG-I<7e+UHfCa8f#M4CleSWl!6AipHXK zmxWyC!^7iVH1C|BbzQU9$X&{Zf81$a8J6M7pKwD~F4vT`Zc)sUbq;;)WT~p&zKG6g zk2OZ|p1V0vWaNqHK_&Eub^n1#aAqHVlBsN^JPj4sV2O}(bJCX`pC79|b|t5w02db9 z9i=>99fhRgqP890aIi(YQcG%uk5BSN79V@XJLV}#+utJ%ukvf{Bu3QBf1f%XNtNEG zpN!ek(2D9Fhc9;HLquCpnxtX03(o-hWi9sp)bc~_0S#tSO}MxQJu~??1+y`}f&ajl zx-rFtoaB2Y+}Cr3G<^8BO=NlUM;v8ZZd>|mJgv+oZCgKtjzusOHylb22(WamQ@WJ- z_XbvZe#X`9$FAnXv54Cmhx@I#DVDwI4Z`8HN!}6!f3WX*a#DF)`pLcS5%``=RI;_~ znQ;&0fu}4OLoO)Zyp(!q&QD5;+ZY z2o2Zxdzyo_l#bXxhC^rOhb@Qk6nHA;9JDWdC7}*>G-zQ9&-U4Et9WeBm93Zm-GqiPTSq3`zyQ6!=111#w2V^ZS-d%4>L+3@yMME z&xW+9J~75h{t1vJ{sx-Pj{f%KkN&6wdT5H%e225RAKmlY)P#<>%+=P7k)#|&-VEyu zG^Ak7dH1fc{umj93s-O1_O)~L(4g(SPO7KwyoOK=Gvi8`yM@~f-=Dj3Bt*|{C3LY0M3B*;44>vBn3$x}| z%%Yhe)hirV=k+O literal 0 HcmV?d00001 From 8f249aae79f38936a0f714d2965252a2d2e6f1ea Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Fri, 6 Nov 2020 22:00:01 +0000 Subject: [PATCH 29/55] add images to data and optimization --- beginner_source/quickstart/data_quickstart_tutorial.py | 6 ++++-- beginner_source/quickstart/optimization_tutorial.py | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index 0b205af9e90..d622f63ca7a 100644 --- 
+++ b/beginner_source/quickstart/data_quickstart_tutorial.py
@@ -8,7 +8,8 @@
 #
 # Before we can even think about building a model with PyTorch, we need to first learn how to load and process data. Data can be sourced from local files, cloud datastores and database queries. It comes in all sorts of forms and formats, from structured tables to image, audio, text and video files and more.
 #
-# ADD IMAGE HERE /images/typesofdata.PNG
+# .. figure:: /images/typesofdata.PNG
+# :alt:
 #
 # Different data types require different Python libraries to load and process them, such as `openCV `_ and `PIL `_ for images, `NLTK `_ and `spaCy `_ for text and `Librosa `_ for audio.
 #
@@ -51,7 +52,8 @@
 plt.show()
 
 #################################################################
-# Add Image Here
+# .. figure:: /images/fashion_mnist.PNG
+# :alt:
 #
 #################################################################
 # Creating a Custom Dataset
diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py
index a2b7311b306..12cbfacdb19 100644
--- a/beginner_source/quickstart/optimization_tutorial.py
+++ b/beginner_source/quickstart/optimization_tutorial.py
@@ -36,8 +36,10 @@
 # Once we set our hyperparameters, we can then optimize our model with optimization loops.
 #
 # The optimization loop is comprised of three main subloops in PyTorch.
-# ![](../images/optimization_loops.PNG)
-#
+#
+# .. figure:: /images/optimization_loops.PNG
+# :alt:
+# add
 # 1. The Train Loop - Core loop that iterates over all the epochs
 # 2. The Validation Loop - Validate loss after each weight parameter update and can be used to gauge hyperparameter performance and update them for the next batch.
 # 3. The Test Loop - is used to evaluate our model's performance after each epoch on traditional metrics to show how much our model is generalizing from the train and validation dataset to the test dataset it's never seen before.
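The three subloops named in the hunk above fit together roughly as sketched below. This is a minimal illustration, not the exact code the patch series adds: the names ``model``, ``loss_fn``, ``optimizer``, ``train_dataloader`` and ``test_dataloader`` are assumed stand-ins for the objects the quickstart builds, and a validation loop would mirror the test loop on a held-out split of the training data.

.. code:: python

    import torch

    epochs = 5
    for epoch in range(epochs):
        # Train loop: one full pass over the training data,
        # updating the weights after every batch.
        model.train()
        for X, y in train_dataloader:
            pred = model(X)
            loss = loss_fn(pred, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Test loop: score the model on data it has never seen,
        # with gradient tracking disabled.
        model.eval()
        correct = 0
        with torch.no_grad():
            for X, y in test_dataloader:
                pred = model(X)
                correct += (pred.argmax(1) == y).type(torch.float).sum().item()
        print(f"epoch {epoch + 1}: test accuracy {correct / len(test_dataloader.dataset):.3f}")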
From 911a07a72730513bf84522b5c6d00fc6ae7afe43 Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Mon, 9 Nov 2020 15:25:32 +0000 Subject: [PATCH 30/55] moved images fixed links --- .../img/quickstart}/fashion_mnist.png | Bin .../img/quickstart}/optimization_loops.PNG | Bin .../img/quickstart}/typesofdata.PNG | Bin .../quickstart/autograd_tutorial.py | 14 ++++++------ .../quickstart/build_model_tutorial.py | 14 ++++++------ .../quickstart/data_quickstart_tutorial.py | 8 +++---- .../quickstart/optimization_tutorial.py | 20 +++++++++--------- beginner_source/quickstart_tutorial.py | 4 +--- 8 files changed, 29 insertions(+), 31 deletions(-) rename {beginner_source/quickstart/images => _static/img/quickstart}/fashion_mnist.png (100%) rename {beginner_source/quickstart/images => _static/img/quickstart}/optimization_loops.PNG (100%) rename {beginner_source/quickstart/images => _static/img/quickstart}/typesofdata.PNG (100%) diff --git a/beginner_source/quickstart/images/fashion_mnist.png b/_static/img/quickstart/fashion_mnist.png similarity index 100% rename from beginner_source/quickstart/images/fashion_mnist.png rename to _static/img/quickstart/fashion_mnist.png diff --git a/beginner_source/quickstart/images/optimization_loops.PNG b/_static/img/quickstart/optimization_loops.PNG similarity index 100% rename from beginner_source/quickstart/images/optimization_loops.PNG rename to _static/img/quickstart/optimization_loops.PNG diff --git a/beginner_source/quickstart/images/typesofdata.PNG b/_static/img/quickstart/typesofdata.PNG similarity index 100% rename from beginner_source/quickstart/images/typesofdata.PNG rename to _static/img/quickstart/typesofdata.PNG diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index f016719181d..d8b493f2149 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -235,11 +235,11 @@ ################################################################## # More help with the FashionMNIST Pytorch Blitz # ---------------------- -# `Tensors `_ -# `DataSets and DataLoaders `_ -# `Transformations `_ -# `Build Model `_ -# `Optimization Loop `_ -# `AutoGrad `_ -# `Back to FashionMNIST main code base <>`_ +#| `Tensors `_ +#| `DataSets and DataLoaders `_ +#| `Transformations `_ +#| `Build Model `_ +#| `Optimization Loop `_ +#| `AutoGrad `_ +#| `Back to FashionMNIST main code base <>`_ # \ No newline at end of file diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index d0e012640cc..debc817b2a3 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -136,10 +136,10 @@ def forward(self, x): ################################################################## # More help with the FashionMNIST Pytorch Blitz # ------------------------- -# `Tensors `_ -# `DataSets and DataLoaders `_ -# `Transformations `_ -# `Build Model `_ -# `Optimization Loop `_ -# `AutoGrad `_ -# `Back to FashionMNIST main code base <>`_ +#| `Tensors `_ +#| `DataSets and DataLoaders `_ +#| `Transformations `_ +#| `Build Model `_ +#| `Optimization Loop `_ +#| `AutoGrad `_ +#| `Back to FashionMNIST main code base <>`_ \ No newline at end of file diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index d622f63ca7a..1c1bbe5ee8e 100644 --- 
a/beginner_source/quickstart/data_quickstart_tutorial.py
+++ b/beginner_source/quickstart/data_quickstart_tutorial.py
@@ -7,8 +7,8 @@
 # -----------------
 #
 # Before we can even think about building a model with PyTorch, we need to first learn how to load and process data. Data can be sourced from local files, cloud datastores and database queries. It comes in all sorts of forms and formats, from structured tables to image, audio, text and video files and more.
-#
-# .. figure:: /images/typesofdata.PNG
+#
+# .. figure:: /_static/img/quickstart/typesofdata.png
 # :alt:
 #
 # Different data types require different Python libraries to load and process them, such as `openCV `_ and `PIL `_ for images, `NLTK `_ and `spaCy `_ for text and `Librosa `_ for audio.
@@ -52,7 +52,7 @@
 plt.show()
 
 #################################################################
-# .. figure:: /images/fashion_mnist.PNG
+# .. figure:: /_static/img/quickstart/fashion_mnist.png
 # :alt:
 #
 #################################################################
@@ -179,5 +179,5 @@ def __getitem__(self, idx):
 #| `Transformations `_
 #| `Build Model `_
 #| `Optimization Loop `_
-#| `AutoGrad `_
+#| `AutoGrad `_
 #| `Back to FashionMNIST main code base <>`_
diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py
index 12cbfacdb19..67c4fa36867 100644
--- a/beginner_source/quickstart/optimization_tutorial.py
+++ b/beginner_source/quickstart/optimization_tutorial.py
@@ -37,9 +37,9 @@
 #
 # The optimization loop is comprised of three main subloops in PyTorch.
 #
-# .. figure:: /images/optimization_loops.PNG
-# :alt:
-# add
+# .. figure:: /_static/img/quickstart/optimization_loops.png
+# :alt:
+#
 # 1. The Train Loop - Core loop that iterates over all the epochs
 # 2. The Validation Loop - Validate loss after each weight parameter update and can be used to gauge hyperparameter performance and update them for the next batch.
 # 3. The Test Loop - is used to evaluate our model's performance after each epoch on traditional metrics to show how much our model is generalizing from the train and validation dataset to the test dataset it's never seen before.
@@ -104,10 +104,10 @@ ################################################################## # More help with the FashionMNIST Pytorch Blitz # ----------------- -# `Tensors `_ -# `DataSets and DataLoaders `_ -# `Transformations `_ -# `Build Model `_ -# `Optimization Loop `_ -# `AutoGrad `_ -# `Back to FashionMNIST main code base <>`_ +#| `Tensors `_ +#| `DataSets and DataLoaders `_ +#| `Transformations `_ +#| `Build Model `_ +#| `Optimization Loop `_ +#| `AutoGrad `_ +#| `Back to FashionMNIST main code base <>`_ \ No newline at end of file diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index b812dd59cc8..ce48b4b7f26 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -141,8 +141,6 @@ def test(dataloader, model): ###################################################################### # More details `optimization and training loops `_ -# More deatils `AutoGrad `_ -# # More details `automatic differentiation and AutoGrad `_ # # @@ -201,5 +199,5 @@ def test(dataloader, model): # | `Transformations `_ # | `Build Model `_ # | `Optimization Loop `_ -# | `AutoGrad `_ +# | `AutoGrad `_ # | `Back to FashionMNIST main code base <>`_ \ No newline at end of file From c881fe37f51ecd99a249eff79c9776b0e999bdaa Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Mon, 9 Nov 2020 17:01:46 +0000 Subject: [PATCH 31/55] more work on quickstart --- .../quickstart/autograd_tutorial.py | 1 - .../quickstart/data_quickstart_tutorial.py | 9 +- .../quickstart/tensor_quickstart_tutorial.py | 122 ------------------ beginner_source/quickstart_tutorial.py | 45 +++---- 4 files changed, 24 insertions(+), 153 deletions(-) delete mode 100644 beginner_source/quickstart/tensor_quickstart_tutorial.py diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index d8b493f2149..1aaecd27b2e 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -241,5 +241,4 @@ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Back to FashionMNIST main code base <>`_ # \ No newline at end of file diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index 1c1bbe5ee8e..4403c2cb0c7 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -18,9 +18,10 @@ # A whole set of example datasets such as Fashion MNIST that implement this interface are built into PyTorch extension libraries. These are useful for benchmarking and testing your models before training on your own custom datasets. # # You can find some of them below. 
-# * `Image Datasets _` -# * `Text Datasets `_ -# * `Audio Datasets `_ +# +# - `Image Datasets _` +# - `Text Datasets `_ +# - `Audio Datasets `_ # ################################################################# # Iterating through a Dataset @@ -179,5 +180,5 @@ def __getitem__(self, idx): #| `Transformations `_ #| `Build Model `_ #| `Optimization Loop `_ -#| `AutoGrad `_ +#| `AutoGrad `_ #| `Back to FashionMNIST main code base <>`_ diff --git a/beginner_source/quickstart/tensor_quickstart_tutorial.py b/beginner_source/quickstart/tensor_quickstart_tutorial.py deleted file mode 100644 index c37dd9eacd6..00000000000 --- a/beginner_source/quickstart/tensor_quickstart_tutorial.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -Tensors and Operations -=================== - -Tensors and Operations -When training neural network models for real world tasks, we need to be able to effectively represent different types of input data: sets of numerical features, images, videos, sounds, etc. All those different input types can be represented as multi-dimensional arrays of numbers that are called tensors. - -Tensor is the basic computational unit in PyTorch. It is very similar to NumPy array, and supports similar operations. However, there are two very important features of Torch tensors that make the especially useful for training large-scale neural networks: - - - Tensor operations can be performed on GPU using CUDA - - Tensor operations support automatic differentiation using `AutoGrad `_ - -Conversion between Torch tensors and NumPy arrays can be done easily: -""" - -import torch -import numpy as np - -np_array = np.arange(10) -tensor = torch.from_numpy(np_array) - -print(f"Tensor={tensor}, Array={tensor.numpy()}") - -################################################################# -# .. code:: python -# Output: Tensor=tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=torch.int32), Array=[0 1 2 3 4 5 6 7 8 9] -# -# .. note:: When using CPU for computations, tensors converted from arrays share the same memory for data. Thus, changing the underlying array will also affect the tensor. -# -# -# Creating Tensors -# ------------- -# The fastest way to create a tensor is to define an uninitialized tensor - the values of this tensor are not set, and depend on the whatever data was there in memory: -# - -x = torch.empty(3,6) -print(x) - -############################################################################ -# .. code:: python -# Output: tensor([[-1.3822e-06, 6.5301e-43, -1.3822e-06, 6.5301e-43, -1.4041e-06, -# 6.5301e-43], -# [-1.3855e-06, 6.5301e-43, -2.9163e-07, 6.5301e-43, -2.9163e-07, -# 6.5301e-43], -# [-1.4066e-06, 6.5301e-43, -1.3788e-06, 6.5301e-43, -2.9163e-07, -# 6.5301e-43]]) -# -# -# In practice, we ofter want to create tensors initialized to some values, such as zeros, ones or random values. 
Note that you can also specify the type of elements using dtype parameter, and chosing one of torch types: - - -x = torch.randn(3,5) -print(x) -y = torch.zeros(3,5,dtype=torch.int) -print(y) -z = torch.ones(3,5,dtype=torch.double) -print(z) - -###################################################################### -# Output: -# tensor([[-1.0166, -0.6828, 1.8886, -1.2115, 0.0202], -# [-1.1278, 0.7447, 0.4260, -2.1909, 0.5653], -# [ 0.0562, -0.1393, 0.6145, -0.6181, 0.1879]]) -# tensor([[0, 0, 0, 0, 0], -# [0, 0, 0, 0, 0], -# [0, 0, 0, 0, 0]], dtype=torch.int32) -# tensor([[1., 1., 1., 1., 1.], -# [1., 1., 1., 1., 1.], -# [1., 1., 1., 1., 1.]], dtype=torch.float64) -# -# -# You can also create random tensors with values sampled from different distributions, as described `in documentation. `_ -# -#Similarly to NumPy, you can use eye to create a diagonal identity matrix: - -print(torch.eye(10)) - -################################################################ -# Output: -# tensor([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], -# [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], -# [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], -# [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.], -# [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], -# [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], -# [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], -# [0., 0., 0., 0., 0., 0., 0., 1., 0., 0.], -# [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.], -# [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) -# -# -# You can also create new tensors with the same properties or size as existing tensors: -# - -print(z.new_ones(2,2)) -print(torch.zeros_like(x,dtype=torch.long)) - -############################################################################ -# Tensor Operations -# ------------- -# Tensors support all basic arithmetic operations, which can be specified in different ways: -# -# - Using operators, such as +, -, etc. -# - Using functions such as add, mult, etc. Functions can either return values, or store them in the specified ouput variable (using out= parameter) -# - In-place operations, which modify one of the arguments. Those operations have _ appended to their name, eg. add_. -# -# Complete reference to all tensor operations can be found in documentation. -# -# Let us see examples of those operations on two tensors, x and y. -# -# -# -################################################################## -# More help with the FashionMNIST Pytorch Blitz -# ---------------------------------- -# `Tensors `_ -# `DataSets and DataLoaders `_ -# `Transformations `_ -# `Build Model `_ -# `Optimization Loop `_ -# `AutoGrad `_ -# `Back to FashionMNIST main code base <>`_ diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index ce48b4b7f26..26afe25a69c 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -2,7 +2,7 @@ PyTorch Quickstart =================== -The basic machine learning concepts in any framework should include: Working with data, Creating models, Optimizing Parameters, Saving and Loading Models +The basic machine learning concepts in any framework should include: Working with data, Creating models, Optimizing Parameters, Saving and Loading Models. Below is an example of an applied machine learning model using the FashionMNIST dataset that demonstrates these steps using Pytorch. """ @@ -19,7 +19,12 @@ # # PyTorch has two basic data primitives: ``DataSet`` and ``DataLoader``. # These ``DataSet`` objects include a ``transforms`` mechanism to -# modify data in-place. 
+# modify data in-place. Below is an example of how to load that data from the Pytorch open datasets. +# +# To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in Pytoch checkout these resources: +# - `DataSet and DataLoader `_ +# - `Tensors `_ +# - `Transformations `_ classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"] @@ -37,34 +42,25 @@ ]) ) -###################################################################### -# DataLoader - # batch size batch_size = 64 -# loader +# data loader train_dataloader = DataLoader(training_data, batch_size=batch_size, num_workers=0, pin_memory=True) test_dataloader = DataLoader(test_data, batch_size=batch_size, num_workers=0, pin_memory=True) ###################################################################### -# More details `DataSet and DataLoader `_ -# More details `Tensors `_ -# More details `Transformations `_ -# -# # Creating Models # --------------- # # There are two ways of creating models: in-line or as a class. This -# quickstart will consider an in-line definition. +# quickstart will consider an in-line definition. For a more examples checkout `building the model `_ -# where to run device = 'cuda' if torch.cuda.is_available() else 'cpu' print('Using {} device'.format(device)) -# model +# in-line model model = nn.Sequential( nn.Flatten(), nn.Linear(28*28, 512), @@ -78,13 +74,15 @@ print(model) ###################################################################### -# More details `on building the model `_ # # Optimizing Parameters # --------------------- # # Optimizing model parameters requires a loss function, and optimizer, # and the optimization loop. +# - More details on `optimization and training loops `_ +# - More details on `automatic differentiation and AutoGrad `_ +# # cost function used to determine best parameters cost = torch.nn.BCELoss() @@ -140,10 +138,6 @@ def test(dataloader, model): print('Done!') ###################################################################### -# More details `optimization and training loops `_ -# More details `automatic differentiation and AutoGrad `_ -# -# # Saving Models # ------------- # @@ -194,10 +188,9 @@ def test(dataloader, model): ################################################################## # More help with the FashionMNIST Pytorch Blitz # ---------------------------------------- -# | `Tensors `_ -# | `DataSets and DataLoaders `_ -# | `Transformations `_ -# | `Build Model `_ -# | `Optimization Loop `_ -# | `AutoGrad `_ -# | `Back to FashionMNIST main code base <>`_ \ No newline at end of file +# | `Tensors `_ +# | `DataSets and DataLoaders `_ +# | `Transformations `_ +# | `Build Model `_ +# | `Optimization Loop `_ +# | `AutoGrad `_ \ No newline at end of file From 8592289e806c50c861c6666b81051b7ae0d66c8d Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Mon, 9 Nov 2020 17:39:26 +0000 Subject: [PATCH 32/55] more updates to make quickstart page --- beginner_source/quickstart_tutorial.py | 49 +++++++++++++++----------- 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index 26afe25a69c..e4bb9fd06eb 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -6,26 +6,26 @@ """ -import torch -import torch.nn as nn -import torch.onnx as onnx -import matplotlib.pyplot as plt -from torch.utils.data import DataLoader 
-from torchvision import datasets, transforms - ###################################################################### # Working with data # ----------------- # # PyTorch has two basic data primitives: ``DataSet`` and ``DataLoader``. # These ``DataSet`` objects include a ``transforms`` mechanism to -# modify data in-place. Below is an example of how to load that data from the Pytorch open datasets. +# modify data in-place. Below is an example of how to load that data from the Pytorch open datasets and transform the data to a normalized tensor. # -# To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in Pytoch checkout these resources: +# To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in Pytoch with this example checkout these resources: # - `DataSet and DataLoader `_ # - `Tensors `_ # - `Transformations `_ +import torch +import torch.nn as nn +import torch.onnx as onnx +import matplotlib.pyplot as plt +from torch.utils.data import DataLoader +from torchvision import datasets, transforms + classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"] training_data = datasets.FashionMNIST('data', train=True, download=True, @@ -55,7 +55,7 @@ # --------------- # # There are two ways of creating models: in-line or as a class. This -# quickstart will consider an in-line definition. For a more examples checkout `building the model `_ +# quickstart will consider an in-line definition. For more examples checkout `building the model `_. device = 'cuda' if torch.cuda.is_available() else 'cpu' print('Using {} device'.format(device)) @@ -78,10 +78,12 @@ # Optimizing Parameters # --------------------- # -# Optimizing model parameters requires a loss function, and optimizer, +# Optimizing model parameters requires a loss function, optimizer, # and the optimization loop. 
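######################################################################
# A small sketch of the pairing just described: a loss function plus an
# optimizer over the model's parameters. The stand-in ``model`` and the
# specific loss/optimizer choices here are illustrative placeholders, not
# the tutorial's own.

import torch
import torch.nn as nn

model = nn.Linear(28*28, 10)                               # stand-in model
loss_fn = nn.CrossEntropyLoss()                            # loss function
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)   # optimizer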
-# - More details on `optimization and training loops `_ -# - More details on `automatic differentiation and AutoGrad `_ +# +# To see more examples and details of how to work with Optimization and Training loops in Pytoch with this example checkout these resources: +# - `Optimization and training loops `_ +# - `Automatic differentiation and AutoGrad `_ # # cost function used to determine best parameters @@ -92,7 +94,9 @@ optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) ###################################################################### -# training function +# Create the training function +# ----------------------------- + def train(dataloader, model, loss, optimizer): size = len(dataloader.dataset) for batch, (X, Y) in enumerate(dataloader): @@ -108,7 +112,9 @@ def train(dataloader, model, loss, optimizer): print(f'loss: {loss:>7f} [{current:>5d}/{size:>5d}]') ###################################################################### -# validation/test function +# Create the validation/test function +# ----------------------------- + def test(dataloader, model): size = len(dataloader.dataset) model.eval() @@ -128,7 +134,9 @@ def test(dataloader, model): print(f'\nTest Error:\nacc: {(100*correct):>0.1f}%, avg loss: {test_loss:>8f}\n') ###################################################################### -# training loop +# Call the train and test function in a training loop with the number of epochs +# + epochs = 5 for t in range(epochs): @@ -154,8 +162,6 @@ def test(dataloader, model): print('Saved onnx model to model.onnx') ###################################################################### -# More details `Saving loading and running `_ -# # Loading Models # ---------------------------- # @@ -163,7 +169,7 @@ def test(dataloader, model): # parameters includes re-creating the model shape and then loading # the state dictionary. Once loaded the model can be used for either # retraining or inference purposes (in this example it is used for -# inference) +# inference). Check out more details on `saving, loading and running models with Pytorch `_ loaded_model = nn.Sequential( nn.Flatten(), @@ -193,4 +199,7 @@ def test(dataloader, model): # | `Transformations `_ # | `Build Model `_ # | `Optimization Loop `_ -# | `AutoGrad `_ \ No newline at end of file +# | `AutoGrad `_ +# +# +# *Authors* - Seth Juarez, Ari Bornstein, Cassie Breviu, Dmitry Soshnikov \ No newline at end of file From 89d590226dd7c1b1bb2045f440cf2642f9e5965a Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Mon, 9 Nov 2020 19:05:19 +0000 Subject: [PATCH 33/55] fix subsections on main quickstart --- beginner_source/quickstart_tutorial.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index e4bb9fd06eb..32d5c4134df 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -2,9 +2,19 @@ PyTorch Quickstart =================== -The basic machine learning concepts in any framework should include: Working with data, Creating models, Optimizing Parameters, Saving and Loading Models. Below is an example of an applied machine learning model using the FashionMNIST dataset that demonstrates these steps using Pytorch. +The basic machine learning concepts in any framework should include: Working with data, Creating models, Optimizing Parameters, Saving and Loading Models. 
In this quickstart we will go through an example of an applied machine learning model using the FashionMNIST dataset that demonstrates these core steps using Pytorch. """ +##################################################################### +# Jump to: +# ~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# | `Working with data `_ +# | `Creating Models `_ +# | `Optimizing Parameters `_ +# | `Saving Models `_ +# | `Loading Models `_ +# ###################################################################### # Working with data @@ -74,7 +84,6 @@ print(model) ###################################################################### -# # Optimizing Parameters # --------------------- # @@ -95,7 +104,7 @@ ###################################################################### # Create the training function -# ----------------------------- +# ~~~~~~~~~~~~~~~~~~~~~~ def train(dataloader, model, loss, optimizer): size = len(dataloader.dataset) @@ -113,7 +122,7 @@ def train(dataloader, model, loss, optimizer): ###################################################################### # Create the validation/test function -# ----------------------------- +# ~~~~~~~~~~~~~~~~~~~~~~ def test(dataloader, model): size = len(dataloader.dataset) @@ -135,6 +144,7 @@ def test(dataloader, model): ###################################################################### # Call the train and test function in a training loop with the number of epochs +# ~~~~~~~~~~~~~~~~~~~~~~ # epochs = 5 @@ -202,4 +212,8 @@ def test(dataloader, model): # | `AutoGrad `_ # # -# *Authors* - Seth Juarez, Ari Bornstein, Cassie Breviu, Dmitry Soshnikov \ No newline at end of file +# +# +# *Authors* - Seth Juarez, Ari Bornstein, Cassie Breviu, Dmitry Soshnikov +# +# \ No newline at end of file From ad5023a7b6037e29fb9a6ff0b756033627442513 Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Mon, 9 Nov 2020 20:05:59 +0000 Subject: [PATCH 34/55] updates to main, data and model --- .../quickstart/build_model_tutorial.py | 46 +++++++++++-------- .../quickstart/data_quickstart_tutorial.py | 18 +++++--- beginner_source/quickstart_tutorial.py | 28 +++-------- 3 files changed, 44 insertions(+), 48 deletions(-) diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index debc817b2a3..8d8a72ea202 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -6,10 +6,15 @@ In the below example, for our FashionMNIT image dataset, we are using a `Sequential` container from class `torch.nn.Sequential `_ that allows us to define the model layers inline. The neural network modules layers will be added to it in the order they are passed in. -Another way this model could be bulid is with a class using `nn.Module `_. We will break down each of these step of the model below. +Another way this model could be bulid is with a class using `nn.Module `_. -Inline nn.Sequential Example: +We will break down the model below. 
""" +############################################# +# Inline nn.Sequential Example: +# ---------------------------- +# + import os import torch import torch.nn as nn @@ -34,9 +39,11 @@ print(model) -""" -Class nn.Module Example: -""" +############################################# +# Class nn.Module Example: +# -------------------------- +# + class Model(nn.Module): def __init__(self, x): super(Model, self).__init__() @@ -56,6 +63,7 @@ def forward(self, x): # Here we check to see if `torch.cuda `_ is available to use the GPU, else we will use the CPU. # # Example: +# device = 'cuda' if torch.cuda.is_available() else 'cpu' print('Using {} device'.format(device)) @@ -64,17 +72,18 @@ def forward(self, x): # The Model Module Layers # ------------------------- # -# # Lets break down each model layer in the FashionMNIST model. # + ################################################## -# [nn.Flatten](https://pytorch.org/docs/stable/generated/torch.nn.Flatten.html) to reduce tensor dimensions to one. -# +# `nn.Flatten `_ to reduce tensor dimensions to one. +# ----------------------------------------------- +# # From the docs: -# ``` -# torch.nn.Flatten(start_dim: int = 1, end_dim: int = -1) -# ``` # + +torch.nn.Flatten(start_dim: int = 1, end_dim: int = -1) + #Here is an example using one of the training_data set items: tensor = training_data[0][0] print(tensor.size()) @@ -87,15 +96,16 @@ def forward(self, x): flattened_tensor = model(tensor) flattened_tensor.size() -#vOutput: torch.Size([1, 784]) +# Output: torch.Size([1, 784]) ############################################## -# [nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html) to add a linear layer to the model. +# `nn.Linear `_ to add a linear layer to the model. +# ------------------------------- # # Now that we have flattened our tensor dimension we will apply a linear layer transform that will calculate/learn the weights and the bias. # # From the docs: -# ``` +# # torch.nn.Linear(in_features: int, out_features: int, bias: bool = True) # # in_features – size of each input sample @@ -122,17 +132,13 @@ def forward(self, x): ################################################# # Activation Functions +# ------------------------- # # - [nn.ReLU](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html) Activation: # "Applies the rectified linear unit function element-wise" # - [nn.Softmax]() Activation: # "Applies the Softmax function to an n-dimensional input Tensor rescaling them so that the elements of the n-dimensional output Tensor lie in the range [0,1] and sum to 1." -###################################################### -# Resources -# -# `torch.nn `_ - ################################################################## # More help with the FashionMNIST Pytorch Blitz # ------------------------- @@ -142,4 +148,4 @@ def forward(self, x): #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Back to FashionMNIST main code base <>`_ \ No newline at end of file +# \ No newline at end of file diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index 4403c2cb0c7..1fe992f902d 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -19,7 +19,7 @@ # # You can find some of them below. 
# -# - `Image Datasets _` +# - `Image Datasets `_ # - `Text Datasets `_ # - `Audio Datasets `_ # @@ -30,8 +30,7 @@ # Once we have a Dataset we can index it manually like a list *clothing[index]*. # # Here is an example of how to load the fashion MNIST dataset from torch vision. -# -# +# import torch from torch.utils.data import Dataset @@ -56,6 +55,7 @@ # .. figure:: /_static/img/quickstart/fashion_mnist.png # :alt: # + ################################################################# # Creating a Custom Dataset # ----------------- @@ -97,9 +97,11 @@ def __getitem__(self, idx): # Imports # ----------------- # -# Import os for file handling, torch for PyTorch, [pandas](https://pandas.pydata.org/) for loading labels, [torch vision](https://pytorch.org/blog/pytorch-1.7-released/) to read image files, and Dataset to implement the Dataset interface. +# Import os for file handling, torch for PyTorch, `pandas `_ for loading labels, `torch vision `_ to read image files, and Dataset to implement the Dataset interface. # # Example: +# + import os import torch import pandas as pd @@ -110,7 +112,7 @@ def __getitem__(self, idx): ################################################################# # Init # ----------------- -## +# # The init function is used for all the first time operations when our Dataset is loaded. In this case we use it to load our annotation labels to memory and the keep track of directory of our image file. Note that different types of data can take different init inputs you are not limited to just an annotations file, directory_path and transforms but for images this is a standard practice. # # Example: @@ -141,6 +143,8 @@ def __len__(self): # In this sample if provided a tensor we convert the tensor to a list containing our index. We then load the file at the given index from our image directory as well as the image label from our pandas annotations DataFrame. This image and label are then wrapped in a single sample dictionary which we can apply a Transform on and return. To learn more about Transforms see the next section of the Blitz. # # Example: +# + def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() @@ -171,6 +175,8 @@ def __getitem__(self, idx): ################################################################# # With this we have all we need to know to load an process data of any kind in PyTorch to train deep learning models. # + + ################################################################## # More help with the FashionMNIST Pytorch Blitz # ----------------- @@ -181,4 +187,4 @@ def __getitem__(self, idx): #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Back to FashionMNIST main code base <>`_ +# diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index 32d5c4134df..aeb936a3b1e 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -5,17 +5,6 @@ The basic machine learning concepts in any framework should include: Working with data, Creating models, Optimizing Parameters, Saving and Loading Models. In this quickstart we will go through an example of an applied machine learning model using the FashionMNIST dataset that demonstrates these core steps using Pytorch. 
""" -##################################################################### -# Jump to: -# ~~~~~~~~~~~~~~~~~~~~~~~~~ -# -# | `Working with data `_ -# | `Creating Models `_ -# | `Optimizing Parameters `_ -# | `Saving Models `_ -# | `Loading Models `_ -# - ###################################################################### # Working with data # ----------------- @@ -102,10 +91,11 @@ learning_rate = 1e-3 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) -###################################################################### -# Create the training function +# ~~~~~~~~~~~~~~~~~~~~~~ +# Create the training and validation/test functions # ~~~~~~~~~~~~~~~~~~~~~~ + def train(dataloader, model, loss, optimizer): size = len(dataloader.dataset) for batch, (X, Y) in enumerate(dataloader): @@ -120,10 +110,6 @@ def train(dataloader, model, loss, optimizer): loss, current = loss.item(), batch * len(X) print(f'loss: {loss:>7f} [{current:>5d}/{size:>5d}]') -###################################################################### -# Create the validation/test function -# ~~~~~~~~~~~~~~~~~~~~~~ - def test(dataloader, model): size = len(dataloader.dataset) model.eval() @@ -142,10 +128,10 @@ def test(dataloader, model): print(f'\nTest Error:\nacc: {(100*correct):>0.1f}%, avg loss: {test_loss:>8f}\n') -###################################################################### +# ~~~~~~~~~~~~~~~~~~~~~~ # Call the train and test function in a training loop with the number of epochs # ~~~~~~~~~~~~~~~~~~~~~~ -# + epochs = 5 @@ -180,6 +166,7 @@ def test(dataloader, model): # the state dictionary. Once loaded the model can be used for either # retraining or inference purposes (in this example it is used for # inference). Check out more details on `saving, loading and running models with Pytorch `_ +# loaded_model = nn.Sequential( nn.Flatten(), @@ -212,8 +199,5 @@ def test(dataloader, model): # | `AutoGrad `_ # # -# -# # *Authors* - Seth Juarez, Ari Bornstein, Cassie Breviu, Dmitry Soshnikov # -# \ No newline at end of file From 5175725e4740d5e51c026434f53f27abfe89f462 Mon Sep 17 00:00:00 2001 From: Cassie <46505951+cassieview@users.noreply.github.com> Date: Mon, 9 Nov 2020 21:05:47 +0000 Subject: [PATCH 35/55] format updates --- .../quickstart/autograd_tutorial.py | 2 +- .../quickstart/build_model_tutorial.py | 34 +++++++++-------- .../quickstart/data_quickstart_tutorial.py | 7 +++- .../quickstart/optimization_tutorial.py | 37 ++++++++++--------- .../quickstart/save_load_run_tutorial.py | 5 ++- beginner_source/quickstart/tensor_tutorial.py | 12 ++++++ .../quickstart/transforms_tutorial.py | 14 +++---- beginner_source/quickstart_tutorial.py | 12 +++--- 8 files changed, 71 insertions(+), 52 deletions(-) diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index 1aaecd27b2e..6f32fcc86dc 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -233,7 +233,7 @@ # ################################################################## -# More help with the FashionMNIST Pytorch Blitz +# More help with the Pytorch Quickstart # ---------------------- #| `Tensors `_ #| `DataSets and DataLoaders `_ diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index 8d8a72ea202..fedfb739def 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -1,16 +1,20 @@ """ Build Model Tutorial 
=================== - -The data has been loaded and transformed we can now build the model. We will leverage `torch.nn `_ predefined layers that Pytorch has that can both simplify our code, and make it faster. - -In the below example, for our FashionMNIT image dataset, we are using a `Sequential` container from class `torch.nn.Sequential `_ that allows us to define the model layers inline. The neural network modules layers will be added to it in the order they are passed in. - -Another way this model could be bulid is with a class using `nn.Module `_. - -We will break down the model below. """ + ############################################# +# The data has been loaded and transformed we can now build the model. We will leverage `torch.nn `_ predefined layers that Pytorch has that can both simplify our code, and make it faster. +# +# In the below example, for our FashionMNIT image dataset, we are using a `Sequential` container from class `torch.nn.Sequential `_ that allows us to define the model layers inline. The neural network modules layers will be added to it in the order they are passed in. +# +# Another way this model could be bulid is with a class using `nn.Module `_. +# +# We will break down the model below. +# +# + +########################################## # Inline nn.Sequential Example: # ---------------------------- # @@ -44,9 +48,9 @@ # -------------------------- # -class Model(nn.Module): +class NeuralNework(nn.Module): def __init__(self, x): - super(Model, self).__init__() + super(NeuralNework, self).__init__() self.layer1 = nn.Linear(28*28, 512) self.layer2 = nn.Linear(512, 512) self.output = nn.Linear(512, 10) @@ -115,6 +119,7 @@ def forward(self, x): # bias – If set to False, the layer will not learn an additive bias. Default: True # # Lets take a look at the resulting data example with the flatten layer and linear layer added: +# input = training_data[0][0] print(input.size()) @@ -134,13 +139,12 @@ def forward(self, x): # Activation Functions # ------------------------- # -# - [nn.ReLU](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html) Activation: -# "Applies the rectified linear unit function element-wise" -# - [nn.Softmax]() Activation: -# "Applies the Softmax function to an n-dimensional input Tensor rescaling them so that the elements of the n-dimensional output Tensor lie in the range [0,1] and sum to 1." +# - `nn.ReLU `_ Activation +# - `nn.Softmax `_ Activation +# ################################################################## -# More help with the FashionMNIST Pytorch Blitz +# More help with the Pytorch Quickstart # ------------------------- #| `Tensors `_ #| `DataSets and DataLoaders `_ diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index 1fe992f902d..e7b43c033b5 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -2,6 +2,7 @@ Datasets & Dataloaders =================== """ + ################################################################# # Getting Started With Data in PyTorch # ----------------- @@ -23,11 +24,12 @@ # - `Text Datasets `_ # - `Audio Datasets `_ # + ################################################################# # Iterating through a Dataset # ----------------- # -# Once we have a Dataset we can index it manually like a list *clothing[index]*. +# Once we have a Dataset we can index it manually like a list `clothing[index]`. 
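######################################################################
# A short sketch of the indexing just described, assuming the FashionMNIST
# ``training_data`` Dataset loaded above with a ``ToTensor`` transform.

image, label = training_data[0]     # index the Dataset like a list
print(image.shape)                  # torch.Size([1, 28, 28])
print(label)                        # integer class index, e.g. 9 ("Ankle boot")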
# # Here is an example of how to load the fashion MNIST dataset from torch vision. # @@ -61,6 +63,7 @@ # ----------------- # # To work with your own data lets look at the a simple custom image Dataset implementation: +# import os import torch @@ -178,7 +181,7 @@ def __getitem__(self, idx): ################################################################## -# More help with the FashionMNIST Pytorch Blitz +# More help with the Pytorch Quickstart # ----------------- # #| `Tensors `_ diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 67c4fa36867..62023ea4f6e 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -19,7 +19,7 @@ # Hyperparameters # ----------------- # -#Hyperparameters are adjustable parameters that let you control the model optimization process. For example, with neural networks, you can configure: +# Hyperparameters are adjustable parameters that let you control the model optimization process. For example, with neural networks, you can configure: # # - **Number of Epochs**- the number times iterate over the dataset to update model parameters # - **Batch Size** - the number of samples in the dataset to evaluate before you update model parameters @@ -33,12 +33,14 @@ ###################################################### # Optimizaton Loops # ----------------- +# # Once we set our hyperparameters we can then optimize our our model with optimization loops. # # The optimziation loop is comprized of three main subloops in PyTorch. # # .. figure:: /_static/img/quickstart/optimization_loops.png -# :alt: +# :alt: +# # # 1. The Train Loop - Core loop iterates over all the epochs # 2. The Validation Loop - Validate loss after each weight parameter update and can be used to gauge hyper parameter performance and update them for the next batch. @@ -59,7 +61,8 @@ ###################################################### # Loss # ----------------- -#The loss is the value used to update our parameters. To calculate the loss we make a prediction using the inputs of our given data sample. +# +# The loss is the value used to update our parameters. To calculate the loss we make a prediction using the inputs of our given data sample. # preds = model(inputs) @@ -68,6 +71,7 @@ ###################################################### # AutoGrad and Optimizer (We might want to split this when we go more in depth on autograd ) # ----------------- +# # By default each tensor maintains a graph of every operation applied on it unless otherwise specified using the torch.no_grad() command. 
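######################################################################
# A minimal sketch of the behaviour described above: operations on tensors
# that require gradients are recorded in the graph unless they are wrapped
# in ``torch.no_grad()``.

import torch

w = torch.randn(3, requires_grad=True)
y = w * 2
print(y.requires_grad)      # True - the multiplication was recorded

with torch.no_grad():
    z = w * 2
print(z.requires_grad)      # False - recording was suspended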
# # `Autograd graph `_ @@ -85,24 +89,22 @@ # Putting it all together lets look at a basic optimization loop # ----------------- # -# -# -# #initilize optimizer and example cost function +# Initilize optimizer and example cost function # # # For loop to iterate over epoch -# # Train loop over batches -# # Set model to train mode -# # Calculate loss using -# # clear optimizer gradient -# # loss.backword -# # optimizer step -# # Set model to evaluate mode and start validation loop -# #calculate validation loss and update optimizer hyper parameters -# # Set model to evaluate test loop +# - Train loop over batches +# - Set model to train mode +# - Calculate loss using +# - clear optimizer gradient +# - loss.backword +# - optimizer step +# - Set model to evaluate mode and start validation loop +# - calculate validation loss and update optimizer hyper parameters +# - Set model to evaluate test loop ################################################################## -# More help with the FashionMNIST Pytorch Blitz +# More help with the PyTorch Quickstart # ----------------- #| `Tensors `_ #| `DataSets and DataLoaders `_ @@ -110,4 +112,5 @@ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Back to FashionMNIST main code base <>`_ \ No newline at end of file +#| `Back to FashionMNIST main code base <>`_ +# \ No newline at end of file diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py index cfa64d380c5..85baca71000 100644 --- a/beginner_source/quickstart/save_load_run_tutorial.py +++ b/beginner_source/quickstart/save_load_run_tutorial.py @@ -91,7 +91,7 @@ print(f'Predicted: "{predicted}", Actual: "{actual}"') ################################################################## -# More help with the FashionMNIST Pytorch Blitz +# More help with the PyTorch Quickstart # ---------------------------------------- # | `Tensors `_ # | `DataSets and DataLoaders `_ @@ -99,4 +99,5 @@ # | `Build Model `_ # | `Optimization Loop `_ # | `AutoGrad `_ -# | `Back to FashionMNIST main code base <>`_ \ No newline at end of file +# | `Back to FashionMNIST main code base <>`_ +# \ No newline at end of file diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index b37ff5a2e4a..906b11ad203 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -248,3 +248,15 @@ # moving it from GPU anyway. # + +################################################################## +# More help with the PyTorch Quickstart +# ---------------------------------------- +# | `Tensors `_ +# | `DataSets and DataLoaders `_ +# | `Transformations `_ +# | `Build Model `_ +# | `Optimization Loop `_ +# | `AutoGrad `_ +# | `Back to FashionMNIST main code base <>`_ +# \ No newline at end of file diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index 5b4ceb95f03..086d1d2571b 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -62,6 +62,9 @@ # # ..note: ToTensor only normalized image data that is in PIL mode of (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8. In the other cases, tensors are returned without scaling. 
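######################################################################
# A small sketch of the scaling rule in the note above: ``ToTensor`` maps a
# uint8 image with values in [0, 255] to a float tensor in [0.0, 1.0].

import numpy as np
from torchvision import transforms

img = np.random.randint(0, 256, size=(28, 28, 1), dtype=np.uint8)
tensor = transforms.ToTensor()(img)
print(tensor.dtype)                              # torch.float32
print(tensor.min().item(), tensor.max().item())  # both within [0.0, 1.0]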
# +# +# Check out the other `TorchVision Transforms `_ +# ############################################## # Target_Transform: Labels @@ -81,7 +84,7 @@ # -------------------------------------- # Below is an example for processing image data using a dataset from a local directory. # -#Example: +# Example: data_dir='data' batch_size=4 @@ -114,14 +117,8 @@ class_names = image_datasets['train'].classes -################################################## -# Resources -#------------------------------------------- -#Check out the other TorchVision Transforms available: https://pytorch.org/docs/stable/torchvision/transforms.html -# -# ################################################################## -# More help with the FashionMNIST Pytorch Blitz +# More help with the PyTorch Quickstart # ---------------------------------------- # | `Tensors `_ # | `DataSets and DataLoaders `_ @@ -130,6 +127,7 @@ # | `Optimization Loop `_ # | `AutoGrad `_ # | `Back to FashionMNIST main code base <>`_ +# diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index aeb936a3b1e..62fc5e632d1 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -91,10 +91,8 @@ learning_rate = 1e-3 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) -# ~~~~~~~~~~~~~~~~~~~~~~ -# Create the training and validation/test functions -# ~~~~~~~~~~~~~~~~~~~~~~ +# Create the training function def train(dataloader, model, loss, optimizer): size = len(dataloader.dataset) @@ -110,6 +108,8 @@ def train(dataloader, model, loss, optimizer): loss, current = loss.item(), batch * len(X) print(f'loss: {loss:>7f} [{current:>5d}/{size:>5d}]') +# Create the validation/test function + def test(dataloader, model): size = len(dataloader.dataset) model.eval() @@ -128,10 +128,8 @@ def test(dataloader, model): print(f'\nTest Error:\nacc: {(100*correct):>0.1f}%, avg loss: {test_loss:>8f}\n') -# ~~~~~~~~~~~~~~~~~~~~~~ -# Call the train and test function in a training loop with the number of epochs -# ~~~~~~~~~~~~~~~~~~~~~~ +# Call the train and test function in a training loop with the number of epochs indicated epochs = 5 @@ -199,5 +197,5 @@ def test(dataloader, model): # | `AutoGrad `_ # # -# *Authors* - Seth Juarez, Ari Bornstein, Cassie Breviu, Dmitry Soshnikov +# *Authors: Seth Juarez, Ari Bornstein, Cassie Breviu, Dmitry Soshnikov* # From 3bd01bbed09edbaad2be2a776b8a8b0462898413 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Mon, 9 Nov 2020 15:27:26 -0600 Subject: [PATCH 36/55] fix link, fix format, fix stuff --- .../quickstart/autograd_tutorial.py | 2 +- .../quickstart/build_model_tutorial.py | 24 +++++++++---------- .../quickstart/data_quickstart_tutorial.py | 2 +- .../quickstart/optimization_tutorial.py | 3 +-- .../quickstart/save_load_run_tutorial.py | 2 +- beginner_source/quickstart/tensor_tutorial.py | 2 +- .../quickstart/transforms_tutorial.py | 2 +- beginner_source/quickstart_tutorial.py | 17 +++++++------ 8 files changed, 27 insertions(+), 27 deletions(-) diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index 6f32fcc86dc..fd90875702e 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -235,7 +235,7 @@ ################################################################## # More help with the Pytorch Quickstart # ---------------------- -#| `Tensors `_ +#| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transformations `_ #| 
`Build Model `_ diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index fedfb739def..b7d06b6d8f4 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -1,18 +1,16 @@ """ Build Model Tutorial -=================== -""" +======================================= -############################################# -# The data has been loaded and transformed we can now build the model. We will leverage `torch.nn `_ predefined layers that Pytorch has that can both simplify our code, and make it faster. -# -# In the below example, for our FashionMNIT image dataset, we are using a `Sequential` container from class `torch.nn.Sequential `_ that allows us to define the model layers inline. The neural network modules layers will be added to it in the order they are passed in. -# -# Another way this model could be bulid is with a class using `nn.Module `_. -# -# We will break down the model below. -# -# +The data has been loaded and transformed we can now build the model. We will leverage `torch.nn `_ predefined layers that Pytorch has that can both simplify our code, and make it faster. + +In the below example, for our FashionMNIT image dataset, we are using a `Sequential` container from class `torch.nn.Sequential `_ that allows us to define the model layers inline. The neural network modules layers will be added to it in the order they are passed in. + +Another way this model could be bulid is with a class using `nn.Module `_. + +We will break down the model below. + +""" ########################################## # Inline nn.Sequential Example: @@ -146,7 +144,7 @@ def forward(self, x): ################################################################## # More help with the Pytorch Quickstart # ------------------------- -#| `Tensors `_ +#| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transformations `_ #| `Build Model `_ diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index e7b43c033b5..436b7a54c7a 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -184,7 +184,7 @@ def __getitem__(self, idx): # More help with the Pytorch Quickstart # ----------------- # -#| `Tensors `_ +#| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transformations `_ #| `Build Model `_ diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 62023ea4f6e..5071127a925 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -106,11 +106,10 @@ ################################################################## # More help with the PyTorch Quickstart # ----------------- -#| `Tensors `_ +#| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transformations `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Back to FashionMNIST main code base <>`_ # \ No newline at end of file diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py index 85baca71000..9cc2ec2e5c5 100644 --- a/beginner_source/quickstart/save_load_run_tutorial.py +++ b/beginner_source/quickstart/save_load_run_tutorial.py @@ -93,7 +93,7 @@ ################################################################## # More help with the PyTorch Quickstart # 
---------------------------------------- -# | `Tensors `_ +# | `Tensors `_ # | `DataSets and DataLoaders `_ # | `Transformations `_ # | `Build Model `_ diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index 906b11ad203..b01fbb78f1b 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -252,7 +252,7 @@ ################################################################## # More help with the PyTorch Quickstart # ---------------------------------------- -# | `Tensors `_ +# | `Tensors `_ # | `DataSets and DataLoaders `_ # | `Transformations `_ # | `Build Model `_ diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index 086d1d2571b..85ad732a981 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -120,7 +120,7 @@ ################################################################## # More help with the PyTorch Quickstart # ---------------------------------------- -# | `Tensors `_ +# | `Tensors `_ # | `DataSets and DataLoaders `_ # | `Transformations `_ # | `Build Model `_ diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index 62fc5e632d1..a48141d7565 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -128,8 +128,12 @@ def test(dataloader, model): print(f'\nTest Error:\nacc: {(100*correct):>0.1f}%, avg loss: {test_loss:>8f}\n') - +###################################################################### +# Training Models +# ------------- +# # Call the train and test function in a training loop with the number of epochs indicated +# epochs = 5 @@ -143,14 +147,13 @@ def test(dataloader, model): # Saving Models # ------------- # -# PyTorch has can serialize the internal model state to a file. It also -# has built-in ONNX support. +# PyTorch has different ways you can save your model. One was is to serialize the internal model state to a file. Another would be to use the built-in `ONNX `_ support. 
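######################################################################
# An illustrative aside on the first route, assuming the ``model`` defined
# earlier in this tutorial: a state dictionary is simply an ordered mapping
# from parameter names to weight tensors, which is what ``torch.save``
# serializes below.

for name, param in model.state_dict().items():
    print(name, param.shape)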
-# saving PyTorch Model Dictionary +# Saving PyTorch Model Dictionary torch.save(model.state_dict(), 'model.pth') print('Saved PyTorch Model to model.pth') -# create dummy variable to traverse graph +# Save to ONNX, create dummy variable to traverse graph x = torch.randint(255, (1, 28*28), dtype=torch.float).to(device) / 255 onnx.export(model, x, 'model.onnx') print('Saved onnx model to model.onnx') @@ -187,9 +190,9 @@ def test(dataloader, model): print(f'Predicted: "{predicted}", Actual: "{actual}"') ################################################################## -# More help with the FashionMNIST Pytorch Blitz +# More help with the PyTorch Quickstart # ---------------------------------------- -# | `Tensors `_ +# | `Tensors `_ # | `DataSets and DataLoaders `_ # | `Transformations `_ # | `Build Model `_ From 374f8cfe864baee27c44064448f0d824b25987b0 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Mon, 9 Nov 2020 15:45:38 -0600 Subject: [PATCH 37/55] more formatting --- beginner_source/quickstart/build_model_tutorial.py | 11 ++++------- beginner_source/quickstart/tensor_tutorial.py | 1 - beginner_source/quickstart/transforms_tutorial.py | 4 ++++ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index b7d06b6d8f4..3be5cf61e4a 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -9,14 +9,7 @@ Another way this model could be bulid is with a class using `nn.Module `_. We will break down the model below. - """ - -########################################## -# Inline nn.Sequential Example: -# ---------------------------- -# - import os import torch import torch.nn as nn @@ -24,6 +17,10 @@ from torch.utils.data import DataLoader from torchvision import datasets, transforms +########################################## +# Inline nn.Sequential Example: +# ---------------------------- +# device = 'cuda' if torch.cuda.is_available() else 'cpu' print('Using {} device'.format(device)) diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index b01fbb78f1b..7670ff97724 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -258,5 +258,4 @@ # | `Build Model `_ # | `Optimization Loop `_ # | `AutoGrad `_ -# | `Back to FashionMNIST main code base <>`_ # \ No newline at end of file diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index 85ad732a981..03784a2c2b7 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -53,6 +53,7 @@ transform=transforms.Compose([transforms.ToTensor()]) +##################################################### # *Compose* # The `transforms.compose` allows us to string together different steps of transformations in a sequential order. This allows us to add an array of transforms for both the features and labels when preparing our data for training. # @@ -71,9 +72,11 @@ # ------------------------------- # #Example: +# target_transform= transforms.Lambda(lambda y: torch.zeros(10, dtype=torchfloat).scatter_(dim=0, index=torchtensor(y), value=1)) +################################################# # This function is taking the y input and creating a tensor of size 10 with a float datatype. 
Then its calling scatter ([torch.Tensor.scatter_ class](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.scatter_)) to send each item to torch.zeros, according to the row, index and current item value. # * *Dim=0* is row wise index # * *index* = torchtensor(y)` is the index of the element toscatter @@ -85,6 +88,7 @@ # Below is an example for processing image data using a dataset from a local directory. # # Example: +# data_dir='data' batch_size=4 From 138420ae935b16ff6ab8a10ce511439e049c3059 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 10 Nov 2020 09:56:23 -0600 Subject: [PATCH 38/55] fix image and format --- beginner_source/quickstart/autograd_tutorial.py | 3 +-- beginner_source/quickstart/build_model_tutorial.py | 14 ++++++-------- .../quickstart/data_quickstart_tutorial.py | 5 ++++- .../quickstart/optimization_tutorial.py | 3 +-- .../quickstart/save_load_run_tutorial.py | 4 +--- beginner_source/quickstart/tensor_tutorial.py | 3 +-- beginner_source/quickstart/transforms_tutorial.py | 2 -- beginner_source/quickstart_tutorial.py | 2 +- 8 files changed, 15 insertions(+), 21 deletions(-) diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index fd90875702e..45c8d8aa386 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -240,5 +240,4 @@ #| `Transformations `_ #| `Build Model `_ #| `Optimization Loop `_ -#| `AutoGrad `_ -# \ No newline at end of file +#| `AutoGrad `_ \ No newline at end of file diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index 3be5cf61e4a..8223584714d 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -6,10 +6,14 @@ In the below example, for our FashionMNIT image dataset, we are using a `Sequential` container from class `torch.nn.Sequential `_ that allows us to define the model layers inline. The neural network modules layers will be added to it in the order they are passed in. -Another way this model could be bulid is with a class using `nn.Module `_. +Another way this model could be bulid is with a class using `nn.Module `_ We will break down the model below. """ +########################################## +# Inline nn.Sequential Example: +# ---------------------------- +# import os import torch import torch.nn as nn @@ -17,11 +21,6 @@ from torch.utils.data import DataLoader from torchvision import datasets, transforms -########################################## -# Inline nn.Sequential Example: -# ---------------------------- -# - device = 'cuda' if torch.cuda.is_available() else 'cpu' print('Using {} device'.format(device)) @@ -146,5 +145,4 @@ def forward(self, x): #| `Transformations `_ #| `Build Model `_ #| `Optimization Loop `_ -#| `AutoGrad `_ -# \ No newline at end of file +#| `AutoGrad `_ \ No newline at end of file diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index 436b7a54c7a..2b9f1d57e12 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -9,9 +9,13 @@ # # Before we can even think about building a model with PyTorch, we need to first learn how to load and process data. Data can be sourced from local files, cloud datastores and database queries. 
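######################################################################
# A worked check of the one-hot ``target_transform`` lambda discussed above
# (written out with explicit ``torch.float`` / ``torch.tensor``): for a
# label ``y``, ``scatter_`` writes 1.0 at index ``y`` of a length-10 zero
# vector.

import torch

y = 3
one_hot = torch.zeros(10, dtype=torch.float).scatter_(
    dim=0, index=torch.tensor(y), value=1)
print(one_hot)    # tensor([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])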
It comes in all sorts of forms and formats from structured tables to image, audio, text, video files and more. # + +############################################################### # .. figure:: /_static/img/quickstart/typesofdata.png # :alt: # + +############################################################ # Different data types require different python libraries to load and process such as `openCV `_ and `PIL `_ for images, `NLTK `_ and `spaCy `_ for text and `Librosa `_ for audio. # # If not properly organized, code for processing data samples can quickly get messy and become hard to maintain. Since different model architectures can be applied to many data types, we ideally want our dataset code to be decoupled from our model training code. To this end, PyTorch provides a simple Datasets interface for linking managing collections of data. @@ -190,4 +194,3 @@ def __getitem__(self, idx): #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -# diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 5071127a925..7255163344d 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -111,5 +111,4 @@ #| `Transformations `_ #| `Build Model `_ #| `Optimization Loop `_ -#| `AutoGrad `_ -# \ No newline at end of file +#| `AutoGrad `_ \ No newline at end of file diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py index 9cc2ec2e5c5..a66cb0bd26b 100644 --- a/beginner_source/quickstart/save_load_run_tutorial.py +++ b/beginner_source/quickstart/save_load_run_tutorial.py @@ -98,6 +98,4 @@ # | `Transformations `_ # | `Build Model `_ # | `Optimization Loop `_ -# | `AutoGrad `_ -# | `Back to FashionMNIST main code base <>`_ -# \ No newline at end of file +# | `AutoGrad `_ \ No newline at end of file diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index 7670ff97724..97330b01e24 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -257,5 +257,4 @@ # | `Transformations `_ # | `Build Model `_ # | `Optimization Loop `_ -# | `AutoGrad `_ -# \ No newline at end of file +# | `AutoGrad `_ \ No newline at end of file diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index 03784a2c2b7..29e124019e8 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -130,8 +130,6 @@ # | `Build Model `_ # | `Optimization Loop `_ # | `AutoGrad `_ -# | `Back to FashionMNIST main code base <>`_ -# diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index a48141d7565..c899976bffe 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -201,4 +201,4 @@ def test(dataloader, model): # # # *Authors: Seth Juarez, Ari Bornstein, Cassie Breviu, Dmitry Soshnikov* -# + From b01c6c7cec7d49c90e5cf120de0b36591e4ba650 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 10 Nov 2020 10:12:33 -0600 Subject: [PATCH 39/55] tensor updates --- .../quickstart/data_quickstart_tutorial.py | 2 +- beginner_source/quickstart/tensor_tutorial.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py 
b/beginner_source/quickstart/data_quickstart_tutorial.py index 2b9f1d57e12..a34256ca8ba 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -25,7 +25,7 @@ # You can find some of them below. # # - `Image Datasets `_ -# - `Text Datasets `_ +# - `Text Datasets `_ # - `Audio Datasets `_ # diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index 97330b01e24..ec7c2bf8793 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -25,7 +25,7 @@ ###################################################################### -# **Note:** When using CPU for computations, tensors converted from arrays +# ..note: When using CPU for computations, tensors converted from arrays # share the same memory for data. Thus, changing the underlying array will # also affect the tensor. # @@ -44,7 +44,7 @@ ###################################################################### -# In practice, we ofter want to create tensors initialized to some values, +# In practice, we often want to create tensors initialized to some values, # such as zeros, ones or random values. Note that you can also specify the # type of elements using ``dtype`` parameter, and chosing one of ``torch`` # types: @@ -56,7 +56,7 @@ ###################################################################### # You can also create random tensors with values sampled from different -# distributions, as described `in +# distributions, as described `in the # documentation `__. # # Similarly to NumPy, you can use ``eye`` to create a diagonal identity @@ -95,7 +95,7 @@ # arguments. Those operations have ``_`` appended to their name, eg. # ``add_``. # -# Complete reference to all tensor operations can be found `in +# Complete reference to all tensor operations can be found `in the # documentation `__. # # Let us see examples of those operations on two tensors, ``x`` and ``y``. @@ -168,8 +168,8 @@ # Resizing and Indexing # ~~~~~~~~~~~~~~~~~~~~~ # -# Very often you need to change the shape of the tensor without modifying -# its valies, eg. to add an extra dimension. To do that, you can use +# Often you need to change the shape of the tensor without modifying +# its values, eg. to add an extra dimension. To do that, you can use # ``view`` method, which provides a **view** to the same in-memory values # using different dimensions: # @@ -180,14 +180,14 @@ ###################################################################### -# Note that the number of elements in a view should be the same as in the +# The number of elements in a view should be the same as in the # original tensor, and that you can use ``-1`` in one of the dimensions to # figure out this dimension automatically. # ###################################################################### -# **Note:** ``view`` is similar to ``reshape`` operation in NumPy. There +# ..note: ``view`` is similar to ``reshape`` operation in NumPy. There # is also a ``reshape`` method available in PyTorch, and it is more # powerful than ``view``, because it can also reshape non-contiguous # arrays by copying them to the new shape. 
However, in vast majority of From ee60f9f0707e0fa9b78e4ffdf8406b7347359bf4 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 10 Nov 2020 13:39:52 -0600 Subject: [PATCH 40/55] format fixes --- beginner_source/quickstart/autograd_tutorial.py | 7 ++++--- .../quickstart/build_model_tutorial.py | 6 ++++-- .../quickstart/optimization_tutorial.py | 7 ++++--- beginner_source/quickstart/tensor_tutorial.py | 17 +++++++++-------- beginner_source/quickstart_tutorial.py | 4 ++-- 5 files changed, 23 insertions(+), 18 deletions(-) diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index 45c8d8aa386..540bf26294f 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -234,10 +234,11 @@ ################################################################## # More help with the Pytorch Quickstart -# ---------------------- -#| `Tensors `_ +# ----------------- +# +#| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transformations `_ #| `Build Model `_ #| `Optimization Loop `_ -#| `AutoGrad `_ \ No newline at end of file +#| `AutoGrad `_ diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index 8223584714d..b23bdad85d0 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -14,6 +14,7 @@ # Inline nn.Sequential Example: # ---------------------------- # + import os import torch import torch.nn as nn @@ -139,10 +140,11 @@ def forward(self, x): ################################################################## # More help with the Pytorch Quickstart -# ------------------------- +# ----------------- +# #| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transformations `_ #| `Build Model `_ #| `Optimization Loop `_ -#| `AutoGrad `_ \ No newline at end of file +#| `AutoGrad `_ diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 7255163344d..98a7bb30ef4 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -104,11 +104,12 @@ ################################################################## -# More help with the PyTorch Quickstart +# More help with the Pytorch Quickstart # ----------------- -#| `Tensors `_ +# +#| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transformations `_ #| `Build Model `_ #| `Optimization Loop `_ -#| `AutoGrad `_ \ No newline at end of file +#| `AutoGrad `_ diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index ec7c2bf8793..234ad0cc5f4 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -250,11 +250,12 @@ ################################################################## -# More help with the PyTorch Quickstart -# ---------------------------------------- -# | `Tensors `_ -# | `DataSets and DataLoaders `_ -# | `Transformations `_ -# | `Build Model `_ -# | `Optimization Loop `_ -# | `AutoGrad `_ \ No newline at end of file +# More help with the Pytorch Quickstart +# ----------------- +# +#| `Tensors `_ +#| `DataSets and DataLoaders `_ +#| `Transformations `_ +#| `Build Model `_ +#| `Optimization Loop `_ +#| `AutoGrad `_ diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index c899976bffe..e720485ff2f 100644 --- 
a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -16,7 +16,7 @@ # To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in Pytoch with this example checkout these resources: # - `DataSet and DataLoader `_ # - `Tensors `_ -# - `Transformations `_ +# - `Transforms `_ import torch import torch.nn as nn @@ -147,7 +147,7 @@ def test(dataloader, model): # Saving Models # ------------- # -# PyTorch has different ways you can save your model. One was is to serialize the internal model state to a file. Another would be to use the built-in `ONNX `_ support. +# PyTorch has different ways you can save your model. One way is to serialize the internal model state to a file. Another would be to use the built-in `ONNX `_ support. # Saving PyTorch Model Dictionary torch.save(model.state_dict(), 'model.pth') From 99f361385ee2bbdbbdfb313e8b71a4a083fd9b65 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 10 Nov 2020 13:45:13 -0600 Subject: [PATCH 41/55] more formatting --- .../quickstart/build_model_tutorial.py | 23 +++++++++++++------ .../quickstart/data_quickstart_tutorial.py | 4 ++-- .../quickstart/transforms_tutorial.py | 4 ++-- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index b23bdad85d0..f5bcf884221 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -1,15 +1,24 @@ """ Build Model Tutorial ======================================= +""" -The data has been loaded and transformed we can now build the model. We will leverage `torch.nn `_ predefined layers that Pytorch has that can both simplify our code, and make it faster. - -In the below example, for our FashionMNIT image dataset, we are using a `Sequential` container from class `torch.nn.Sequential `_ that allows us to define the model layers inline. The neural network modules layers will be added to it in the order they are passed in. - -Another way this model could be bulid is with a class using `nn.Module `_ +############################################### +# The data has been loaded and transformed we can now build the model. +# We will leverage `torch.nn `_ +# predefined layers that Pytorch has that can both simplify our code, and make it faster. +# +# In the below example, for our FashionMNIT image dataset, we are using a `Sequential` +# container from class `torch.nn.# Sequential `_ +# that allows us to define the model # layers inline. +# The neural network modules layers will be added to it in the order they are passed in. +# +# Another way this model could be bulid is with a class +# using `nn.Module `_ +# +# We will break down the model below. +# -We will break down the model below. -""" ########################################## # Inline nn.Sequential Example: # ---------------------------- diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index a34256ca8ba..ad56999d668 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -12,7 +12,7 @@ ############################################################### # .. 
figure:: /_static/img/quickstart/typesofdata.png -# :alt: +# :alt: typesofdata # ############################################################ @@ -59,7 +59,7 @@ ################################################################# # .. figure:: /_static/img/quickstart/fashion_mnist.png -# :alt: +# :alt: fashion_mnist # ################################################################# diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index 29e124019e8..a6f1606be99 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -58,7 +58,7 @@ # The `transforms.compose` allows us to string together different steps of transformations in a sequential order. This allows us to add an array of transforms for both the features and labels when preparing our data for training. # # *ToTensor()* -#For the feature transforms we have an array of transforms to process our image data for training. The first transform in the array is `transforms.ToTensor()` this is from class [torchvision.transforms.ToTensor](https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.ToTensor). We need to take our images and turn them into a tensor. (To learn more about Tensors check out [this]() resource.) The ToTensor() transformation is doing more than converting our image into a tensor. Its also normalizing our data for us by scaling the images to be between 0 and 1. +# For the feature transforms we have an array of transforms to process our image data for training. The first transform in the array is `transforms.ToTensor()` this is from class [torchvision.transforms.ToTensor](https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.ToTensor). We need to take our images and turn them into a tensor. (To learn more about Tensors check out [this]() resource.) The ToTensor() transformation is doing more than converting our image into a tensor. Its also normalizing our data for us by scaling the images to be between 0 and 1. # # # ..note: ToTensor only normalized image data that is in PIL mode of (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8. In the other cases, tensors are returned without scaling. 
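The scaling behavior the note above describes can be verified in a few lines. A minimal sketch, using a fake uint8 image rather than the tutorial's FashionMNIST data:

import numpy as np
from PIL import Image
from torchvision import transforms

# build a fake 28x28 grayscale image with pixel values in 0..255
img = Image.fromarray(np.random.randint(0, 256, (28, 28), dtype=np.uint8))
tensor = transforms.ToTensor()(img)
print(tensor.dtype, tensor.min().item(), tensor.max().item())  # torch.float32, values now in [0.0, 1.0]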
@@ -71,7 +71,7 @@ # Target_Transform: Labels # ------------------------------- # -#Example: +# Example: # target_transform= transforms.Lambda(lambda y: torch.zeros(10, dtype=torchfloat).scatter_(dim=0, index=torchtensor(y), value=1)) From 43716fb198af89abdb43d0047e9ee665e9f63dc4 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 10 Nov 2020 14:27:06 -0600 Subject: [PATCH 42/55] fix note formatting and optimzation text --- .../quickstart/build_model_tutorial.py | 10 +--- .../quickstart/optimization_tutorial.py | 59 +++++++++---------- .../quickstart/save_load_run_tutorial.py | 1 + beginner_source/quickstart/tensor_tutorial.py | 4 +- .../quickstart/transforms_tutorial.py | 2 +- 5 files changed, 32 insertions(+), 44 deletions(-) diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index f5bcf884221..6bdcd70876c 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -114,15 +114,7 @@ def forward(self, x): # # From the docs: # -# torch.nn.Linear(in_features: int, out_features: int, bias: bool = True) -# -# in_features – size of each input sample -# -# out_features – size of each output sample -# -# bias – If set to False, the layer will not learn an additive bias. Default: True -# -# Lets take a look at the resulting data example with the flatten layer and linear layer added: +# `torch.nn.Linear(in_features: int, out_features: int, bias: bool = True)` # input = training_data[0][0] diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 98a7bb30ef4..3c91d13fc65 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -38,25 +38,29 @@ # # The optimziation loop is comprized of three main subloops in PyTorch. # + +############################################################ # .. figure:: /_static/img/quickstart/optimization_loops.png # :alt: # -# + +############################################################# # 1. The Train Loop - Core loop iterates over all the epochs # 2. The Validation Loop - Validate loss after each weight parameter update and can be used to gauge hyper parameter performance and update them for the next batch. # 3. The Test Loop - is used to evaluate our models performance after each epoch on traditional metrics to show how much our model is generalizing from the train and validation dataset to the test dataset it's never seen before. 
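The three loops described above can be made concrete with a small runnable sketch. This is an illustration on a stand-in linear model with random data, not the tutorial's FashionMNIST pipeline, and the hyperparameter values are arbitrary:

import torch
import torch.nn as nn

model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
cost_function = nn.CrossEntropyLoss()
inputs, labels = torch.randn(64, 10), torch.randint(0, 2, (64,))

for epoch in range(5):
    model.train()                               # training mode for the train loop
    loss = cost_function(model(inputs), labels)
    optimizer.zero_grad()                       # clear gradients left over from the last step
    loss.backward()                             # compute new gradients
    optimizer.step()                            # update the parameters

    model.eval()                                # evaluation mode for the validation/test loops
    with torch.no_grad():                       # no gradient tracking needed while evaluating
        val_loss = cost_function(model(inputs), labels)
    print(f"epoch {epoch}: train {loss.item():.4f}, val {val_loss.item():.4f}")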
# -for epoch in range(num_epochs): # Optimization Loop +for epoch in range(num_epochs): +# Optimization Loop # Train loop over batches - model.train() # set model to train - # Model Update Code - model.eval() # After exiting batch loop set model to eval to speed up evaluation and not track gradients (this is explained below) - # Validation Loop - # - Put sample validation metric logging and hyperparameter update code here + model.train() # set model to train + # Model Update Code + model.eval() # After exiting batch loop set model to eval to speed up evaluation and not track gradients (this is explained below) + # Validation Loop + # - Put sample validation metric logging and hyperparameter update code here # After exiting train loop set model to eval to speed up evaluation and not track gradients (this is explained below) # Test Loop - # - Put sample test metric logging and hyperparameter update code here + # - Put sample test metric logging and hyperparameter update code here ###################################################### # Loss @@ -67,23 +71,14 @@ preds = model(inputs) loss = cost_function(preds, labels) - -###################################################### -# AutoGrad and Optimizer (We might want to split this when we go more in depth on autograd ) -# ----------------- -# -# By default each tensor maintains a graph of every operation applied on it unless otherwise specified using the torch.no_grad() command. -# -# `Autograd graph `_ -# -# PyTorch uses this graph to automatically update parameters with respect to our models loss during training. This is done with one line loss.backwards(). Once we have our gradients the optimizer is used to propgate the gradients from the backwards command to update all the parameters in our model. - -optimizer.zero_grad() # make sure previous gradients are cleared -loss.backward() # calculates gradients with respect to loss +# Make sure previous gradients are cleared +optimizer.zero_grad() +# Calculates gradients with respect to loss +loss.backward() optimizer.step() ###################################################### -# The standard method for optimization is called Stochastic Gradient Descent, to learn more check out this awesome video by `3blue1brown `_. There are many different optimizers and variations of this method in PyTorch such as ADAM and RMSProp that work better for different kinds of models, they are out side the scope of this Blitz, but can check out the full list of optimizers[here](https://pytorch.org/docs/stable/optim.html) +# The standard method for optimization is called Stochastic Gradient Descent, to learn more check out this awesome video by `3blue1brown `_. 
There are many different optimizers and variations of this method in PyTorch such as ADAM and RMSProp that work better for different kinds of models, they are out side the scope of this Blitz, but can check out the full list of optimizers `here `_ ###################################################### # Putting it all together lets look at a basic optimization loop @@ -91,16 +86,16 @@ # # Initilize optimizer and example cost function # -# # For loop to iterate over epoch -# - Train loop over batches -# - Set model to train mode -# - Calculate loss using -# - clear optimizer gradient -# - loss.backword -# - optimizer step -# - Set model to evaluate mode and start validation loop -# - calculate validation loss and update optimizer hyper parameters -# - Set model to evaluate test loop +# For loop to iterate over epoch +# - Train loop over batches +# - Set model to train mode +# - Calculate loss using +# - clear optimizer gradient +# - loss.backword +# - optimizer step +# - Set model to evaluate mode and start validation loop +# - calculate validation loss and update optimizer hyper parameters +# - Set model to evaluate test loop ################################################################## diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py index a66cb0bd26b..f844e9a5c74 100644 --- a/beginner_source/quickstart/save_load_run_tutorial.py +++ b/beginner_source/quickstart/save_load_run_tutorial.py @@ -93,6 +93,7 @@ ################################################################## # More help with the PyTorch Quickstart # ---------------------------------------- +# # | `Tensors `_ # | `DataSets and DataLoaders `_ # | `Transformations `_ diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index 234ad0cc5f4..7c96c225726 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -25,7 +25,7 @@ ###################################################################### -# ..note: When using CPU for computations, tensors converted from arrays +# .. note:: When using CPU for computations, tensors converted from arrays # share the same memory for data. Thus, changing the underlying array will # also affect the tensor. # @@ -187,7 +187,7 @@ ###################################################################### -# ..note: ``view`` is similar to ``reshape`` operation in NumPy. There +# .. note:: ``view`` is similar to ``reshape`` operation in NumPy. There # is also a ``reshape`` method available in PyTorch, and it is more # powerful than ``view``, because it can also reshape non-contiguous # arrays by copying them to the new shape. However, in vast majority of diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index a6f1606be99..8d657350ced 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -61,7 +61,7 @@ # For the feature transforms we have an array of transforms to process our image data for training. The first transform in the array is `transforms.ToTensor()` this is from class [torchvision.transforms.ToTensor](https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.ToTensor). We need to take our images and turn them into a tensor. (To learn more about Tensors check out [this]() resource.) 
The ToTensor() transformation is doing more than converting our image into a tensor. Its also normalizing our data for us by scaling the images to be between 0 and 1. # # -# ..note: ToTensor only normalized image data that is in PIL mode of (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8. In the other cases, tensors are returned without scaling. +# .. note: ToTensor only normalized image data that is in PIL mode of (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8. In the other cases, tensors are returned without scaling. # # # Check out the other `TorchVision Transforms `_ From 7e2881d5c31a6512c43475a74e0ccb61fbfbed1b Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 10 Nov 2020 15:33:40 -0600 Subject: [PATCH 43/55] img rename optmization formatting --- ...optimization_loops.PNG => optimizationloops.png} | Bin .../quickstart/{typesofdata.PNG => typesdata.png} | Bin .../quickstart/data_quickstart_tutorial.py | 4 ++-- beginner_source/quickstart/optimization_tutorial.py | 2 +- beginner_source/quickstart/transforms_tutorial.py | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename _static/img/quickstart/{optimization_loops.PNG => optimizationloops.png} (100%) rename _static/img/quickstart/{typesofdata.PNG => typesdata.png} (100%) diff --git a/_static/img/quickstart/optimization_loops.PNG b/_static/img/quickstart/optimizationloops.png similarity index 100% rename from _static/img/quickstart/optimization_loops.PNG rename to _static/img/quickstart/optimizationloops.png diff --git a/_static/img/quickstart/typesofdata.PNG b/_static/img/quickstart/typesdata.png similarity index 100% rename from _static/img/quickstart/typesofdata.PNG rename to _static/img/quickstart/typesdata.png diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index ad56999d668..bd2d3e4e7dd 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -11,8 +11,8 @@ # ############################################################### -# .. figure:: /_static/img/quickstart/typesofdata.png -# :alt: typesofdata +# .. figure:: /_static/img/quickstart/typesdata.png +# :alt: typesdata # ############################################################ diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 3c91d13fc65..498aeb4b4b7 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -40,7 +40,7 @@ # ############################################################ -# .. figure:: /_static/img/quickstart/optimization_loops.png +# .. figure:: /_static/img/quickstart/optimizationloops.png # :alt: # diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index 8d657350ced..d4605fc03ab 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -61,7 +61,7 @@ # For the feature transforms we have an array of transforms to process our image data for training. The first transform in the array is `transforms.ToTensor()` this is from class [torchvision.transforms.ToTensor](https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.ToTensor). We need to take our images and turn them into a tensor. 
(To learn more about Tensors check out [this]() resource.) The ToTensor() transformation is doing more than converting our image into a tensor. Its also normalizing our data for us by scaling the images to be between 0 and 1. # # -# .. note: ToTensor only normalized image data that is in PIL mode of (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8. In the other cases, tensors are returned without scaling. +# .. note:: ToTensor only normalized image data that is in PIL mode of (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8. In the other cases, tensors are returned without scaling. # # # Check out the other `TorchVision Transforms `_ From 0823df8469ce0a4a45aaaec8d444b90b903ddd94 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 10 Nov 2020 16:42:10 -0600 Subject: [PATCH 44/55] updated links, next text, model format --- .../quickstart/autograd_tutorial.py | 5 +++- .../quickstart/build_model_tutorial.py | 24 ++++++++++--------- .../quickstart/data_quickstart_tutorial.py | 6 +++-- .../quickstart/optimization_tutorial.py | 6 ++++- .../quickstart/save_load_run_tutorial.py | 5 ++-- beginner_source/quickstart/tensor_tutorial.py | 6 +++-- .../quickstart/transforms_tutorial.py | 11 ++++++--- beginner_source/quickstart_tutorial.py | 7 +++--- 8 files changed, 45 insertions(+), 25 deletions(-) diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index 540bf26294f..784a1d1f991 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -231,6 +231,8 @@ # process, we will need to do a number of iterations to minimize the value # of **loss function**. # +# Next: Learn more about `saving, loading and running the model `_. +# ################################################################## # More help with the Pytorch Quickstart @@ -238,7 +240,8 @@ # #| `Tensors `_ #| `DataSets and DataLoaders `_ -#| `Transformations `_ +#| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index 6bdcd70876c..b2619438d52 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -9,17 +9,17 @@ # predefined layers that Pytorch has that can both simplify our code, and make it faster. # # In the below example, for our FashionMNIT image dataset, we are using a `Sequential` -# container from class `torch.nn.# Sequential `_ -# that allows us to define the model # layers inline. +# container from class `torch.nn. Sequential `_ +# that allows us to define the model layers inline. # The neural network modules layers will be added to it in the order they are passed in. # -# Another way this model could be bulid is with a class +# Another way to bulid this model is with a class # using `nn.Module `_ # -# We will break down the model below. 
+# Lets break down the steps to build this model below # -########################################## +########################################## # Inline nn.Sequential Example: # ---------------------------- # @@ -88,11 +88,10 @@ def forward(self, x): # ----------------------------------------------- # # From the docs: +# ``torch.nn.Flatten(start_dim: int = 1, end_dim: int = -1)`` # - -torch.nn.Flatten(start_dim: int = 1, end_dim: int = -1) -#Here is an example using one of the training_data set items: +# Here is an example using one of the training_data set items: tensor = training_data[0][0] print(tensor.size()) @@ -114,7 +113,7 @@ def forward(self, x): # # From the docs: # -# `torch.nn.Linear(in_features: int, out_features: int, bias: bool = True)` +# ``torch.nn.Linear(in_features: int, out_features: int, bias: bool = True)`` # input = training_data[0][0] @@ -137,7 +136,9 @@ def forward(self, x): # # - `nn.ReLU `_ Activation # - `nn.Softmax `_ Activation -# +# +# Next: Learn more about how the `optimzation loop works with this example `_. +# ################################################################## # More help with the Pytorch Quickstart @@ -145,7 +146,8 @@ def forward(self, x): # #| `Tensors `_ #| `DataSets and DataLoaders `_ -#| `Transformations `_ +#| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index bd2d3e4e7dd..4ee9d7b6f00 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -182,7 +182,8 @@ def __getitem__(self, idx): ################################################################# # With this we have all we need to know to load an process data of any kind in PyTorch to train deep learning models. # - +# Next: Learn more about how to `transform that data for training `_. +# ################################################################## # More help with the Pytorch Quickstart @@ -190,7 +191,8 @@ def __getitem__(self, idx): # #| `Tensors `_ #| `DataSets and DataLoaders `_ -#| `Transformations `_ +#| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 498aeb4b4b7..9c657c9b05a 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -96,6 +96,9 @@ # - Set model to evaluate mode and start validation loop # - calculate validation loss and update optimizer hyper parameters # - Set model to evaluate test loop +# +# Next: Learn more about `AutoGrad `_. 
+# ################################################################## @@ -104,7 +107,8 @@ # #| `Tensors `_ #| `DataSets and DataLoaders `_ -#| `Transformations `_ +#| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py index f844e9a5c74..8411a3e2c5a 100644 --- a/beginner_source/quickstart/save_load_run_tutorial.py +++ b/beginner_source/quickstart/save_load_run_tutorial.py @@ -96,7 +96,8 @@ # # | `Tensors `_ # | `DataSets and DataLoaders `_ -# | `Transformations `_ +# | `Transforms `_ # | `Build Model `_ # | `Optimization Loop `_ -# | `AutoGrad `_ \ No newline at end of file +# | `AutoGrad `_ +# | `Save, Load and Run Model `_ \ No newline at end of file diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index 7c96c225726..a5771956f8a 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -247,7 +247,8 @@ # computational time, because we need to copy and transform the data when # moving it from GPU anyway. # - +# Next learn how to load built in and custom `datasets with dataloaders `_ +# ################################################################## # More help with the Pytorch Quickstart @@ -255,7 +256,8 @@ # #| `Tensors `_ #| `DataSets and DataLoaders `_ -#| `Transformations `_ +#| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index d4605fc03ab..ebe394d16f9 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -43,7 +43,7 @@ # # From the docs: # -# ```torchvision.datasets.FashionMNIST(root, train=True, transform=None, target_transform=None, download=False)``` +# ``torchvision.datasets.FashionMNIST(root, train=True, transform=None, target_transform=None, download=False)`` ############################################## # Transform: Features @@ -120,16 +120,21 @@ class_names = image_datasets['train'].classes +################################################################## +# Next learn how to `build the model `_ +# + ################################################################## # More help with the PyTorch Quickstart # ---------------------------------------- # | `Tensors `_ # | `DataSets and DataLoaders `_ -# | `Transformations `_ +# | `Transforms `_ # | `Build Model `_ # | `Optimization Loop `_ -# | `AutoGrad `_ +# | `AutoGrad ` +# | `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index e720485ff2f..12d4bd1c9ac 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -14,8 +14,9 @@ # modify data in-place. Below is an example of how to load that data from the Pytorch open datasets and transform the data to a normalized tensor. 
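The loading code that comment points to sits outside the lines shown in this hunk. For reference, a minimal version of the pattern being described looks roughly like this; the root directory and batch size are illustrative choices, not values taken from the patch:

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

training_data = datasets.FashionMNIST(
    root="data",                                        # download/cache directory (illustrative)
    train=True,
    download=True,
    transform=transforms.Compose([transforms.ToTensor()]),
)
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)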
# # To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in Pytoch with this example checkout these resources: -# - `DataSet and DataLoader `_ +# # - `Tensors `_ +# - `DataSet and DataLoader `_ # - `Transforms `_ import torch @@ -194,11 +195,11 @@ def test(dataloader, model): # ---------------------------------------- # | `Tensors `_ # | `DataSets and DataLoaders `_ -# | `Transformations `_ +# | `Transforms `_ # | `Build Model `_ # | `Optimization Loop `_ # | `AutoGrad `_ -# +# | `Save, Load and Run Model `_ # # *Authors: Seth Juarez, Ari Bornstein, Cassie Breviu, Dmitry Soshnikov* From 631962573e096ad42ce3c0563aa53eca5e8e9781 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 10 Nov 2020 18:43:46 -0600 Subject: [PATCH 45/55] fix transforms and autograd link --- .../quickstart/autograd_tutorial.py | 2 +- .../quickstart/transforms_tutorial.py | 27 ++++++++++++------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index 784a1d1f991..00f129bbe6d 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -231,7 +231,7 @@ # process, we will need to do a number of iterations to minimize the value # of **loss function**. # -# Next: Learn more about `saving, loading and running the model `_. +# Next: Learn more about `saving, loading and running the model `_. # ################################################################## diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index ebe394d16f9..7be074d3b71 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -39,7 +39,7 @@ # Pytorch Datasets # -------------------------- # -# We are using the built-in open FashionMNIST datasets from the PyTorch library. For more info on the Datasets and Loaders check out [this]() resource. The `Train=True`indicates we want to download the training dataset from the built-in datasets, `Train=False` indicates to download the testing dataset. This way we have data partitioned out for training and testing within the provided PyTorch datasets. We will apply the same transfoms to both the training and testing datasets. +# We are using the built-in open FashionMNIST datasets from the PyTorch library. For more info on the Datasets and Loaders check out `this `_ resource. The ``Train=True`` indicates we want to download the training dataset from the built-in datasets, ``Train=False`` indicates to download the testing dataset. This way we have data partitioned out for training and testing within the provided PyTorch datasets. We will apply the same transfoms to both the training and testing datasets. # # From the docs: # @@ -54,11 +54,17 @@ transform=transforms.Compose([transforms.ToTensor()]) ##################################################### -# *Compose* -# The `transforms.compose` allows us to string together different steps of transformations in a sequential order. This allows us to add an array of transforms for both the features and labels when preparing our data for training. +# Compose +# ------------------------ # -# *ToTensor()* -# For the feature transforms we have an array of transforms to process our image data for training. 
The first transform in the array is ``transforms.ToTensor()`` this is from class `torchvision.transforms.ToTensor `_. We need to take our images and turn them into a tensor. (To learn more about Tensors check out `this `_ resource.) The ``ToTensor()`` transformation is doing more than converting our image into a tensor. It's also normalizing our data for us by scaling the images to be between 0 and 1. # # # .. note:: ToTensor only normalized image data that is in PIL mode of (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8. In the other cases, tensors are returned without scaling. @@ -77,14 +83,15 @@ target_transform= transforms.Lambda(lambda y: torch.zeros(10, dtype=torchfloat).scatter_(dim=0, index=torchtensor(y), value=1)) ################################################# -# This function is taking the y input and creating a tensor of size 10 with a float datatype. Then its calling scatter ([torch.Tensor.scatter_ class](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.scatter_)) to send each item to torch.zeros, according to the row, index and current item value. -# * *Dim=0* is row wise index -# * *index* = torchtensor(y)` is the index of the element toscatter -# * *value* = 1` is the source elemnt +# This function is taking the y input and creating a tensor of size 10 with a float datatype. Then it's calling scatter `torch.Tensor.scatter_ class `_ to send each item to torch.zeros, according to the row, index and current item value. +# - Dim=0 is the row-wise index +# - index = torchtensor(y) is the index of the element to scatter +# - value = 1 is the source element ############################################## # Using your own data # -------------------------------------- # +# Below is an example for processing image data using a dataset from a local directory.
# # Example: @@ -133,7 +140,7 @@ # | `Transforms `_ # | `Build Model `_ # | `Optimization Loop `_ -# | `AutoGrad ` +# | `AutoGrad `_ # | `Save, Load and Run Model `_ From d18db14584a616ae3f80761fd46d71ace4900489 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 10 Nov 2020 19:12:49 -0600 Subject: [PATCH 46/55] fixers gonna fix --- beginner_source/quickstart/autograd_tutorial.py | 7 +++---- beginner_source/quickstart/build_model_tutorial.py | 5 ++--- beginner_source/quickstart/data_quickstart_tutorial.py | 5 ++--- beginner_source/quickstart/optimization_tutorial.py | 5 ++--- beginner_source/quickstart/save_load_run_tutorial.py | 5 ++--- beginner_source/quickstart/tensor_tutorial.py | 5 ++--- beginner_source/quickstart/transforms_tutorial.py | 6 +++--- beginner_source/quickstart_tutorial.py | 4 ++-- 8 files changed, 18 insertions(+), 24 deletions(-) diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index 00f129bbe6d..5048398d0c4 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -231,17 +231,16 @@ # process, we will need to do a number of iterations to minimize the value # of **loss function**. # -# Next: Learn more about `saving, loading and running the model `_. +# Next: Learn more about `saving, loading and running the model `_. # ################################################################## -# More help with the Pytorch Quickstart +# Pytorch Quickstart Topics # ----------------- -# #| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Save, Load and Run Model `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index b2619438d52..a3cec5d8359 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -141,13 +141,12 @@ def forward(self, x): # ################################################################## -# More help with the Pytorch Quickstart +# Pytorch Quickstart Topics # ----------------- -# #| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Save, Load and Run Model `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/data_quickstart_tutorial.py b/beginner_source/quickstart/data_quickstart_tutorial.py index 4ee9d7b6f00..93ea8512098 100644 --- a/beginner_source/quickstart/data_quickstart_tutorial.py +++ b/beginner_source/quickstart/data_quickstart_tutorial.py @@ -186,13 +186,12 @@ def __getitem__(self, idx): # ################################################################## -# More help with the Pytorch Quickstart +# Pytorch Quickstart Topics # ----------------- -# #| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Save, Load and Run Model `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 9c657c9b05a..7e2fdb28faa 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -102,13 +102,12 @@ ################################################################## -# More help with the Pytorch Quickstart +# Pytorch Quickstart Topics # ----------------- -# #| 
`Tensors `_ #| `DataSets and DataLoaders `_ #| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Save, Load and Run Model `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py index 8411a3e2c5a..42112d11f32 100644 --- a/beginner_source/quickstart/save_load_run_tutorial.py +++ b/beginner_source/quickstart/save_load_run_tutorial.py @@ -91,13 +91,12 @@ print(f'Predicted: "{predicted}", Actual: "{actual}"') ################################################################## -# More help with the PyTorch Quickstart +# Pytorch Quickstart Topics # ---------------------------------------- -# # | `Tensors `_ # | `DataSets and DataLoaders `_ # | `Transforms `_ # | `Build Model `_ # | `Optimization Loop `_ # | `AutoGrad `_ -# | `Save, Load and Run Model `_ \ No newline at end of file +# | `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index a5771956f8a..ff0d56108ae 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -251,13 +251,12 @@ # ################################################################## -# More help with the Pytorch Quickstart +# Pytorch Quickstart Topics # ----------------- -# #| `Tensors `_ #| `DataSets and DataLoaders `_ #| `Transforms `_ #| `Build Model `_ #| `Optimization Loop `_ #| `AutoGrad `_ -#| `Save, Load and Run Model `_ +#| `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart/transforms_tutorial.py b/beginner_source/quickstart/transforms_tutorial.py index 7be074d3b71..eafb6ede07e 100644 --- a/beginner_source/quickstart/transforms_tutorial.py +++ b/beginner_source/quickstart/transforms_tutorial.py @@ -133,15 +133,15 @@ ################################################################## -# More help with the PyTorch Quickstart +# Pytorch Quickstart Topics # ---------------------------------------- # | `Tensors `_ # | `DataSets and DataLoaders `_ -# | `Transforms `_ +# | `Transforms ` # | `Build Model `_ # | `Optimization Loop `_ # | `AutoGrad `_ -# | `Save, Load and Run Model `_ +# | `Save, Load and Run Model `_ diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index 12d4bd1c9ac..045c3dd88fe 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -191,7 +191,7 @@ def test(dataloader, model): print(f'Predicted: "{predicted}", Actual: "{actual}"') ################################################################## -# More help with the PyTorch Quickstart +# Pytorch Quickstart Topics # ---------------------------------------- # | `Tensors `_ # | `DataSets and DataLoaders `_ @@ -199,7 +199,7 @@ def test(dataloader, model): # | `Build Model `_ # | `Optimization Loop `_ # | `AutoGrad `_ -# | `Save, Load and Run Model `_ +# | `Save, Load and Run Model `_ # # *Authors: Seth Juarez, Ari Bornstein, Cassie Breviu, Dmitry Soshnikov* From 9fb145050cf4c1a64e84e2a300d8cb6139b42730 Mon Sep 17 00:00:00 2001 From: shwars Date: Wed, 11 Nov 2020 13:03:01 +0000 Subject: [PATCH 47/55] Fixed formatting, move autograd before optimization --- .../quickstart/autograd_tutorial.py | 41 +++++++++++-------- .../quickstart/build_model_tutorial.py | 3 +- .../quickstart/optimization_tutorial.py | 16 ++++---- beginner_source/quickstart/tensor_tutorial.py | 25 +++++------ beginner_source/quickstart_tutorial.py | 7 ++-- 5 files 
changed, 46 insertions(+), 46 deletions(-) diff --git a/beginner_source/quickstart/autograd_tutorial.py b/beginner_source/quickstart/autograd_tutorial.py index 00f129bbe6d..7890b6a788d 100644 --- a/beginner_source/quickstart/autograd_tutorial.py +++ b/beginner_source/quickstart/autograd_tutorial.py @@ -38,10 +38,12 @@ # optimize. Thus, we need to be able to compute the gradients of loss # function with respect to those variables. In orded to do that, we set # the ``requires_grad`` property of those tensors. -# -# **Note:** You can set the value of ``requires_grad`` when creating a -# tensor, or later by using ``x.requires_grad_(True)`` method. -# + +####################################################################### +# .. note:: You can set the value of ``requires_grad`` when creating a +# tensor, or later by using ``x.requires_grad_(True)`` method. + +####################################################################### # A function that we apply to tensors to construct computational graph is # in fact an object of class ``Function``. This object knows how to # compute the function in the *forward* direction, and also how to compute @@ -72,13 +74,15 @@ ###################################################################### -# **Notes:** \* We can only obtain the ``grad`` properties for the leaf -# nodes of the computational graph, which have ``requires_grad`` property -# set to ``True``. For all other nodes in our graph gradients will not be -# available. \* We can only perform gradient calculations using -# ``backward`` once on a given graph, for performance reasons. If we need -# to do several ``backward`` calls on the same graph, we need to pass -# ``retain_graph=True`` to the ``backward`` call. +# .. note:: +# - We can only obtain the ``grad`` properties for the leaf +# nodes of the computational graph, which have ``requires_grad`` property +# set to ``True``. For all other nodes in our graph gradients will not be +# available. +# - We can only perform gradient calculations using +# ``backward`` once on a given graph, for performance reasons. If we need +# to do several ``backward`` calls on the same graph, we need to pass +# ``retain_graph=True`` to the ``backward`` call. # @@ -135,12 +139,13 @@ # to compute the proper gradients, you need to zero out the ``grad`` # property before. In real-life training an *optimizer* helps us to do # this. -# -# **Note:** Previously we were calling ``backward()`` function without -# parameters. This is essentially equivalent to calling -# ``backward(torch.tensor(1.0))``, which is a useful way to compute the -# gradients in case of a scalar-valued function, such as loss during -# neural network training. + +###################################################################### +# .. note:: Previously we were calling ``backward()`` function without +# parameters. This is essentially equivalent to calling +# ``backward(torch.tensor(1.0))``, which is a useful way to compute the +# gradients in case of a scalar-valued function, such as loss during +# neural network training. # @@ -231,7 +236,7 @@ # process, we will need to do a number of iterations to minimize the value # of **loss function**. # -# Next: Learn more about `saving, loading and running the model `_. +# Next: Learn more about `how to use AutoGrad to train a neural network model `_. 
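The points collected in these notes (setting ``requires_grad``, calling ``backward()`` once on a scalar loss, reading gradients from leaf tensors, and gradients accumulating until cleared) fit into one short sketch using core torch only:

import torch

x = torch.ones(5)                          # input
y = torch.zeros(3)                         # target
w = torch.randn(5, 3, requires_grad=True)  # leaf tensors we want gradients for
b = torch.randn(3, requires_grad=True)

z = torch.matmul(x, w) + b                 # each operation extends the computational graph
loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
loss.backward()                            # same as backward(torch.tensor(1.0)) for a scalar loss
print(w.grad, b.grad)                      # gradients are now available on the leaf tensors

w.grad.zero_()                             # gradients accumulate, so clear them before another backward call
b.grad.zero_()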
# ################################################################## diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index b2619438d52..80f6dc4d4fa 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -14,7 +14,8 @@ # The neural network modules layers will be added to it in the order they are passed in. # # Another way to bulid this model is with a class -# using `nn.Module `_ +# using `nn.Module `_ This gives us more flexibility, because +# we can construct layers of any complexity, including the ones with shared weights. # # Lets break down the steps to build this model below # diff --git a/beginner_source/quickstart/optimization_tutorial.py b/beginner_source/quickstart/optimization_tutorial.py index 9c657c9b05a..fda3cf92e7f 100644 --- a/beginner_source/quickstart/optimization_tutorial.py +++ b/beginner_source/quickstart/optimization_tutorial.py @@ -4,21 +4,19 @@ Now that we have a model and data it's time to train, validate and test our model by optimizating it's paramerters on our data! To do this we need to understand a how to handle 5 core deep learning concepts in PyTorch -1. Hyperparameters (learning rates, batch sizes, epochs etc) -2. Optimization Loops -3. Loss -4. AutoGrad -5. Optimizers + 1. Hyperparameters (learning rates, batch sizes, epochs etc) + 2. Optimization Loops + 3. Loss + 4. AutoGrad + 5. Optimizers Let's dissect these concepts one by one and look at some code at the end we'll see how it all fits together. - +Hyperparameters +----------------- """ ###################################################### -# Hyperparameters -# ----------------- -# # Hyperparameters are adjustable parameters that let you control the model optimization process. For example, with neural networks, you can configure: # # - **Number of Epochs**- the number times iterate over the dataset to update model parameters diff --git a/beginner_source/quickstart/tensor_tutorial.py b/beginner_source/quickstart/tensor_tutorial.py index a5771956f8a..f7e36e05721 100644 --- a/beginner_source/quickstart/tensor_tutorial.py +++ b/beginner_source/quickstart/tensor_tutorial.py @@ -25,9 +25,7 @@ ###################################################################### -# .. note:: When using CPU for computations, tensors converted from arrays -# share the same memory for data. Thus, changing the underlying array will -# also affect the tensor. +# .. note:: When using CPU for computations, tensors converted from arrays share the same memory for data. Thus, changing the underlying array will also affect the tensor. # @@ -88,13 +86,10 @@ # ~~~~~~~~~~~~~~~~~ # # Tensors support all basic arithmetic operations, which can be specified -# in different ways: \* Using operators, such as ``+``, ``-``, etc. \* -# Using functions such as ``add``, ``mult``, etc. Functions can either -# return values, or store them in the specified ouput variable (using -# ``out=`` parameter) \* In-place operations, which modify one of the -# arguments. Those operations have ``_`` appended to their name, eg. -# ``add_``. -# +# in different ways: +# - Using operators, such as ``+``, ``-``, etc. \* +# - Using functions such as ``add``, ``mult``, etc. Functions can either return values, or store them in the specified ouput variable (using ``out=`` parameter) +# - In-place operations, which modify one of the arguments. Those operations have ``_`` appended to their name, eg. ``add_``. 
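The three styles in the list above, side by side in a short sketch:

import torch

t = torch.ones(3)
print(t + 2)               # operator form
print(torch.add(t, 2))     # function form, returning a new tensor
out = torch.empty(3)
torch.add(t, 2, out=out)   # function form, storing the result in the given output tensor
t.add_(2)                  # in-place form, marked by the trailing underscore
print(t)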
# Complete reference to all tensor operations can be found `in the # documentation `__. # @@ -188,11 +183,11 @@ ###################################################################### # .. note:: ``view`` is similar to ``reshape`` operation in NumPy. There -# is also a ``reshape`` method available in PyTorch, and it is more -# powerful than ``view``, because it can also reshape non-contiguous -# arrays by copying them to the new shape. However, in vast majority of -# cases you can use ``view`` and make sure that no data copying occurs, -# and the operation is always efficient. +# is also a ``reshape`` method available in PyTorch, and it is more +# powerful than ``view``, because it can also reshape non-contiguous +# arrays by copying them to the new shape. However, in vast majority of +# cases you can use ``view`` and make sure that no data copying occurs, +# and the operation is always efficient. # diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index 12d4bd1c9ac..d6731059023 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -4,11 +4,12 @@ The basic machine learning concepts in any framework should include: Working with data, Creating models, Optimizing Parameters, Saving and Loading Models. In this quickstart we will go through an example of an applied machine learning model using the FashionMNIST dataset that demonstrates these core steps using Pytorch. +Working with data +----------------- """ + ###################################################################### -# Working with data -# ----------------- -# +# # PyTorch has two basic data primitives: ``DataSet`` and ``DataLoader``. # These ``DataSet`` objects include a ``transforms`` mechanism to # modify data in-place. Below is an example of how to load that data from the Pytorch open datasets and transform the data to a normalized tensor. From 45a02c6db21eebdf0061a6d6256c898f0f6eab7a Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Mon, 7 Dec 2020 15:59:00 -0600 Subject: [PATCH 48/55] Added more detail to the intro of the quickstart --- beginner_source/quickstart_tutorial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index ef2c3c6b7f1..717febb9185 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -3,7 +3,7 @@ =================== -The basic machine learning concepts in any framework should include: Working with data, Creating models, Optimizing Parameters, Saving and Loading Models. In this quickstart we will go through an example of an applied machine learning model using the FashionMNIST dataset that demonstrates these core steps using Pytorch. +The basic machine learning concepts in any framework should include: Working with data, Creating models, Optimizing Parameters, Saving and Loading Models. In this PyTorch Quickstart we will go through these concepts and how to apply them with PyTorch. The dataset we will be using is the FashionMNIST clothing images dataset that demonstrates these core steps applied to create ML Models. You will be introduced to the complete ML workflow using PyTorch with links to learn more at each step. Using this dataset we will be able to predict if the image is one of the following classes: T-shirt/top, Trouser, Pullover, Dress, Coat, Sandal, Shirt, Sneaker, Bag, or Ankle boot. Let's get started!
Working with data ----------------- From ac96721b9d0082582eb33617b7a7b10c376eca55 Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 8 Dec 2020 10:20:08 -0600 Subject: [PATCH 49/55] primitive dataset text update --- beginner_source/quickstart_tutorial.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py index 717febb9185..71ea6d38b79 100644 --- a/beginner_source/quickstart_tutorial.py +++ b/beginner_source/quickstart_tutorial.py @@ -12,8 +12,10 @@ ###################################################################### # # PyTorch has two basic data primitives: ``DataSet`` and ``DataLoader``. -# These ``DataSet`` objects include a ``transforms`` mechanism to +# The `torchvision.datasets` ``DataSet`` object includes a ``transforms`` mechanism to # modify data in-place. Below is an example of how to load that data from the Pytorch open datasets and transform the data to a normalized tensor. + +# This example is using the `torchvision.datasets` which is a subclass from the primitive `torch.utils.data.Dataset`. Note that the primitive dataset doesnt have the built in transforms param like the built in dataset in `torchvision.datasets.` # # To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in Pytoch with this example checkout these resources: # From 0a3c2617f62aa14a218fd8f0d3d8f7c368bb6a0b Mon Sep 17 00:00:00 2001 From: Cassie Breviu Date: Tue, 8 Dec 2020 12:16:56 -0600 Subject: [PATCH 50/55] fix build model --- .../quickstart/build_model_tutorial.py | 196 +++++++++++------- 1 file changed, 120 insertions(+), 76 deletions(-) diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py index cc7442e4863..f139fe40882 100644 --- a/beginner_source/quickstart/build_model_tutorial.py +++ b/beginner_source/quickstart/build_model_tutorial.py @@ -1,100 +1,144 @@ """ -Optimizing Model Parameters -=================== -Now that we have a model and data it's time to train, validate and test our model by optimizating it's paramerters on our data! -To do this we need to understand a how to handle 5 core deep learning concepts in PyTorch - 1. Hyperparameters (learning rates, batch sizes, epochs etc) - 2. Optimization Loops - 3. Loss - 4. AutoGrad - 5. Optimizers -Let's dissect these concepts one by one and look at some code at the end we'll see how it all fits together. -Hyperparameters ------------------ +Build Model Tutorial +======================================= """ -###################################################### -# Hyperparameters are adjustable parameters that let you control the model optimization process. For example, with neural networks, you can configure: +############################################### +# The data has been loaded and transformed we can now build the model. +# We will leverage `torch.nn `_ +# predefined layers that Pytorch has that can simplify our code. +# +# In the below example, for our FashionMNIT image dataset, we are using a `Sequential` +# container from class `torch.nn. Sequential `_ +# that allows us to define the model layers inline. +# The neural network modules layers will be added to it in the order they are passed in. +# +# Another way to bulid this model is with a class +# using `nn.Module `_ This gives us more flexibility, because +# we can construct layers of any complexity, including the ones with shared weights. 
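A short illustration of the shared-weights flexibility mentioned above. The class name and layer size here are hypothetical, chosen only to show the pattern; nothing like this appears in the tutorial itself:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SharedWeightNet(nn.Module):          # hypothetical example
    def __init__(self):
        super(SharedWeightNet, self).__init__()
        self.shared = nn.Linear(512, 512)  # one set of weights...

    def forward(self, x):
        x = F.relu(self.shared(x))         # ...applied once here
        return F.relu(self.shared(x))      # ...and reused here with the very same parameters

model = SharedWeightNet()
print(model(torch.randn(1, 512)).shape)             # torch.Size([1, 512])
print(sum(p.numel() for p in model.parameters()))   # shared parameters are counted once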
# -# - **Number of Epochs**- the number times iterate over the dataset to update model parameters -# - **Batch Size** - the number of samples in the dataset to evaluate before you update model parameters -# - **Cost Function** - the method used to decide how to evaluate the model on a data sample to update the model parameters -# - **Learning Rate** - how much to update models parameters at each batch/epoch set this to large and you won't update optimally if you set it to small you will learn really slowly +# Lets break down the steps to build this model below +# + +########################################## +# Inline nn.Sequential Example: +# ---------------------------- +# + +import os +import torch +import torch.nn as nn +import torch.onnx as onnx +from torch.utils.data import DataLoader +from torchvision import datasets, transforms + +device = 'cuda' if torch.cuda.is_available() else 'cpu' +print('Using {} device'.format(device)) -learning_rate = 1e-3 -batch_size = 64 -epochs = 5 +# model +model = nn.Sequential( + nn.Flatten(), + nn.Linear(28*28, 512), + nn.ReLU(), + nn.Linear(512, 512), + nn.ReLU(), + nn.Linear(512, len(classes)), + nn.Softmax(dim=1) + ).to(device) + +print(model) -###################################################### -# Optimizaton Loops -# ----------------- +############################################# +# Class nn.Module Example: +# -------------------------- # -# Once we set our hyperparameters we can then optimize our our model with optimization loops. + +class NeuralNework(nn.Module): + def __init__(self, x): + super(NeuralNework, self).__init__() + self.layer1 = nn.Linear(28*28, 512) + self.layer2 = nn.Linear(512, 512) + self.output = nn.Linear(512, 10) + + def forward(self, x): + x = F.relu(self.layer1(x)) + x = F.relu(self.layer2(x)) + x = self.output(x) + return F.softmax(x, dim=1) + +############################################# +# Get Device for Training +# ----------------------- +# Here we check to see if `torch.cuda `_ is available to use the GPU, else we will use the CPU. # -# The optimziation loop is comprized of three main subloops in PyTorch. +# Example: # -############################################################ -# .. figure:: /_static/img/quickstart/optimizationloops.png -# :alt: +device = 'cuda' if torch.cuda.is_available() else 'cpu' +print('Using {} device'.format(device)) + +############################################## +# The Model Module Layers +# ------------------------- +# +# Lets break down each model layer in the FashionMNIST model. # -############################################################# -# 1. The Train Loop - Core loop iterates over all the epochs -# 2. The Validation Loop - Validate loss after each weight parameter update and can be used to gauge hyper parameter performance and update them for the next batch. -# 3. The Test Loop - is used to evaluate our models performance after each epoch on traditional metrics to show how much our model is generalizing from the train and validation dataset to the test dataset it's never seen before. +################################################## +# `nn.Flatten `_ to reduce tensor dimensions to one. 
+# ----------------------------------------------- # +# From the docs: +# ``torch.nn.Flatten(start_dim: int = 1, end_dim: int = -1)`` +# + +# Here is an example using one of the training_data set items: +tensor = training_data[0][0] +print(tensor.size()) + +# Output: torch.Size([1, 28, 28]) -for epoch in range(num_epochs): - # Optimization Loop - # Train loop over batches - model.train() # set model to train - # Model Update Code - model.eval() # After exiting batch loop set model to eval to speed up evaluation and not track gradients (this is explained below) - # Validation Loop - # - Put sample validation metric logging and hyperparameter update code here - # After exiting train loop set model to eval to speed up evaluation and not track gradients (this is explained below) - # Test Loop - # - Put sample test metric logging and hyperparameter update code here +model = nn.Sequential( + nn.Flatten() +) +flattened_tensor = model(tensor) +flattened_tensor.size() -###################################################### -# Loss -# ----------------- +# Output: torch.Size([1, 784]) + +############################################## +# `nn.Linear `_ to add a linear layer to the model. +# ------------------------------- +# +# Now that we have flattened our tensor dimension we will apply a linear layer transform that will calculate/learn the weights and the bias. # -# The loss is the value used to update our parameters. To calculate the loss we make a prediction using the inputs of our given data sample. +# From the docs: +# +# ``torch.nn.Linear(in_features: int, out_features: int, bias: bool = True)`` # -preds = model(inputs) -loss = cost_function(preds, labels) -# Make sure previous gradients are cleared -optimizer.zero_grad() -# Calculates gradients with respect to loss -loss.backward() -optimizer.step() +input = training_data[0][0] +print(input.size()) +model = nn.Sequential( + nn.Flatten(), + nn.Linear(28*28, 512), +) +output = model(input) +output.size() -###################################################### -# The standard method for optimization is called Stochastic Gradient Descent, to learn more check out this awesome video by `3blue1brown `_. There are many different optimizers and variations of this method in PyTorch such as ADAM and RMSProp that work better for different kinds of models, they are out side the scope of this Blitz, but can check out the full list of optimizers `here `_ -###################################################### -# Putting it all together lets look at a basic optimization loop -# ----------------- -# -# Initilize optimizer and example cost function +# Output: +# torch.Size([1, 28, 28]) +# torch.Size([1, 512]) + +################################################# +# Activation Functions +# ------------------------- # -# For loop to iterate over epoch -# - Train loop over batches -# - Set model to train mode -# - Calculate loss using -# - clear optimizer gradient -# - loss.backword -# - optimizer step -# - Set model to evaluate mode and start validation loop -# - calculate validation loss and update optimizer hyper parameters -# - Set model to evaluate test loop +# - `nn.ReLU `_ Activation +# - `nn.Softmax `_ Activation # -# Next: Learn more about `AutoGrad `_. +# Next: Learn more about how the `optimzation loop works with this example `_. # - -################################################################## # .. 
 # .. include:: /beginner_source/quickstart/qs_toc.txt
-#
+#
\ No newline at end of file

From ab9c2cee53da39e8f21904fc49343d0996a9da78 Mon Sep 17 00:00:00 2001
From: Cassie Breviu
Date: Wed, 9 Dec 2020 10:43:53 -0600
Subject: [PATCH 51/55] updated quickstart to class model

---
 .../quickstart/build_model_tutorial.py | 14 +++++--
 beginner_source/quickstart_tutorial.py | 37 +++++++++++--------
 2 files changed, 31 insertions(+), 20 deletions(-)

diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py
index f139fe40882..c4a0373749c 100644
--- a/beginner_source/quickstart/build_model_tutorial.py
+++ b/beginner_source/quickstart/build_model_tutorial.py
@@ -6,7 +6,7 @@
 ###############################################
 # The data has been loaded and transformed we can now build the model.
 # We will leverage `torch.nn `_
-# predefined layers that Pytorch has that can simplify our code.
+# predefined layers that PyTorch has that can simplify our code.
 #
 # In the below example, for our FashionMNIT image dataset, we are using a `Sequential`
 # container from class `torch.nn. Sequential `_
@@ -53,18 +53,24 @@
 # --------------------------
 #
 
-class NeuralNework(nn.Module):
-    def __init__(self, x):
-        super(NeuralNework, self).__init__()
+class NeuralNetwork(nn.Module):
+    def __init__(self):
+        super(NeuralNetwork, self).__init__()
+        self.flatten = nn.Flatten()
         self.layer1 = nn.Linear(28*28, 512)
         self.layer2 = nn.Linear(512, 512)
         self.output = nn.Linear(512, 10)
 
     def forward(self, x):
+        x = self.flatten(x)
         x = F.relu(self.layer1(x))
         x = F.relu(self.layer2(x))
         x = self.output(x)
         return F.softmax(x, dim=1)
 
+model = NeuralNetwork().to(device)
+
+print(model)
+
 #############################################
 # Get Device for Training
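With ``self.flatten`` now registered in ``__init__``, every layer assigned to ``self`` is tracked by ``nn.Module``, which is the "parameter management" this page talks about. A quick way to see it, sketched under the assumption that the ``NeuralNetwork`` class and ``device`` from the hunk above are in scope:

model = NeuralNetwork().to(device)

# Each registered submodule contributes named, trainable tensors:
for name, param in model.named_parameters():
    print(name, tuple(param.shape))
# layer1.weight (512, 784)    layer1.bias (512,)
# layer2.weight (512, 512)    layer2.bias (512,)
# output.weight (10, 512)     output.bias (10,)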
diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py
index 71ea6d38b79..54785f1d068 100644
--- a/beginner_source/quickstart_tutorial.py
+++ b/beginner_source/quickstart_tutorial.py
@@ -13,11 +13,10 @@
 #
 # PyTorch has two basic data primitives: ``DataSet`` and ``DataLoader``.
 # The `torchvision.datasets` ``DataSet`` object includes a ``transforms`` mechanism to
-# modify data in-place. Below is an example of how to load that data from the Pytorch open datasets and transform the data to a normalized tensor.
-
+# modify data in-place. Below is an example of how to load that data from the PyTorch open datasets and transform the data to a normalized tensor.
 # This example is using the `torchvision.datasets` which is a subclass from the primitive `torch.utils.data.Dataset`. Note that the primitive dataset doesn't have the built in transforms param like the built in dataset in `torchvision.datasets.`
 #
-# To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in Pytoch with this example checkout these resources:
+# To see more examples and details of how to work with Tensors, Datasets, DataLoaders and Transforms in PyTorch with this example check out these resources:
 #
 # - `Tensors `_
 # - `DataSet and DataLoader `_
@@ -29,6 +28,7 @@
 import matplotlib.pyplot as plt
 from torch.utils.data import DataLoader
 from torchvision import datasets, transforms
+import torch.nn.functional as F
 
 classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt",
            "Sneaker", "Bag", "Ankle boot"]
@@ -55,22 +55,27 @@
 # ---------------
 #
 # There are two ways of creating models: in-line or as a class. This
-# quickstart will consider an in-line definition. For more examples checkout `building the model `_.
+# quickstart will consider a class definition. For more examples check out `building the model `_.
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 print('Using {} device'.format(device))
 
-# in-line model
-
-model = nn.Sequential(
-    nn.Flatten(),
-    nn.Linear(28*28, 512),
-    nn.ReLU(),
-    nn.Linear(512, 512),
-    nn.ReLU(),
-    nn.Linear(512, len(classes)),
-    nn.Softmax(dim=1)
-    ).to(device)
+# Define model
+class NeuralNetwork(nn.Module):
+    def __init__(self):
+        super(NeuralNetwork, self).__init__()
+        self.flatten = nn.Flatten()
+        self.layer1 = nn.Linear(28*28, 512)
+        self.layer2 = nn.Linear(512, 512)
+        self.output = nn.Linear(512, 10)
+
+    def forward(self, x):
+        x = self.flatten(x)
+        x = F.relu(self.layer1(x))
+        x = F.relu(self.layer2(x))
+        x = self.output(x)
+        return F.softmax(x, dim=1)
+model = NeuralNetwork().to(device)
 
 print(model)
 
@@ -193,7 +198,7 @@ def test(dataloader, model):
 print(f'Predicted: "{predicted}", Actual: "{actual}"')
 
 ##################################################################
-# Pytorch Quickstart Topics
+# PyTorch Quickstart Topics
 # ----------------------------------------
 # | `Tensors `_
 # | `DataSets and DataLoaders `_
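The ``predicted``/``actual`` strings printed above come from mapping the model's highest-scoring output back into the ``classes`` list. A sketch of that inference step, assuming the quickstart's ``test_data``, ``classes``, ``device``, and the class model defined in this patch:

model.eval()
x, y = test_data[0][0], test_data[0][1]
with torch.no_grad():
    pred = model(x.to(device))       # the model flattens [1, 28, 28] internally
    predicted, actual = classes[pred[0].argmax(0)], classes[y]
    print(f'Predicted: "{predicted}", Actual: "{actual}"')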
From 31efd4bc1b9043a066aab1a069bc291fec46f47a Mon Sep 17 00:00:00 2001
From: Cassie Breviu
Date: Thu, 10 Dec 2020 16:53:15 -0600
Subject: [PATCH 52/55] updates to load class model, build model page work

---
 .../quickstart/build_model_tutorial.py   | 95 +++++++++++++------
 .../quickstart/save_load_run_tutorial.py | 11 +--
 beginner_source/quickstart_tutorial.py   | 10 +-
 3 files changed, 67 insertions(+), 49 deletions(-)

diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py
index c4a0373749c..d0f9795de11 100644
--- a/beginner_source/quickstart/build_model_tutorial.py
+++ b/beginner_source/quickstart/build_model_tutorial.py
@@ -1,24 +1,22 @@
 """
 Build Model Tutorial
 =======================================
-"""
 
-###############################################
-# The data has been loaded and transformed we can now build the model.
-# We will leverage `torch.nn `_
-# predefined layers that PyTorch has that can simplify our code.
-#
-# In the below example, for our FashionMNIT image dataset, we are using a `Sequential`
-# container from class `torch.nn. Sequential `_
-# that allows us to define the model layers inline.
-# The neural network modules layers will be added to it in the order they are passed in.
-#
-# Another way to bulid this model is with a class
-# using `nn.Module `_ This gives us more flexibility, because
-# we can construct layers of any complexity, including the ones with shared weights.
-#
-# Lets break down the steps to build this model below
-#
+The data has been loaded and transformed we can now build the model.
+We will leverage `torch.nn `_ predefined layers that PyTorch has that can simplify our code.
+
+In the below example, for our FashionMNIT image dataset, we are using a `Sequential`
+container from class `torch.nn. Sequential `_
+that allows us to define the model layers inline. In the "Sequential" in-line model building format the ``forward()``
+method is created for you and the modules you add are passed in as a list or dictionary in the order that are they are defined.
+
+Another way to bulid this model is with a class
+using `nn.Module `_
+A big plus with using a class that inherits ``nn.Module`` is better parameter management across all nested submodules.
+This gives us more flexibility, because we can construct layers of any complexity, including the ones with shared weights.
+
+Lets break down the steps to build this model below
+"""
 
 ##########################################
 # Inline nn.Sequential Example:
 # ----------------------------
@@ -83,6 +81,15 @@ def forward(self, x):
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 print('Using {} device'.format(device))
 
+##############################################
+# __init__
+# -------------------------
+#
+# Our class inherits from ``nn.Module``, which is the base class for
+# building neural network modules. The ``__init__`` function defines the layers in your neural network
+# and initializes the modules that will be called in the ``forward`` function.
+#
+
 ##############################################
 # The Model Module Layers
 # -------------------------
 #
 # Let's break down each model layer in the FashionMNIST model.
 #
 
 ##################################################
-# `nn.Flatten `_ to reduce tensor dimensions to one.
+# `nn.Flatten `_
 # -----------------------------------------------
 #
+# First we call nn.Flatten to reduce tensor dimensions to one.
+#
 # From the docs:
 # ``torch.nn.Flatten(start_dim: int = 1, end_dim: int = -1)``
 # Here is an example using one of the training_data set items:
 tensor = training_data[0][0]
 print(tensor.size())
 
-# Output: torch.Size([1, 28, 28])
-
 model = nn.Sequential(
     nn.Flatten()
 )
 flattened_tensor = model(tensor)
 flattened_tensor.size()
 
-# Output: torch.Size([1, 784])
-
 ##############################################
 # `nn.Linear `_ to add a linear layer to the model.
 # -------------------------------
 #
-# Now that we have flattened our tensor dimension we will apply a linear layer transform that will calculate/learn the weights and the bias.
+# Now that we have flattened our tensor dimension we will apply a linear layer
+# transform that will calculate/learn the weights and the bias.
 #
 # From the docs:
 #
 # ``torch.nn.Linear(in_features: int, out_features: int, bias: bool = True)``
 #
 
 input = training_data[0][0]
 print(input.size())
 model = nn.Sequential(
     nn.Flatten(),
     nn.Linear(28*28, 512),
 )
 output = model(input)
 output.size()
 
-# Output:
-# torch.Size([1, 28, 28])
-# torch.Size([1, 512])
-
 #################################################
 # Activation Functions
 # -------------------------
 #
-# - `nn.ReLU `_ Activation
-# - `nn.Softmax `_ Activation
+# After the first two linear layers we will call the `nn.ReLU `_
+# activation function. Then after the third linear layer we call the `nn.Softmax `_
+# activation to rescale the outputs to values between 0 and 1 that sum to one.
+#
+
+model = nn.Sequential(
+    nn.Flatten(),
+    nn.Linear(28*28, 512),
+    nn.ReLU(),
+    nn.Linear(512, 512),
+    nn.ReLU(),
+    nn.Linear(512, len(classes)),
+    nn.Softmax(dim=1)
+    ).to(device)
+
+print(model)
+
+
+###################################################
+# Forward Function
+# --------------------------------
+#
+# In the class implementation of the neural network we define a `forward` function`.
+#
+
+    def forward(self, x):
+        x = self.flatten(x)
+        x = F.relu(self.layer1(x))
+        x = F.relu(self.layer2(x))
+        x = self.output(x)
+        return F.softmax(x, dim=1)
+
+    model = NeuralNetwork().to(device)
+
+
+################################################
+# In the next section you will learn how to train the model and how the optimization loop works for this example.
 #
 # Next: Learn more about how the `optimization loop works with this example `_.
 #
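Because ``nn.ReLU`` clamps negative values and ``nn.Softmax`` normalizes, a tiny sketch makes the two activations described above concrete (illustrative only; any small tensor works):

import torch
import torch.nn as nn

t = torch.tensor([[-1.0, 0.0, 2.0]])
print(nn.ReLU()(t))          # tensor([[0., 0., 2.]]): negatives are clamped to zero
probs = nn.Softmax(dim=1)(t)
print(probs)                 # every entry lands in (0, 1)
print(probs.sum(dim=1))      # tensor([1.]): each row sums to one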
diff --git a/beginner_source/quickstart/save_load_run_tutorial.py b/beginner_source/quickstart/save_load_run_tutorial.py
index 3573e87b763..4f4d671ec04 100644
--- a/beginner_source/quickstart/save_load_run_tutorial.py
+++ b/beginner_source/quickstart/save_load_run_tutorial.py
@@ -42,16 +42,7 @@
 # These two steps are illustrated here:
 
 # recreate model
-loaded_model = nn.Sequential(
-    nn.Flatten(),
-    nn.Linear(28*28, 512),
-    nn.ReLU(),
-    nn.Linear(512, 512),
-    nn.ReLU(),
-    nn.Linear(512, len(classes)),
-    nn.Softmax(dim=1)
-)
-
+loaded_model = NeuralNetwork()
 # hydrate state dictionary
 loaded_model.load_state_dict(torch.load('model.pth'))
 
diff --git a/beginner_source/quickstart_tutorial.py b/beginner_source/quickstart_tutorial.py
index 54785f1d068..5f61b415e69 100644
--- a/beginner_source/quickstart_tutorial.py
+++ b/beginner_source/quickstart_tutorial.py
@@ -177,15 +177,7 @@ def test(dataloader, model):
 # inference). Check out more details on `saving, loading and running models with PyTorch `_
 #
 
-loaded_model = nn.Sequential(
-    nn.Flatten(),
-    nn.Linear(28*28, 512),
-    nn.ReLU(),
-    nn.Linear(512, 512),
-    nn.ReLU(),
-    nn.Linear(512, len(classes)),
-    nn.Softmax(dim=1)
-    )
+loaded_model = NeuralNetwork()
 loaded_model.load_state_dict(torch.load('model.pth'))
 loaded_model.eval()
 
From 0c595c0db75feccb08de6d49d41b7d6cf24142de Mon Sep 17 00:00:00 2001
From: Cassie Breviu
Date: Thu, 10 Dec 2020 17:16:14 -0600
Subject: [PATCH 53/55] more work on build model

---
 beginner_source/quickstart/build_model_tutorial.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py
index f73178057b3..3f10e798457 100644
--- a/beginner_source/quickstart/build_model_tutorial.py
+++ b/beginner_source/quickstart/build_model_tutorial.py
@@ -165,7 +165,9 @@ def forward(self, x):
 # Forward Function
 # --------------------------------
 #
-# In the class implementation of the neural network we define a `forward` function`.
+# In the class implementation of the neural network we define a ``forward`` function.
+# Then call the ``NeuralNetwork``class and assign the device. When training the model we will call ``model``
+# and pass the data (x) into the forward function and through each layer of our network.
 #
 #
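Both hunks above standardize on the same two-step pattern: rebuild the architecture from the class, then rehydrate its weights from the saved state dictionary. Spelled out end to end as a sketch, assuming a trained ``model`` and the ``NeuralNetwork`` class are in scope:

import torch

torch.save(model.state_dict(), 'model.pth')              # persist only the weights

loaded_model = NeuralNetwork()                           # step 1: recreate the model
loaded_model.load_state_dict(torch.load('model.pth'))    # step 2: hydrate the state dict
loaded_model.eval()                                      # switch to inference mode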
From ce9f9c48160fb7610b71bcdc632fab4474726f92 Mon Sep 17 00:00:00 2001
From: Cassie Breviu
Date: Fri, 11 Dec 2020 16:32:57 -0600
Subject: [PATCH 54/55] fix formating on build model page

---
 .../quickstart/build_model_tutorial.py | 34 ++++++++++---------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py
index 3f10e798457..cd51ce3778e 100644
--- a/beginner_source/quickstart/build_model_tutorial.py
+++ b/beginner_source/quickstart/build_model_tutorial.py
@@ -1,23 +1,25 @@
 """
 Build Model Tutorial
-=======================================
-
-The data has been loaded and transformed we can now build the model.
-We will leverage `torch.nn `_ predefined layers that PyTorch has that can simplify our code.
-
-In the below example, for our FashionMNIT image dataset, we are using a `Sequential`
-container from class `torch.nn. Sequential `_
-that allows us to define the model layers inline. In the "Sequential" in-line model building format the ``forward()``
-method is created for you and the modules you add are passed in as a list or dictionary in the order that are they are defined.
-
-Another way to bulid this model is with a class
-using `nn.Module `_
-A big plus with using a class that inherits ``nn.Module`` is better parameter management across all nested submodules.
-This gives us more flexibility, because we can construct layers of any complexity, including the ones with shared weights.
-
-Lets break down the steps to build this model below
+============================
 """

+##########################################
+# The data has been loaded and transformed; we can now build the model.
+# We will leverage `torch.nn `_ predefined layers that PyTorch has that can simplify our code.
+#
+# In the below example, for our FashionMNIST image dataset, we are using a `Sequential`
+# container from class `torch.nn.Sequential `_
+# that allows us to define the model layers inline. In the "Sequential" in-line model building format the ``forward()``
+# method is created for you and the modules you add are passed in as a list or dictionary in the order that they are defined.
+#
+# Another way to build this model is with a class
+# using `nn.Module `_
+# A big plus with using a class that inherits ``nn.Module`` is better parameter management across all nested submodules.
+# This gives us more flexibility, because we can construct layers of any complexity, including the ones with shared weights.
+#
+# Let's break down the steps to build this model below.
+#
+
 ##########################################
 # Inline nn.Sequential Example:
 # ----------------------------

From 102222e6a31d90a8c8f888eef58d9edefcba00f2 Mon Sep 17 00:00:00 2001
From: Cassie Breviu
Date: Fri, 11 Dec 2020 17:01:26 -0600
Subject: [PATCH 55/55] build model format fix

---
 beginner_source/quickstart/build_model_tutorial.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/beginner_source/quickstart/build_model_tutorial.py b/beginner_source/quickstart/build_model_tutorial.py
index cd51ce3778e..e1cc9c6867f 100644
--- a/beginner_source/quickstart/build_model_tutorial.py
+++ b/beginner_source/quickstart/build_model_tutorial.py
@@ -1,9 +1,12 @@
 """
-Build Model Tutorial
-============================
+Build the Neural Network
+========================
 """
 
-##########################################
+#################################################################
+# Get Started Building the Model
+# ------------------------------
+#
 # The data has been loaded and transformed; we can now build the model.
@@ -109,6 +112,7 @@
 #
 # From the docs:
 # ``torch.nn.Flatten(start_dim: int = 1, end_dim: int = -1)``
+#
 # Here is an example using one of the training_data set items:
 tensor = training_data[0][0]
 print(tensor.size())
@@ -168,7 +172,7 @@ def forward(self, x):
 # --------------------------------
 #
 # In the class implementation of the neural network we define a ``forward`` function.
-# Then call the ``NeuralNetwork``class and assign the device. When training the model we will call ``model``
+# Then call the ``NeuralNetwork`` class and assign the device. When training the model we will call ``model``
 # and pass the data (x) into the forward function and through each layer of our network.
 #
 #
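One point the final wording leaves implicit: we call ``model(x)`` rather than ``model.forward(x)`` because ``nn.Module.__call__`` wraps ``forward`` and runs any registered hooks around it. A closing sketch, assuming the quickstart's ``NeuralNetwork`` class and ``device``:

import torch

model = NeuralNetwork().to(device)
x = torch.rand(1, 28, 28, device=device)   # one FashionMNIST-shaped sample
probs = model(x)           # preferred: __call__ dispatches to forward() plus hooks
print(probs.argmax(1))     # index of the highest-probability class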