From e0101d5f71f19a2687c096f95bd45a13f1513518 Mon Sep 17 00:00:00 2001
From: Louis-Dupont <35190946+Louis-Dupont@users.noreply.github.com>
Date: Thu, 18 May 2023 23:29:25 +0300
Subject: [PATCH] Fix doc (#1019)
* replace []() with
* fix
* fix
* fix
* rename tutorials
---
.../source/QuickstartBasicToolkit.md | 84 +++++++++----------
mkdocs.yml | 2 +-
.../datasets/Dataset_Setup_Instructions.md | 64 +++++++++-----
3 files changed, 87 insertions(+), 63 deletions(-)
diff --git a/documentation/source/QuickstartBasicToolkit.md b/documentation/source/QuickstartBasicToolkit.md
index e5c205e680..4df3348b97 100644
--- a/documentation/source/QuickstartBasicToolkit.md
+++ b/documentation/source/QuickstartBasicToolkit.md
@@ -24,8 +24,8 @@ from super_gradients.training.utils.distributed_training_utils import setup_devi
```python
init_trainer()
```
-2. Call [setup_device()](device.md) according to your available hardware and needs.
-For example, if you want the training to be performed entirely on the CPU:
+
+2. Call setup_device() according to your available hardware and needs. For example, if you want the training to be performed entirely on the CPU:
```python
setup_device("cpu")
@@ -43,18 +43,20 @@ setup_device(num_gpus=-1)
```
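To run on GPUs with DDP instead, pass the number of devices to use; this mirrors the `setup_device(num_gpus=4)` call shown later in this guide:

```python
from super_gradients.training.utils.distributed_training_utils import setup_device

# Launch a DDP run across 4 GPUs; num_gpus=-1 uses whatever hardware is available.
setup_device(num_gpus=4)
```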
-3. Instantiate a [Trainer]() object #TODO: ADD TRAINER API LINK
-```python
+3. Instantiate a Trainer object:
+```python
trainer = Trainer(experiment_name="my_cifar_experiment", ckpt_root_dir="/path/to/checkpoints_directory/")
```
-4. [Instantiate a model](models.md):
+4. Instantiate a model:
+
```python
model = models.get(Models.RESNET18, num_classes=10)
```
-5. Define [metrics](Metrics.md) and other [training parameters](https://github.com/Deci-AI/super-gradients/blob/master/src/super_gradients/recipes/training_hyperparams/default_train_params.yaml):
+5. Define metrics and other training parameters:
+
```python
training_params = {
"max_epochs": 20,
@@ -67,13 +69,15 @@ training_params = {
}
```
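Since the snippet above elides most of the dictionary, here is a sketch of a fuller parameter set; the key names are assumptions based on SG's default_train_params.yaml and should be checked against your installed version:

```python
training_params = {
    "max_epochs": 20,
    "initial_lr": 0.1,             # starting learning rate (key name assumed)
    "optimizer": "SGD",            # optimizer name as registered in SG (assumed)
    "loss": "cross_entropy",       # loss name as registered in SG (assumed)
    "train_metrics_list": [Accuracy(), Top5()],
    "valid_metrics_list": [Accuracy(), Top5()],
    "metric_to_watch": "Accuracy",  # used to select the best checkpoint
}
```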
-6. Instantiate [PyTorch data loaders](https://pytorch.org/tutorials/beginner/basics/data_tutorial.html#preparing-your-data-for-training-with-dataloaders) for training and validation:
+6. Instantiate PyTorch data loaders for training and validation:
+
```python
-train_loader=cifar10_train()
-valid_loader=cifar10_val()
+train_loader = cifar10_train()
+valid_loader = cifar10_val()
```
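The dataloader factories also accept override dictionaries; a minimal sketch, assuming the `dataloader_params` keyword argument of SG's dataloader factories:

```python
# Override the default batch size and worker count (argument names assumed).
train_loader = cifar10_train(dataloader_params={"batch_size": 64, "num_workers": 4})
valid_loader = cifar10_val(dataloader_params={"batch_size": 64})
```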
7. Launch training:
+
```python
trainer.train(model=model, training_params=training_params, train_loader=train_loader, valid_loader=valid_loader)
```
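Putting the steps together, a minimal end-to-end training script assembled from the calls shown above (the `training_params` dict must still contain all the keys from step 5):

```python
from super_gradients import Trainer, init_trainer
from super_gradients.common.object_names import Models
from super_gradients.training import models
from super_gradients.training.dataloaders.dataloaders import cifar10_train, cifar10_val
from super_gradients.training.utils.distributed_training_utils import setup_device

init_trainer()       # must run before any other super_gradients code
setup_device("cpu")  # or setup_device(num_gpus=...) for GPU/DDP runs

trainer = Trainer(experiment_name="my_cifar_experiment",
                  ckpt_root_dir="/path/to/checkpoints_directory/")
model = models.get(Models.RESNET18, num_classes=10)

training_params = {"max_epochs": 20}  # plus the remaining keys from step 5

trainer.train(model=model, training_params=training_params,
              train_loader=cifar10_train(), valid_loader=cifar10_val())
```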
@@ -92,14 +96,14 @@ from super_gradients.training.metrics.classification_metrics import Accuracy, To
from super_gradients.training.dataloaders.dataloaders import cifar10_val
from super_gradients.training.utils.distributed_training_utils import setup_device
```
+
1. Call `init_trainer()` to initialize the super_gradients environment. This should be the first thing to be called by any code running super_gradients:
```python
init_trainer()
```
-2. Call [setup_device()](device.md) according to your available hardware and needs.
-For example, if you want the test to be performed entirely on the CPU:
+2. Call setup_device() according to your available hardware and needs. For example, if you want the test to be performed entirely on the CPU:
```python
setup_device("cpu")
@@ -112,32 +116,30 @@ setup_device(num_gpus=4)
```
It is also possible to launch the test with whatever hardware is available (i.e., if there are 4 GPUs available, we will launch a DDP test with four processes) by passing `num_gpus=-1`:
+
```python
setup_device(num_gpus=-1)
-
```
-3. Instantiate a [Trainer]() object #TODO: ADD TRAINER API LINK
+3. Instantiate a Trainer object:
+
```python
trainer = Trainer(experiment_name="test_my_cifar_experiment", ckpt_root_dir="/path/to/checkpoints_directory/")
-
```
-4. [Instantiate a model](models.md) and load weights to it.
-
-Learn more about the different options for loading model weights from our [checkpoints tutorial](Checkpoints.md).
+4. Instantiate a model and load weights into it. Learn more about the different options for loading model weights from our checkpoints tutorial:
+
```python
model = models.get(Models.RESNET18, num_classes=10, checkpoint_path="/path/to/checkpoints_directory/my_cifar_experiment/ckpt_best.pth")
```
-
-5. Define [metrics](Metrics.md) for test:
+5. Define metrics for test:
```python
test_metrics = [Accuracy(), Top5()]
```
-6. Instantiate a [PyTorch data loader](https://pytorch.org/tutorials/beginner/basics/data_tutorial.html#preparing-your-data-for-training-with-dataloaders) for testing:
+6. Instantiate a PyTorch data loader for testing:
```python
test_data_loader = cifar10_val()
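
# Next, run the evaluation itself. A minimal sketch, assuming Trainer.test
# accepts the metrics via a `test_metrics_list` argument (name assumed);
# it returns a dict keyed by metric name, as the print in the next hunk shows.
test_results = trainer.test(model=model, test_loader=test_data_loader, test_metrics_list=test_metrics)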
@@ -155,7 +157,6 @@ print(f"Test results: Accuracy: {test_results['Accuracy']}, Top5: {test_results[
3. Use Pre-trained Models
-
0. Imports:
```python
@@ -164,16 +165,15 @@ from super_gradients.training import models
from super_gradients.training.metrics.classification_metrics import Accuracy, Top5
from super_gradients.training.dataloaders.dataloaders import cifar10_train, cifar10_val
from super_gradients import Trainer, init_trainer
-
```
+
1. Call `init_trainer()` to initialize the super_gradients environment. This should be the first thing to be called by any code running super_gradients:
```python
init_trainer()
```
-2. Call [setup_device()](device.md) according to your available hardware and needs.
-For example, if you want the finetuning/test to be performed entirely on the CPU:
+2. Call setup_device() according to your available hardware and needs. For example, if you want the finetuning/test to be performed entirely on the CPU:
```python
setup_device("cpu")
@@ -186,11 +186,12 @@ setup_device(num_gpus=4)
```
It is also possible to launch the finetuning/test with whatever hardware is available (i.e., if there are 4 GPUs available, a DDP finetuning/test with four processes will be launched) by passing `num_gpus=-1`:
+
```python
setup_device(num_gpus=-1)
-
```
-3. Instantiate a pre-trained model from SGs [model zoo](http://bit.ly/3EGfKD4):
+
+3. Instantiate a pre-trained model from SG's model zoo:
```python
model = models.get(Models.RESNET18, num_classes=10, pretrained_weights="imagenet")
@@ -205,10 +206,10 @@ model = models.get(Models.RESNET18, num_classes=10, checkpoint_path="/path/to/im
Finetune or test your pre-trained model as done in the previous sections.
+
4. Predict
-
0. Imports:
```python
@@ -220,7 +221,6 @@ from super_gradients.common.object_names import Models
import torchvision.transforms as T
import torch
from super_gradients.training.utils.distributed_training_utils import setup_device
-
```
1. Call `init_trainer()` to initialize the super_gradients environment. This should be the first thing to be called by any code running super_gradients:
@@ -229,22 +229,22 @@ from super_gradients.training.utils.distributed_training_utils import setup_devi
init_trainer()
```
-2. Call [setup_device()](device.md) according to your available hardware and needs:
+2. Call setup_device() according to your available hardware and needs:
```python
setup_device("cpu")
```
-3. [Instantiate a model](models.md), load weights to it, and put it in `eval` mode:
+3. Instantiate a model, load weights into it, and put it in `eval` mode:
```python
-
# Load the best model that we trained
-best_model = models.get(Models.RESNET18, num_classes=10,checkpoint_path="/path/to/checkpoints_directory/my_cifar_experiment/ckpt_best.pth")
+best_model = models.get(Models.RESNET18, num_classes=10, checkpoint_path="/path/to/checkpoints_directory/my_cifar_experiment/ckpt_best.pth")
best_model.eval()
```
4. Create input data and preprocess it:
+
```python
url = "https://www.aquariumofpacific.org/images/exhibits/Magnificent_Tree_Frog_900.jpg"
image = np.array(Image.open(requests.get(url, stream=True).raw))
@@ -253,11 +253,12 @@ transforms = T.Compose([
T.ToTensor(),
T.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),
T.Resize((32, 32))
- ])
+])
input_tensor = transforms(image).unsqueeze(0).to(next(best_model.parameters()).device)
```
5. Predict and visualize results:
+
```python
predictions = best_model(input_tensor)
@@ -266,38 +267,37 @@ plt.xlabel(classes[torch.argmax(predictions)])
plt.imshow(image)
```
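The network returns raw logits; to report a confidence score next to the predicted label, a short post-processing sketch in plain PyTorch (`classes` is the label list already used above):

```python
import torch

# Convert logits to probabilities and read off the top prediction.
probabilities = torch.softmax(predictions, dim=1)
confidence, class_index = probabilities.max(dim=1)
print(f"Predicted: {classes[class_index.item()]} (confidence: {confidence.item():.2f})")
```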
-
-
- 5. Train using SGs Training Recipes
-
+ 5. Train using SG's Training Recipes
0. Setup:
- - Clone the SG repo:
+ - Clone the SG repo:
```shell
git clone https://github.com/Deci-AI/super-gradients
```
- - Move to the root of the clone project (where you find "requirements.txt" and "setup.py") and install super-gradients:
+ - Move to the root of the cloned project (where you find "requirements.txt" and "setup.py") and install super-gradients:
```shell
pip install -e .
```
-- Append super-gradients to the python path: (Replace "YOUR-LOCAL-PATH" with the path to the downloaded repo) to avoid conflicts with any installed version of SG:
+- Append super-gradients to the Python path (replace "YOUR-LOCAL-PATH" with the path to the downloaded repo) to avoid conflicts with any installed version of SG:
+
```shell
export PYTHONPATH=$PYTHONPATH:<YOUR-LOCAL-PATH>/super-gradients/
```
-1. Launch one of SGs [training recipes](https://github.com/Deci-AI/super-gradients/blob/master/src/super_gradients/recipes/Training_Recipes.md). For example, Resnet18 on Cifar10:
+1. Launch one of SG's training recipes. For example, Resnet18 on Cifar10:
+
```shell
python -m super_gradients.train_from_recipe --config-name=cifar10_resnet experiment_name=my_resnet18_cifar10_experiment
```
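Recipe parameters can also be overridden directly from the command line; a hedged sketch, assuming the Hydra-style override syntax that SG recipes are built on (the override paths are illustrative):

```shell
python -m super_gradients.train_from_recipe --config-name=cifar10_resnet \
    experiment_name=my_resnet18_cifar10_experiment \
    training_hyperparams.max_epochs=50
```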
-Learn more in detail on how to launch, customize and evaluate training recipes from our [training with configuration files tutorial](configuration_files.md)
+Learn in more detail how to launch, customize, and evaluate training recipes from our training with configuration files tutorial.
diff --git a/mkdocs.yml b/mkdocs.yml
index 3d10de8d5d..fb54d22bf9 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -14,7 +14,7 @@ nav:
- Pretrained Model Prediction:
- Prediction: ./documentation/source/ModelPredictions.md
- Custom training Setup: ./documentation/source/PredictionSetup.md
-- Tutorials:
+- Main Components:
- Models: ./documentation/source/models.md
- Dataset:
- Data: ./documentation/source/Data.md
diff --git a/src/super_gradients/training/datasets/Dataset_Setup_Instructions.md b/src/super_gradients/training/datasets/Dataset_Setup_Instructions.md
index 0cfa79511d..fc8c3b8be9 100644
--- a/src/super_gradients/training/datasets/Dataset_Setup_Instructions.md
+++ b/src/super_gradients/training/datasets/Dataset_Setup_Instructions.md
@@ -20,7 +20,9 @@ dataset = Cifar10(..., download=True)
Imagenet
1. Download imagenet dataset:
- - https://image-net.org/download.php
+
+- https://image-net.org/download.php
+
2. Unzip:
@@ -54,9 +56,11 @@ valid_set = ImageNetDataset(root='.../Imagenet/val', ...)
Coco
1. Download coco dataset:
- - annotations: http://images.cocodataset.org/annotations/annotations_trainval2017.zip
- - train2017: http://images.cocodataset.org/zips/train2017.zip
- - val2017: http://images.cocodataset.org/zips/val2017.zip
+
+- annotations: http://images.cocodataset.org/annotations/annotations_trainval2017.zip
+- train2017: http://images.cocodataset.org/zips/train2017.zip
+- val2017: http://images.cocodataset.org/zips/val2017.zip
+
2. Unzip and organize it as below:
```
@@ -136,6 +140,7 @@ Dataset Structure:
1. Download your dataset (e.g., from https://roboflow.com/universe)
+
2. You should have a structure similar to this:
```
data_dir
@@ -151,6 +156,7 @@ Dataset Structure:
```
*Note: train/test/val folders are not required; any folder structure is supported.*
+
3. Instantiate the dataset:
```python
from super_gradients.training.datasets import YoloDarknetFormatDetectionDataset
@@ -158,6 +164,8 @@ data_set = YoloDarknetFormatDetectionDataset(data_dir='/data_dir', imag
```
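Once instantiated, the dataset can be wrapped in a standard PyTorch DataLoader; a minimal sketch (detection targets often need a dataset-specific collate_fn, which is omitted here):

```python
from torch.utils.data import DataLoader

# Batch the detection samples; the parameters below are illustrative.
data_loader = DataLoader(data_set, batch_size=16, shuffle=True, num_workers=4)
```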
+
+
### Segmentation Datasets
@@ -165,10 +173,11 @@ data_set = YoloDarknetFormatDetectionDataset(data_dir='/data_dir', imag
Cityscapes
1. Download dataset:
- - a. Cityscapes dataset:
- - gtFine: https://www.cityscapes-dataset.com/file-handling/?packageID=1
- - leftImg8bit: https://www.cityscapes-dataset.com/file-handling/?packageID=3
- - b. metadata folder: https://deci-pretrained-models.s3.amazonaws.com/cityscape_lists.zip
+
+- a. Cityscapes dataset:
+ - gtFine: https://www.cityscapes-dataset.com/file-handling/?packageID=1
+ - leftImg8bit: https://www.cityscapes-dataset.com/file-handling/?packageID=3
+- b. metadata folder: https://deci-pretrained-models.s3.amazonaws.com/cityscape_lists.zip
2. a. Unzip and organize cityscapes dataset as below:
@@ -225,6 +234,7 @@ from super_gradients.training.datasets import CityscapesDataset
train_set = CityscapesDataset(root_dir='.../root_dir', list_file='lists/train.lst', labels_csv_path='lists/labels.csv', ...)
```
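A validation split can be instantiated the same way by pointing at the corresponding list file (the `lists/val.lst` name is an assumption based on the `lists/` layout above):

```python
# Same constructor as train_set, plus any extra arguments used there.
valid_set = CityscapesDataset(root_dir='.../root_dir', list_file='lists/val.lst', labels_csv_path='lists/labels.csv')
```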
+
4. AutoLabelling dataset [Optional]
The Cityscapes AutoLabelled dataset was introduced by the NVIDIA research group
@@ -272,9 +282,11 @@ leftImg8bit_train_extra: https://www.cityscapes-dataset.com/file-handling/?packa
Coco
1. Download coco dataset:
- - annotations: http://images.cocodataset.org/annotations/annotations_trainval2017.zip
- - train2017: http://images.cocodataset.org/zips/train2017.zip
- - val2017: http://images.cocodataset.org/zips/val2017.zip
+
+- annotations: http://images.cocodataset.org/annotations/annotations_trainval2017.zip
+- train2017: http://images.cocodataset.org/zips/train2017.zip
+- val2017: http://images.cocodataset.org/zips/val2017.zip
+
2. Unzip and organize it as below:
```
@@ -291,6 +303,7 @@ leftImg8bit_train_extra: https://www.cityscapes-dataset.com/file-handling/?packa
└─ ...
```
+
3. Instantiate the dataset:
```python
from super_gradients.training.datasets import CoCoSegmentationDataSet
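
# A hedged sketch of the matching train split, mirroring the valid_set call
# that follows in the original document (the json_file name is an assumption):
train_set = CoCoSegmentationDataSet(data_dir='.../coco', subdir='images/train2017', json_file='instances_train2017.json')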
@@ -304,7 +317,9 @@ valid_set = CoCoSegmentationDataSet(data_dir='.../coco', subdir='images/val2017'
Pascal VOC 2012
1. Download pascal datasets:
- - VOC 2012: http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
+
+- VOC 2012: http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
+
2. Unzip and organize it as below:
```
@@ -323,6 +338,7 @@ valid_set = CoCoSegmentationDataSet(data_dir='.../coco', subdir='images/val2017'
└──SegmentationObject
```
+
3. Instantiate the dataset:
```python
from super_gradients.training.datasets import PascalVOC2012SegmentationDataSet
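
# A hedged sketch of the matching train split; argument names are assumptions
# mirroring the valid_set call that continues in the next hunk:
train_set = PascalVOC2012SegmentationDataSet(
    root='.../pascal_voc_2012',
    list_file='.../ImageSets/Segmentation/train.txt',
)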
@@ -348,8 +364,9 @@ valid_set = PascalVOC2012SegmentationDataSet(
Pascal AUG 2012
-1. Download pascal datasets:
- - AUG 2012: https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz
+1. Download pascal dataset:
+
+- AUG 2012: https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz
2. Unzip and organize it as below:
```
@@ -385,8 +402,10 @@ NOTE: this dataset is only available for training. To test, please use PascalVOC
Pascal AUG & VOC 2012
1. Download pascal datasets:
- - VOC 2012: http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
- - AUG 2012: https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz
+
+- VOC 2012: http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
+- AUG 2012: https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz
+
2. Unzip and organize it as below:
```
@@ -426,7 +445,9 @@ train_set = PascalVOCAndAUGUnifiedDataset(root='.../pascal_voc_2012', ...)
Supervisely Persons
1. Download supervisely dataset:
- - https://deci-pretrained-models.s3.amazonaws.com/supervisely-persons.zip)
+
+- https://deci-pretrained-models.s3.amazonaws.com/supervisely-persons.zip
+
2. Unzip:
```
@@ -461,9 +482,11 @@ NOTE: this dataset is only available for training. To test, please use PascalVOC
COCO 2017
1. Download coco dataset:
- - annotations: http://images.cocodataset.org/annotations/annotations_trainval2017.zip
- - train2017: http://images.cocodataset.org/zips/train2017.zip
- - val2017: http://images.cocodataset.org/zips/val2017.zip
+
+- annotations: http://images.cocodataset.org/annotations/annotations_trainval2017.zip
+- train2017: http://images.cocodataset.org/zips/train2017.zip
+- val2017: http://images.cocodataset.org/zips/val2017.zip
+
2. Unzip and organize it as below:
```
@@ -480,6 +503,7 @@ NOTE: this dataset is only available for training. To test, please use PascalVOC
└─ ...
```
+
3. Instantiate the dataset:
```python
from super_gradients.training.datasets import COCOKeypointsDataset
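
# A hedged sketch of instantiating the keypoints dataset over the layout shown
# above; the argument names here are assumptions, not confirmed API:
train_set = COCOKeypointsDataset(data_dir='.../coco', images_dir='images/train2017', json_file='annotations/person_keypoints_train2017.json')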