Commit c0b1895: Version 0.3.0 (#6)

creafz authored Jan 12, 2021
1 parent 68de6fc commit c0b1895
Showing 39 changed files with 1,030 additions and 157 deletions.
7 changes: 1 addition & 6 deletions .github/workflows/publish_docker_image.yml
@@ -13,11 +13,6 @@ jobs:
       - name: Login to Github Container Registry
         run: echo ${{ secrets.CR_PAT }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
       - name: Build image
-        run: |
-          docker build
-          -f docker/Dockerfile
-          --tag ghcr.io/albumentations-team/autoalbument:${{ github.event.release.tag_name }}
-          --tag ghcr.io/albumentations-team/autoalbument:latest
-          .
+        run: docker build -f docker/Dockerfile --tag ghcr.io/albumentations-team/autoalbument:${{ github.event.release.tag_name }} --tag ghcr.io/albumentations-team/autoalbument:latest .
       - name: Push image
         run: docker push ghcr.io/albumentations-team/autoalbument
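
Note on this hunk: in a `run: |` block each line is executed as a separate shell command unless it ends with a trailing backslash, so the old multi-line form would have invoked `docker build` with no arguments and then failed on the bare `-f`/`--tag` flags. Collapsing the command onto a single line makes the build step valid.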
2 changes: 1 addition & 1 deletion autoalbument/__init__.py
@@ -1 +1 @@
-__version__ = "0.2.0"
+__version__ = "0.3.0"
2 changes: 1 addition & 1 deletion autoalbument/cli/conf/data/default.yaml
@@ -42,6 +42,6 @@ dataloader:
   _target_: torch.utils.data.DataLoader
   batch_size: 128
   shuffle: True
-  num_workers: 4
+  num_workers: 8
   pin_memory: True
   drop_last: True
4 changes: 2 additions & 2 deletions autoalbument/cli/templates/classification/search.yaml.tmpl
@@ -19,7 +19,7 @@ policy_model:
   # number of sub-policies leads to a more diverse set of augmentations and better performance of a model trained on
   # augmented images. However, an increase in the number of sub-policies leads to the exponential growth of a search
   # space of augmentations, so you need more training data for Policy Model to find good augmentation policies.
-  num_sub_policies: 20
+  num_sub_policies: 50

   # Number of chunks in a batch. Faster AutoAugment splits each batch of images into `num_chunks` chunks. Then it
   # applies the same sub-policy with the same parameters to each image in a chunk. This parameter controls the tradeoff
@@ -101,7 +101,7 @@ data:
   dataloader:
     _target_: torch.utils.data.DataLoader
     batch_size: 64
-    num_workers: 4
+    num_workers: 8

 optim:
   # Number of epochs to search parameters of augmentations.
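
To make the comment's "exponential growth of a search space" concrete, here is a hedged back-of-the-envelope sketch in Python; the operation counts are assumptions for illustration, not values taken from this repository:

    from math import comb

    # Assume roughly 20 candidate operations and 2 operations per sub-policy,
    # giving about 20 ** 2 = 400 distinct sub-policies to choose from.
    num_distinct_sub_policies = 20 ** 2

    # The number of possible sets of sub-policies grows combinatorially with
    # num_sub_policies, which is why larger values need more training data:
    policies_with_20 = comb(num_distinct_sub_policies, 20)
    policies_with_50 = comb(num_distinct_sub_policies, 50)
    print(policies_with_50 > policies_with_20)  # True: a vastly larger search space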
(file name not captured in this page extract)
@@ -25,7 +25,7 @@ policy_model:
   # number of sub-policies leads to a more diverse set of augmentations and better performance of a model trained on
   # augmented images. However, an increase in the number of sub-policies leads to the exponential growth of a search
   # space of augmentations, so you need more training data for Policy Model to find good augmentation policies.
-  num_sub_policies: 20
+  num_sub_policies: 50

   # Number of chunks in a batch. Faster AutoAugment splits each batch of images into `num_chunks` chunks. Then it
   # applies the same sub-policy with the same parameters to each image in a chunk. This parameter controls the tradeoff
@@ -128,7 +128,7 @@ data:
     _target_: torch.utils.data.DataLoader
     batch_size: 64
     shuffle: True
-    num_workers: 4
+    num_workers: 8
     pin_memory: True
     drop_last: True

(file name not captured in this page extract)
@@ -19,7 +19,7 @@ policy_model:
   # number of sub-policies leads to a more diverse set of augmentations and better performance of a model trained on
   # augmented images. However, an increase in the number of sub-policies leads to the exponential growth of a search
   # space of augmentations, so you need more training data for Policy Model to find good augmentation policies.
-  num_sub_policies: 20
+  num_sub_policies: 25

   # Number of chunks in a batch. Faster AutoAugment splits each batch of images into `num_chunks` chunks. Then it
   # applies the same sub-policy with the same parameters to each image in a chunk. This parameter controls the tradeoff
@@ -110,7 +110,7 @@ data:
   dataloader:
     _target_: torch.utils.data.DataLoader
     batch_size: 64
-    num_workers: 4
+    num_workers: 8

 optim:
   # Number of epochs to search parameters of augmentations.
(file name not captured in this page extract)
@@ -23,7 +23,7 @@ policy_model:
   # number of sub-policies leads to a more diverse set of augmentations and better performance of a model trained on
   # augmented images. However, an increase in the number of sub-policies leads to the exponential growth of a search
   # space of augmentations, so you need more training data for Policy Model to find good augmentation policies.
-  num_sub_policies: 20
+  num_sub_policies: 25

   # Number of chunks in a batch. Faster AutoAugment splits each batch of images into `num_chunks` chunks. Then it
   # applies the same sub-policy with the same parameters to each image in a chunk. This parameter controls the tradeoff
@@ -135,7 +135,7 @@ data:
     _target_: torch.utils.data.DataLoader
     batch_size: 64
     shuffle: True
-    num_workers: 4
+    num_workers: 8
     pin_memory: True
     drop_last: True

(file name not captured in this page extract)
@@ -1,6 +1,7 @@
 import random

 import torch
+import torch.nn.functional as F

 from autoalbument.faster_autoaugment.albumentations_pytorch.affine import (
     get_scaling_matrix,
@@ -66,11 +67,10 @@ def scale(img_batch, scale, padding_mode=TorchPadding.REFLECTION):
 def cutout(img_batch, num_holes, hole_size, fill_value=0):
     img_batch = img_batch.clone()
     height, width = img_batch.shape[-2:]
-    for i in range(len(img_batch)):
-        for _n in range(num_holes):
-            y1 = random.randint(0, height - hole_size)
-            x1 = random.randint(0, width - hole_size)
-            y2 = y1 + hole_size
-            x2 = x1 + hole_size
-            img_batch[i, :, y1:y2, x1:x2] = fill_value
+    for _n in range(num_holes):
+        y1 = random.randint(0, height - hole_size)
+        x1 = random.randint(0, width - hole_size)
+        y2 = y1 + hole_size
+        x2 = x1 + hole_size
+        img_batch[:, :, y1:y2, x1:x2] = fill_value
     return img_batch
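
A quick sketch of the behavioral change above (hypothetical shapes, and the module path in the comment is an assumption): the old loop sampled holes independently per image, while the rewritten version samples one set of holes and fills it across the whole batch, consistent with Faster AutoAugment applying the same sub-policy with the same parameters to every image in a chunk.

    import torch

    # from autoalbument.faster_autoaugment.albumentations_pytorch.functional import cutout  # assumed path

    img_batch = torch.rand(8, 3, 32, 32)  # (batch, channels, height, width), hypothetical shapes
    out = cutout(img_batch, num_holes=4, hole_size=8, fill_value=0)
    assert out.shape == img_batch.shape  # the same holes are now cut in all 8 images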
5 changes: 3 additions & 2 deletions autoalbument/faster_autoaugment/operations.py
Expand Up @@ -6,6 +6,7 @@
import warnings

import albumentations as A
import cv2
import torch
from torch import nn
from torch.autograd import Function
@@ -292,7 +293,7 @@ def as_transform(self, value, p):

 class Cutout(Operation):
     def __init__(self, temperature, value_range=(0.0, 1.0)):
-        super().__init__(temperature, value_range=value_range)
+        super().__init__(temperature, value_range=value_range, ste=True)
         self.register_buffer("saved_image_shape", torch.Tensor([0, 0]).type(torch.int64))
         self.is_image_shape_saved = False

@@ -325,7 +326,7 @@ def _as_cutout_transform(self, value, p, image_shape):
         raise NotImplementedError


-class CutoutFixedNumerOfHoles(Cutout):
+class CutoutFixedNumberOfHoles(Cutout):
     def __init__(self, temperature, num_holes=16):
         super().__init__(temperature)
         self.num_holes = num_holes
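
The new `ste=True` argument is only passed here, not defined in this hunk. Assuming it toggles a straight-through estimator (the standard trick in Faster AutoAugment for backpropagating through non-differentiable operations such as Cutout), a minimal illustrative sketch:

    import torch

    class StraightThrough(torch.autograd.Function):
        # Hypothetical illustration: the forward pass returns the
        # non-differentiable (hard) result, while the backward pass routes
        # the gradient to the differentiable (soft) input unchanged.
        @staticmethod
        def forward(ctx, hard, soft):
            return hard

        @staticmethod
        def backward(ctx, grad_output):
            return None, grad_output  # no grad for the hard path, identity grad for the soft path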
4 changes: 2 additions & 2 deletions autoalbument/faster_autoaugment/policy.py
Expand Up @@ -14,7 +14,7 @@
from torch import Tensor, nn

from autoalbument.faster_autoaugment.operations import (
CutoutFixedNumerOfHoles,
CutoutFixedNumberOfHoles,
CutoutFixedSize,
HorizontalFlip,
RandomBrightness,
@@ -164,7 +164,7 @@ def dda_operations(temperature):
         ShiftX(temperature=temperature),
         ShiftY(temperature=temperature),
         Scale(temperature=temperature),
-        CutoutFixedNumerOfHoles(temperature=temperature),
+        CutoutFixedNumberOfHoles(temperature=temperature),
         CutoutFixedSize(temperature=temperature),
     ]

28 changes: 16 additions & 12 deletions autoalbument/faster_autoaugment/search.py
Expand Up @@ -13,6 +13,7 @@
import torch
from albumentations.pytorch import ToTensorV2
from hydra.utils import instantiate
from omegaconf import OmegaConf
from torch import Tensor, nn
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
@@ -51,7 +52,8 @@ def set_seed(self):

     def create_tensorboard_writer(self):
         if self.cfg.tensorboard_logs_dir:
-            return SummaryWriter(os.path.join(self.cfg.tensorboard_logs_dir, os.getcwd().replace(os.sep, ".")))
+            filename = os.getcwd().replace(os.sep, ".").lstrip(".")
+            return SummaryWriter(os.path.join(self.cfg.tensorboard_logs_dir, filename))
         return None

     def get_policy_state_dict(self):
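
The `lstrip(".")` matters because on POSIX systems an absolute working directory starts with the path separator, so the naive replacement produced a name with a leading dot, and the TensorBoard run directory became a hidden directory. A worked example with a hypothetical path:

    import os

    "/workspace/outputs/run1".replace(os.sep, ".")              # ".workspace.outputs.run1" (hidden on POSIX)
    "/workspace/outputs/run1".replace(os.sep, ".").lstrip(".")  # "workspace.outputs.run1"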
@@ -83,19 +85,21 @@ def create_metric_tracker(self):

     def get_preprocessing_transforms(self):
         preprocessing_config = self.cfg.data.preprocessing
+        if not preprocessing_config:
+            return []
+        preprocessing_config = OmegaConf.to_container(preprocessing_config, resolve=True)
         preprocessing_transforms = []
-        if preprocessing_config:
-            for preprocessing_transform in preprocessing_config:
-                for transform_name, transform_args in preprocessing_transform.items():
-                    transform = A.from_dict(
-                        {
-                            "transform": {
-                                "__class_fullname__": "albumentations.augmentations.transforms." + transform_name,
-                                **transform_args,
-                            }
-                        }
-                    )
-                    preprocessing_transforms.append(transform)
+        for preprocessing_transform in preprocessing_config:
+            for transform_name, transform_args in preprocessing_transform.items():
+                transform = A.from_dict(
+                    {
+                        "transform": {
+                            "__class_fullname__": "albumentations.augmentations.transforms." + transform_name,
+                            **transform_args,
+                        }
+                    }
+                )
+                preprocessing_transforms.append(transform)
         return preprocessing_transforms

     def create_preprocessing_transform(self):
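
A minimal sketch of what the rewritten `get_preprocessing_transforms` consumes and produces, with an assumed config entry (the `Resize` values below are illustrative, not from this commit): each single-key mapping under `data.preprocessing` is resolved into an Albumentations transform via `A.from_dict`.

    import albumentations as A

    preprocessing_config = [{"Resize": {"height": 128, "width": 128}}]  # hypothetical entry

    # Equivalent to what the loop builds for that entry:
    transform = A.from_dict(
        {
            "transform": {
                "__class_fullname__": "albumentations.augmentations.transforms.Resize",
                "height": 128,
                "width": 128,
            }
        }
    )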
3 changes: 3 additions & 0 deletions docker/Dockerfile
@@ -9,6 +9,9 @@ RUN useradd --create-home --shell /bin/bash --no-log-init autoalbument
 USER autoalbument
 ENV PATH="/home/autoalbument/.local/bin:${PATH}"
 WORKDIR /opt/autoalbument
+COPY ./docker/requirements.txt /opt/autoalbument/docker/requirements.txt
+RUN pip install --no-cache-dir -r /opt/autoalbument/docker/requirements.txt
+
 COPY . .
 RUN pip install --no-cache-dir .
 COPY docker/entrypoint.sh entrypoint.sh
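
Copying requirements.txt and installing dependencies before `COPY . .` lets Docker cache the heavy dependency layer: as long as requirements.txt is unchanged, rebuilding after a source-only edit skips the `pip install -r` step entirely.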
89 changes: 89 additions & 0 deletions docker/requirements.txt
@@ -0,0 +1,89 @@
absl-py==0.11.0
albumentations==0.5.2
antlr4-python3-runtime==4.8
backcall==0.2.0
beautifulsoup4==4.9.3
cachetools==4.2.0
certifi==2020.6.20
cffi==1.14.0
chardet==3.0.4
click==7.1.2
colorama==0.4.4
cryptography==2.9.2
cycler==0.10.0
dataclasses==0.6
decorator==4.4.2
dnspython==2.0.0
efficientnet-pytorch==0.6.3
filelock==3.0.12
future==0.18.2
glob2==0.7
google-auth==1.24.0
google-auth-oauthlib==0.4.2
grpcio==1.34.0
hydra-core==1.0.4
idna==2.9
imageio==2.9.0
imgaug==0.4.0
importlib-resources==4.0.0
ipython==7.18.1
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==2.11.2
kiwisolver==1.3.1
libarchive-c==2.9
Markdown==3.3.3
MarkupSafe==1.1.1
matplotlib==3.3.3
munch==2.5.0
networkx==2.5
numpy==1.19.2
oauthlib==3.1.0
olefile==0.46
omegaconf==2.0.5
opencv-python==4.4.0.46
opencv-python-headless==4.4.0.46
parso==0.7.0
pexpect==4.8.0
pickleshare==0.7.5
Pillow==8.0.0
pkginfo==1.6.0
pretrainedmodels==0.7.4
prompt-toolkit==3.0.8
protobuf==3.14.0
psutil==5.7.2
ptyprocess==0.6.0
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycosat==0.6.3
pycparser==2.20
Pygments==2.7.1
pyOpenSSL==19.1.0
pyparsing==2.4.7
PySocks==1.7.1
python-dateutil==2.8.1
python-etcd==0.4.5
pytz==2020.1
PyWavelets==1.1.1
PyYAML==5.3.1
requests==2.23.0
requests-oauthlib==1.3.0
rsa==4.6
ruamel-yaml==0.15.87
scikit-image==0.18.1
scipy==1.5.4
segmentation-models-pytorch==0.1.3
Shapely==1.7.1
six==1.14.0
soupsieve==2.0.1
tensorboard==2.4.0
tensorboard-plugin-wit==1.7.0
tifffile==2020.12.8
timm==0.3.2
torchelastic==0.2.1
tqdm==4.46.0
traitlets==5.0.5
typing-extensions==3.7.4.3
urllib3==1.25.8
wcwidth==0.2.5
Werkzeug==1.0.1
7 changes: 6 additions & 1 deletion examples/cifar10/dataset.py
@@ -1,7 +1,12 @@
 import torchvision

+import cv2
+
+cv2.setNumThreads(0)
+cv2.ocl.setUseOpenCL(False)
+

-class SearchDataset(torchvision.datasets.CIFAR10):
+class Cifar10SearchDataset(torchvision.datasets.CIFAR10):
     def __init__(self, root="~/data/cifar10", train=True, download=True, transform=None):
         super().__init__(root=root, train=train, download=download, transform=transform)

Expand Down