-
Notifications
You must be signed in to change notification settings - Fork 0
/
augmentation.py
107 lines (75 loc) · 3.16 KB
/
augmentation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
import albumentations as A
from albumentations.pytorch import ToTensor
from torch.utils.data import Dataset, DataLoader
import cv2
# OpenCV border-mode codes duplicated as plain ints.
# NOTE(review): neither constant is referenced in this file's visible code;
# presumably kept for callers elsewhere. Values appear to mirror
# cv2.BORDER_CONSTANT (0) and cv2.BORDER_REFLECT (2) — confirm against cv2.
BORDER_CONSTANT = 0
BORDER_REFLECT = 2
def get_medium_augmentations(image_size):
    """Medium-strength training augmentation pipeline.

    Applies (each with probability 0.5) one random geometric transform and
    one random color jitter, then cutout, a resize to ``image_size``
    (height, width), ImageNet normalization, and tensor conversion.
    Bounding boxes in 'albumentations' format (labels under 'category_ids')
    are transformed together with the image.
    """
    # One-of groups include NoOp so the image may pass through unchanged.
    geometric = A.OneOf(
        [
            A.Transpose(),
            A.HorizontalFlip(),
            A.VerticalFlip(),
            A.ShiftScaleRotate(),
            A.NoOp(),
        ],
        p=0.5,
    )
    color = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2),
            A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1)),
            A.NoOp(),
        ],
        p=0.5,
    )
    steps = [
        geometric,
        color,
        A.Cutout(p=0.15),
        A.Resize(image_size[0], image_size[1], p=1.0),
        # ImageNet channel statistics.
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1.0),
        ToTensor(),
    ]
    return A.Compose(
        steps,
        bbox_params=A.BboxParams(format='albumentations', label_fields=['category_ids']),
    )
def get_hard_augmentations(image_size):
    """Hard training augmentation pipeline.

    Like the medium pipeline but adds a contrast-enhancement group
    (CLAHE or FancyPCA) and RandomGamma to the color-jitter choices.
    Each random group fires with probability 0.5; the tail is the fixed
    resize to ``image_size`` (height, width), ImageNet normalization, and
    tensor conversion. Bounding boxes in 'albumentations' format (labels
    under 'category_ids') are transformed together with the image.
    """
    # Each OneOf contains NoOp so the group can leave the image unchanged.
    contrast = A.OneOf(
        [
            A.CLAHE(clip_limit=(10, 10), tile_grid_size=(3, 3)),
            A.FancyPCA(alpha=0.4),
            A.NoOp(),
        ],
        p=0.5,
    )
    geometric = A.OneOf(
        [
            A.Transpose(),
            A.HorizontalFlip(),
            A.VerticalFlip(),
            A.ShiftScaleRotate(),
            A.NoOp(),
        ],
        p=0.5,
    )
    color = A.OneOf(
        [
            A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2),
            A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1)),
            A.RandomGamma(gamma_limit=(50, 150)),
            A.NoOp(),
        ],
        p=0.5,
    )
    steps = [
        contrast,
        geometric,
        color,
        A.Cutout(p=0.15),
        A.Resize(image_size[0], image_size[1], p=1.0),
        # ImageNet channel statistics.
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1.0),
        ToTensor(),
    ]
    return A.Compose(
        steps,
        bbox_params=A.BboxParams(format='albumentations', label_fields=['category_ids']),
    )
def light_aug(image_size):
    """Light training augmentation pipeline.

    Only independent horizontal/vertical flips (p=0.5 each), followed by a
    resize to ``image_size`` (height, width), ImageNet normalization, and
    tensor conversion. Bounding boxes in 'albumentations' format (labels
    under 'category_ids') are transformed together with the image.
    """
    steps = [
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Resize(image_size[0], image_size[1]),
        # ImageNet channel statistics.
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1.0),
        ToTensor(),
    ]
    return A.Compose(
        steps,
        bbox_params=A.BboxParams(format='albumentations', label_fields=['category_ids']),
    )
def get_test_transform(image_size):
    """Deterministic evaluation pipeline: resize to ``image_size``
    (height, width), ImageNet normalization, tensor conversion.

    No random augmentation is applied. Bounding boxes in 'albumentations'
    format (labels under 'category_ids') are resized with the image.
    """
    steps = [
        A.Resize(image_size[0], image_size[1], p=1.0),
        # ImageNet channel statistics.
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1.0),
        ToTensor(),
    ]
    return A.Compose(
        steps,
        bbox_params=A.BboxParams(format='albumentations', label_fields=['category_ids']),
    )
def get_train_transform(augmentation, image_size):
    """Return the training transform for the requested augmentation level.

    Args:
        augmentation: one of 'light', 'medium', or 'hard'.
        image_size: (height, width) target size passed to the pipeline.

    Returns:
        The albumentations Compose built by the matching factory.

    Raises:
        ValueError: if ``augmentation`` is not a recognized level.
    """
    # Deliberately an if-chain (not a dispatch dict) so only the matching
    # factory name is ever evaluated.
    if augmentation == 'light':
        return light_aug(image_size)
    if augmentation == 'medium':
        return get_medium_augmentations(image_size)
    if augmentation == 'hard':
        return get_hard_augmentations(image_size)
    # Previously an unknown level fell through and silently returned None,
    # which only surfaced later as an opaque TypeError; fail loudly instead.
    raise ValueError(
        f"Unknown augmentation level: {augmentation!r} "
        "(expected 'light', 'medium' or 'hard')"
    )