This repository has been archived by the owner on Jan 12, 2024. It is now read-only.

CI for fork PRs #118

Merged 2 commits on Aug 22, 2021
5 changes: 4 additions & 1 deletion .github/workflows/unittests_linux.yml
@@ -1,6 +1,9 @@
 name: Unittests Linux

-on: [push]
+on: # Trigger the workflow on push or pull request, but only for the main branch
+  push:
+    branches: [master]
+  pull_request: {}

 jobs:
   build:

5 changes: 4 additions & 1 deletion .github/workflows/unittests_mac.yml
@@ -1,6 +1,9 @@
 name: Unittests MacOS

-on: [push]
+on: # Trigger the workflow on push or pull request, but only for the main branch
+  push:
+    branches: [master]
+  pull_request: {}

 jobs:
   build:

5 changes: 4 additions & 1 deletion .github/workflows/unittests_windows.yml
@@ -1,6 +1,9 @@
 name: Unittests Windows

-on: [push]
+on: # Trigger the workflow on push or pull request, but only for the main branch
+  push:
+    branches: [master]
+  pull_request: {}

 jobs:
   build:

22 changes: 11 additions & 11 deletions rising/transforms/functional/intensity.py
@@ -231,30 +231,30 @@ def scale_by_value(data: torch.Tensor, value: float,
     return torch.mul(data, value, out=out)


-def bezier_3rd_order(data: torch.Tensor, maxv: float=1.0, minv: float=0.0,
+def bezier_3rd_order(data: torch.Tensor, maxv: float = 1.0, minv: float = 0.0,
                      out: Optional[torch.Tensor] = None) -> torch.Tensor:
-    p0 = torch.zeros((1,2))
-    p1 = torch.rand((1,2))
-    p2 = torch.rand((1,2))
-    p3 = torch.ones((1,2))
+    p0 = torch.zeros((1, 2))
+    p1 = torch.rand((1, 2))
+    p2 = torch.rand((1, 2))
+    p3 = torch.ones((1, 2))

     t = torch.linspace(0.0, 1.0, 1000).unsqueeze(1)

-    points = (1-t*t*t)*p0 + 3*(1-t)*(1-t)*t*p1 + 3*(1-t)*t*t*p2 + t*t*t*p3
+    points = (1 - t * t * t) * p0 + 3 * (1 - t) * (1 - t) * t * p1 + 3 * (1 - t) * t * t * p2 + t * t * t * p3

     # scaling according to maxv,minv
-    points = points*(maxv-minv) + minv
+    points = points * (maxv - minv) + minv

-    xvals = points[:,0]
-    yvals = points[:,1]
+    xvals = points[:, 0]
+    yvals = points[:, 1]

     out_flat = Interp1d.apply(xvals, yvals, data.view(-1))

     return out_flat.view(data.shape)


-def random_inversion(data: torch.Tensor, prob_inversion: float=0.5,
-                     maxv: float=1.0, minv: float=0.0,
+def random_inversion(data: torch.Tensor, prob_inversion: float = 0.5,
+                     maxv: float = 1.0, minv: float = 0.0,
                      out: Optional[torch.Tensor] = None) -> torch.Tensor:

     if torch.rand((1)) < prob_inversion:
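
For reviewers trying the reformatted functions, a minimal usage sketch (the import path comes from the diff header above; the tensor size and the assumption that inputs already lie in [minv, maxv] are illustrative only):

import torch
from rising.transforms.functional.intensity import bezier_3rd_order, random_inversion

x = torch.rand(2, 1, 16, 16, 16)                  # dummy volume batch with values in [0, 1)
y = bezier_3rd_order(x, maxv=1.0, minv=0.0)       # remap intensities through a random cubic Bezier curve
z = random_inversion(x, prob_inversion=0.5)       # with probability 0.5 returns maxv + minv - x
assert y.shape == x.shape and z.shape == x.shape  # both functions preserve the input shape
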
49 changes: 28 additions & 21 deletions rising/transforms/functional/painting.py
@@ -3,18 +3,25 @@
 __all__ = ["local_pixel_shuffle", "random_inpainting", "random_outpainting"]


-def local_pixel_shuffle(data: torch.Tensor, n: int = -1, block_size: tuple=(0,0,0), rel_block_size: float = 0.1) -> torch.Tensor:
+def local_pixel_shuffle(
+        data: torch.Tensor,
+        n: int = -1,
+        block_size: tuple = (
+            0,
+            0,
+            0),
+        rel_block_size: float = 0.1) -> torch.Tensor:

     batch_size, channels, img_rows, img_cols, img_deps = data.size()

     if n < 0:
-        n = int(1000*channels) # changes ~ 12.5% of voxels
+        n = int(1000 * channels) # changes ~ 12.5% of voxels
     for b in range(batch_size):
         for _ in range(n):
-            c = torch.randint(0,channels-1, (1,))
-            (block_size_x, block_size_y, block_size_z) = (torch.tensor([size]) for size in block_size)
+            c = torch.randint(0, channels - 1, (1,))
+
+            (block_size_x, block_size_y, block_size_z) = (torch.tensor([size]) for size in block_size)

             if rel_block_size > 0:
                 block_size_x = torch.randint(2, int(img_rows * rel_block_size), (1,))
                 block_size_y = torch.randint(2, int(img_cols * rel_block_size), (1,))
@@ -25,15 +32,15 @@ def local_pixel_shuffle(data: torch.Tensor, n: int = -1, block_size: tuple=(0,0,
                 z = torch.randint(0, int(img_deps - block_size_z), (1,))

             window = data[b, c, x:x + block_size_x,
-                          y:y + block_size_y,
-                          z:z + block_size_z,
-                          ]
+                             y:y + block_size_y,
+                             z:z + block_size_z,
+                             ]
             idx = torch.randperm(window.numel())
             window = window.view(-1)[idx].view(window.size())

             data[b, c, x:x + block_size_x,
-                 y:y + block_size_y,
-                 z:z + block_size_z] = window
+                       y:y + block_size_y,
+                       z:z + block_size_z] = window

     return data

@@ -52,11 +59,11 @@ def random_inpainting(data: torch.Tensor, n: int = 5, maxv: float = 1.0, minv: f
         z = torch.randint(3, int(img_deps - block_size_z - 3), (1,))

         block = torch.rand((1, channels, block_size_x, block_size_y, block_size_z)) \
-            * (maxv-minv) + minv
+            * (maxv - minv) + minv

         data[b, :, x:x + block_size_x,
-             y:y + block_size_y,
-             z:z + block_size_z] = block
+                   y:y + block_size_y,
+                   z:z + block_size_z] = block

         n = n - 1

@@ -69,17 +76,17 @@ def random_outpainting(data: torch.Tensor, maxv: float = 1.0, minv: float = 0.0)

     out = torch.rand(data.size()) * (maxv - minv) + minv

-    block_size_x = torch.randint(5*img_rows // 7, 6*img_rows // 7, (1,))
-    block_size_y = torch.randint(5*img_cols // 7, 6*img_cols // 7, (1,))
-    block_size_z = torch.randint(5*img_deps // 7, 6*img_deps // 7, (1,))
+    block_size_x = torch.randint(5 * img_rows // 7, 6 * img_rows // 7, (1,))
+    block_size_y = torch.randint(5 * img_cols // 7, 6 * img_cols // 7, (1,))
+    block_size_z = torch.randint(5 * img_deps // 7, 6 * img_deps // 7, (1,))
     x = torch.randint(3, int(img_rows - block_size_x - 3), (1,))
     y = torch.randint(3, int(img_cols - block_size_y - 3), (1,))
     z = torch.randint(3, int(img_deps - block_size_z - 3), (1,))

     out[:, :, x:x + block_size_x,
-        y:y + block_size_y,
-        z:z + block_size_z] = data[:, :, x:x + block_size_x,
-                                   y:y + block_size_y,
-                                   z:z + block_size_z]
+              y:y + block_size_y,
+              z:z + block_size_z] = data[:, :, x:x + block_size_x,
+                                               y:y + block_size_y,
+                                               z:z + block_size_z]

     return out
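
A quick sketch of how the three painting helpers are called. The module path comes from the diff header; the tensor sizes are assumptions chosen only to satisfy the block-size arithmetic above. Note that local_pixel_shuffle and random_inpainting write into their input in place, hence the clones:

import torch
from rising.transforms.functional.painting import local_pixel_shuffle, random_inpainting, random_outpainting

# the functions unpack data.size() as (batch, channels, rows, cols, depths)
x = torch.rand(1, 2, 64, 64, 64)  # two channels, since local_pixel_shuffle samples torch.randint(0, channels - 1, ...)

shuffled = local_pixel_shuffle(x.clone(), n=20)                     # shuffle voxels inside 20 random blocks per sample
inpainted = random_inpainting(x.clone(), n=5, maxv=1.0, minv=0.0)   # overwrite 5 random blocks with uniform noise
outpainted = random_outpainting(x, maxv=1.0, minv=0.0)              # uniform noise everywhere except one kept block
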
5 changes: 2 additions & 3 deletions rising/transforms/intensity.py
@@ -314,7 +314,7 @@ class RandomBezierTransform(BaseTransform):
     """ Apply a random 3rd order bezier spline to the intensity values,
     as proposed in Models Genesis """

-    def __init__(self, maxv: float = 1.0, minv: float=0.0, keys: Sequence = ('data',), **kwargs):
+    def __init__(self, maxv: float = 1.0, minv: float = 0.0, keys: Sequence = ('data',), **kwargs):

         super().__init__(augment_fn=bezier_3rd_order, maxv=maxv, minv=minv, keys=keys, grad=False, **kwargs)

@@ -324,9 +324,8 @@ class InvertAmplitude(BaseTransform):
     out = maxv + minv - data
     """

-    def __init__(self, prob: float = 0.5, maxv: float = 1.0, minv: float=0.0,
+    def __init__(self, prob: float = 0.5, maxv: float = 1.0, minv: float = 0.0,
                  keys: Sequence = ('data',), **kwargs):
-
         super().__init__(augment_fn=random_inversion, prob_inversion=prob, maxv=maxv, minv=minv,
                          keys=keys, grad=False, **kwargs)
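
For context, a sketch of how these transform classes are typically wired up. Class and argument names are taken from the diff; the dict-in/dict-out calling convention mirrors the forward(**data) signature visible in the painting.py diff below and should otherwise be read as an assumption:

import torch
from rising.transforms.intensity import RandomBezierTransform, InvertAmplitude

bezier = RandomBezierTransform(maxv=1.0, minv=0.0, keys=('data',))
invert = InvertAmplitude(prob=0.5, maxv=1.0, minv=0.0, keys=('data',))

batch = {'data': torch.rand(4, 1, 32, 32)}
batch = bezier(**batch)   # 'data' is remapped through a random 3rd-order Bezier spline
batch = invert(**batch)   # with probability 0.5, 'data' becomes maxv + minv - data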

10 changes: 5 additions & 5 deletions rising/transforms/painting.py
@@ -14,7 +14,7 @@ class LocalPixelShuffle(BaseTransform):
     """ Shuffels Pixels locally in n patches,
     as proposed in Models Genesis """

-    def __init__(self, n: int=-1,
+    def __init__(self, n: int = -1,
                  keys: Sequence = ('data',), grad: bool = False, **kwargs):
         """
         Args:
@@ -32,7 +32,7 @@ class RandomInpainting(BaseTransform):
     as proposed in Models Genesis """

     def __init__(self, n: int = 5,
-                 maxv: float=1.0, minv: float = 0.0,
+                 maxv: float = 1.0, minv: float = 0.0,
                  keys: Sequence = ('data',), grad: bool = False, **kwargs):
         """
         Args:
@@ -50,7 +50,7 @@ class RandomOutpainting(AbstractTransform):
     """ The border of the images will be replaced by uniform noise,
     as proposed in Models Genesis """

-    def __init__(self, prob: float = 0.5, maxv: float=1.0, minv: float = 0.0,
+    def __init__(self, prob: float = 0.5, maxv: float = 1.0, minv: float = 0.0,
                  keys: Sequence = ('data',), grad: bool = False, **kwargs):
         """
         Args:
@@ -78,7 +78,7 @@ class RandomInOrOutpainting(AbstractTransform):
     as proposed in Models Genesis """

     def __init__(self, prob: float = 0.5, n: int = 5,
-                 maxv: float=1.0, minv: float = 0.0,
+                 maxv: float = 1.0, minv: float = 0.0,
                  keys: Sequence = ('data',), grad: bool = False, **kwargs):
         """
         Args:
@@ -103,4 +103,4 @@ def forward(self, **data) -> dict:
             else:
                 for key in self.keys:
                     data[key] = random_inpainting(data[key], n=self.n, maxv=self.maxv, minv=self.minv)
-        return data
+        return data
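
A usage sketch for the combined transform. The probability semantics (outpainting versus the inpainting branch shown above) are inferred from the class docstrings and should be read as an assumption:

import torch
from rising.transforms.painting import RandomInOrOutpainting

trafo = RandomInOrOutpainting(prob=0.5, n=5, maxv=1.0, minv=0.0, keys=('data',))
batch = {'data': torch.rand(1, 1, 64, 64, 64)}  # 5-D input, as required by the painting functionals
batch = trafo(**batch)                          # each call picks inpainting or outpainting for all listed keys
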
37 changes: 19 additions & 18 deletions rising/utils/torchinterp1d.py
@@ -33,6 +33,7 @@
 import torch
 import contextlib

+
 class Interp1d(torch.autograd.Function):

     @staticmethod
@@ -82,17 +83,17 @@ def forward(ctx, x, y, xnew, out=None):
         # Checking for the dimensions
         assert (v['x'].shape[1] == v['y'].shape[1]
                 and (
-                     v['x'].shape[0] == v['y'].shape[0]
-                     or v['x'].shape[0] == 1
-                     or v['y'].shape[0] == 1
-                    )
-                ), ("x and y must have the same number of columns, and either "
-                    "the same number of row or one of them having only one "
-                    "row.")
+            v['x'].shape[0] == v['y'].shape[0]
+            or v['x'].shape[0] == 1
+            or v['y'].shape[0] == 1
+        )
+        ), ("x and y must have the same number of columns, and either "
+            "the same number of row or one of them having only one "
+            "row.")

         reshaped_xnew = False
         if ((v['x'].shape[0] == 1) and (v['y'].shape[0] == 1)
-           and (v['xnew'].shape[0] > 1)):
+                and (v['xnew'].shape[0] > 1)):
             # if there is only one row for both x and y, there is no need to
             # loop over the rows of xnew because they will all have to face the
             # same interpolation problem. We should just stack them together to
@@ -105,7 +106,7 @@ def forward(ctx, x, y, xnew, out=None):
         D = max(v['x'].shape[0], v['xnew'].shape[0])
         shape_ynew = (D, v['xnew'].shape[-1])
         if out is not None:
-            if out.numel() != shape_ynew[0]*shape_ynew[1]:
+            if out.numel() != shape_ynew[0] * shape_ynew[1]:
                 # The output provided is of incorrect shape.
                 # Going for a new one
                 out = None
@@ -162,14 +163,14 @@ def sel(name):
         # output. Hence, we start also activating gradient tracking
         with torch.enable_grad() if enable_grad else contextlib.suppress():
             v['slopes'] = (
-                    (v['y'][:, 1:]-v['y'][:, :-1])
-                    /
-                    (eps + (v['x'][:, 1:]-v['x'][:, :-1]))
-                )
+                (v['y'][:, 1:] - v['y'][:, :-1])
+                /
+                (eps + (v['x'][:, 1:] - v['x'][:, :-1]))
+            )

             # now build the linear interpolation
-            ynew = sel('y') + sel('slopes')*(
-                    v['xnew'] - sel('x'))
+            ynew = sel('y') + sel('slopes') * (
+                v['xnew'] - sel('x'))

             if reshaped_xnew:
                 ynew = ynew.view(original_xnew_shape)
@@ -181,9 +182,9 @@ def sel(name):
     def backward(ctx, grad_out):
         inputs = ctx.saved_tensors[1:]
         gradients = torch.autograd.grad(
-                        ctx.saved_tensors[0],
-                        [i for i in inputs if i is not None],
-                        grad_out, retain_graph=True)
+            ctx.saved_tensors[0],
+            [i for i in inputs if i is not None],
+            grad_out, retain_graph=True)
         result = [None, ] * 5
         pos = 0
         for index in range(len(inputs)):
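
For reference, a minimal sketch of the Interp1d entry point that bezier_3rd_order relies on. 1-D inputs are passed the same way the call in intensity.py passes them; the example data are assumptions:

import torch
from rising.utils.torchinterp1d import Interp1d

x = torch.linspace(0.0, 1.0, 11)   # known sample locations (sorted)
y = x ** 2                         # known sample values
xnew = torch.rand(5)               # query locations inside [0, 1]

ynew = Interp1d.apply(x, y, xnew)  # piecewise-linear interpolation; backward() above routes gradients to the saved inputs
print(ynew.view(-1))               # reshape as needed, as bezier_3rd_order does with .view(data.shape)
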
2 changes: 1 addition & 1 deletion tests/transforms/functional/test_affine.py
@@ -234,7 +234,7 @@ def test_matrix_parametrization(self):

             torch.tensor([[[0., -4., -12.], [5., 0., 10.], [0., 0., 1.]]]),

-            torch.tensor([[[0., 1/5., -2.], [-1/4., 0., -3.], [0., 0., 1.]]]),
+            torch.tensor([[[0., 1 / 5., -2.], [-1 / 4., 0., -3.], [0., 0., 1.]]]),

             torch.bmm(torch.bmm(torch.tensor([[[2., 0., 0], [0., 5., 0.], [0., 0., 1.]],
                                               [[2., 0., 0.], [0., 5., 0.], [0., 0., 1.]],