Remove assert statement from non-test files (#3745)
* Remove assert statement from non-test files

* [MONAI] python code formatting

Signed-off-by: monai-bot <monai.miccai2019@gmail.com>

Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
Co-authored-by: monai-bot <monai.miccai2019@gmail.com>
deepsource-autofix[bot] and monai-bot authored Jan 30, 2022
1 parent db61a08 commit f08f1d3
Showing 20 changed files with 39 additions and 38 deletions.
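The diff below applies two mechanical patterns. First, bare assert statements in library (non-test) code are replaced with explicit raises, so the checks are not stripped when Python runs with the -O flag. Second, the formatting pass removes the spaces around the power operator when both operands are simple names or literals. A minimal sketch of both patterns, using illustrative function names that do not appear in the diff:

def check_line_count(contents, contents_orig):
    # before: assert len(contents) == len(contents_orig) + 2
    # after: an explicit raise, which survives python -O
    if len(contents) != len(contents_orig) + 2:
        raise AssertionError


def bytes_to_gb(n_bytes: int) -> float:
    # before: round(n_bytes / 1024 ** 3, 1)
    # after: the formatter hugs ** around simple operands
    return round(n_bytes / 1024**3, 1)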
8 changes: 4 additions & 4 deletions monai/config/deviceconfig.py
@@ -161,9 +161,9 @@ def get_system_info() -> OrderedDict:
),
)
mem = psutil.virtual_memory()
- _dict_append(output, "Total physical memory (GB)", lambda: round(mem.total / 1024 ** 3, 1))
- _dict_append(output, "Available memory (GB)", lambda: round(mem.available / 1024 ** 3, 1))
- _dict_append(output, "Used memory (GB)", lambda: round(mem.used / 1024 ** 3, 1))
+ _dict_append(output, "Total physical memory (GB)", lambda: round(mem.total / 1024**3, 1))
+ _dict_append(output, "Available memory (GB)", lambda: round(mem.available / 1024**3, 1))
+ _dict_append(output, "Used memory (GB)", lambda: round(mem.used / 1024**3, 1))

return output

@@ -209,7 +209,7 @@ def get_gpu_info() -> OrderedDict:
_dict_append(output, f"GPU {gpu} Is integrated", lambda: bool(gpu_info.is_integrated))
_dict_append(output, f"GPU {gpu} Is multi GPU board", lambda: bool(gpu_info.is_multi_gpu_board))
_dict_append(output, f"GPU {gpu} Multi processor count", lambda: gpu_info.multi_processor_count)
_dict_append(output, f"GPU {gpu} Total memory (GB)", lambda: round(gpu_info.total_memory / 1024 ** 3, 1))
_dict_append(output, f"GPU {gpu} Total memory (GB)", lambda: round(gpu_info.total_memory / 1024**3, 1))
_dict_append(output, f"GPU {gpu} CUDA capability (maj.min)", lambda: f"{gpu_info.major}.{gpu_info.minor}")

return output
2 changes: 1 addition & 1 deletion monai/data/dataset.py
@@ -513,7 +513,7 @@ def __init__(
self.db_file = self.cache_dir / f"{db_name}.lmdb"
self.lmdb_kwargs = lmdb_kwargs or {}
if not self.lmdb_kwargs.get("map_size", 0):
- self.lmdb_kwargs["map_size"] = 1024 ** 4 # default map_size
+ self.lmdb_kwargs["map_size"] = 1024**4 # default map_size
# lmdb is single-writer multi-reader by default
# the cache is created without multi-threading
self._read_env = None
2 changes: 1 addition & 1 deletion monai/data/dataset_summary.py
@@ -150,7 +150,7 @@ def calculate_statistics(self, foreground_threshold: int = 0):

self.data_max, self.data_min = max(voxel_max), min(voxel_min)
self.data_mean = (voxel_sum / voxel_ct).item()
- self.data_std = (torch.sqrt(voxel_square_sum / voxel_ct - self.data_mean ** 2)).item()
+ self.data_std = (torch.sqrt(voxel_square_sum / voxel_ct - self.data_mean**2)).item()

def calculate_percentiles(
self,
2 changes: 1 addition & 1 deletion monai/handlers/parameter_scheduler.py
@@ -134,7 +134,7 @@ def _exponential(initial_value: float, gamma: float, current_step: int) -> float
Returns:
float: new parameter value
"""
- return initial_value * gamma ** current_step
+ return initial_value * gamma**current_step

@staticmethod
def _step(initial_value: float, gamma: float, step_size: int, current_step: int) -> float:
6 changes: 3 additions & 3 deletions monai/losses/image_dissimilarity.py
@@ -126,7 +126,7 @@ def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
if target.shape != pred.shape:
raise ValueError(f"ground truth has differing shape ({target.shape}) from pred ({pred.shape})")

- t2, p2, tp = target ** 2, pred ** 2, target * pred
+ t2, p2, tp = target**2, pred**2, target * pred
kernel, kernel_vol = self.kernel.to(pred), self.kernel_vol.to(pred)
# sum over kernel
t_sum = separable_filtering(target, kernels=[kernel.to(pred)] * self.ndim)
@@ -217,7 +217,7 @@ def __init__(
self.num_bins = num_bins
self.kernel_type = kernel_type
if self.kernel_type == "gaussian":
- self.preterm = 1 / (2 * sigma ** 2)
+ self.preterm = 1 / (2 * sigma**2)
self.bin_centers = bin_centers[None, None, ...]
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
@@ -280,7 +280,7 @@ def parzen_windowing_b_spline(self, img: torch.Tensor, order: int) -> Tuple[torc
weight = weight + (sample_bin_matrix < 0.5) + (sample_bin_matrix == 0.5) * 0.5
elif order == 3:
weight = (
- weight + (4 - 6 * sample_bin_matrix ** 2 + 3 * sample_bin_matrix ** 3) * (sample_bin_matrix < 1) / 6
+ weight + (4 - 6 * sample_bin_matrix**2 + 3 * sample_bin_matrix**3) * (sample_bin_matrix < 1) / 6
)
weight = weight + (2 - sample_bin_matrix) ** 3 * (sample_bin_matrix >= 1) * (sample_bin_matrix < 2) / 6
else:
2 changes: 1 addition & 1 deletion monai/networks/blocks/selfattention.py
@@ -46,7 +46,7 @@ def __init__(self, hidden_size: int, num_heads: int, dropout_rate: float = 0.0)
self.drop_output = nn.Dropout(dropout_rate)
self.drop_weights = nn.Dropout(dropout_rate)
self.head_dim = hidden_size // num_heads
- self.scale = self.head_dim ** -0.5
+ self.scale = self.head_dim**-0.5

def forward(self, x):
q, k, v = einops.rearrange(self.qkv(x), "b h (qkv l d) -> qkv b l h d", qkv=3, l=self.num_heads)
4 changes: 2 additions & 2 deletions monai/networks/blocks/upsample.py
@@ -219,7 +219,7 @@ def __init__(
out_channels = out_channels or in_channels
if not out_channels:
raise ValueError("in_channels need to be specified.")
- conv_out_channels = out_channels * (scale_factor ** self.dimensions)
+ conv_out_channels = out_channels * (scale_factor**self.dimensions)
self.conv_block = Conv[Conv.CONV, self.dimensions](
in_channels=in_channels, out_channels=conv_out_channels, kernel_size=3, stride=1, padding=1, bias=bias
)
@@ -247,7 +247,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
x: Tensor in shape (batch, channel, spatial_1[, spatial_2, ...).
"""
x = self.conv_block(x)
- if x.shape[1] % (self.scale_factor ** self.dimensions) != 0:
+ if x.shape[1] % (self.scale_factor**self.dimensions) != 0:
raise ValueError(
f"Number of channels after `conv_block` ({x.shape[1]}) must be evenly "
"divisible by scale_factor ** dimensions "
2 changes: 1 addition & 1 deletion monai/networks/blocks/warp.py
@@ -150,7 +150,7 @@ def forward(self, dvf):
Returns:
a dense displacement field
"""
- ddf: torch.Tensor = dvf / (2 ** self.num_steps)
+ ddf: torch.Tensor = dvf / (2**self.num_steps)
for _ in range(self.num_steps):
ddf = ddf + self.warp_layer(image=ddf, ddf=ddf)
return ddf
2 changes: 1 addition & 1 deletion monai/networks/layers/convutils.py
@@ -115,7 +115,7 @@ def gaussian_1d(
out = out.clamp(min=0)
elif approx.lower() == "sampled":
x = torch.arange(-tail, tail + 1, dtype=torch.float, device=sigma.device)
- out = torch.exp(-0.5 / (sigma * sigma) * x ** 2)
+ out = torch.exp(-0.5 / (sigma * sigma) * x**2)
if not normalize: # compute the normalizer
out = out / (2.5066282 * sigma)
elif approx.lower() == "scalespace":
10 changes: 5 additions & 5 deletions monai/networks/nets/dints.py
@@ -124,7 +124,7 @@ def __init__(self, in_channel: int, out_channel: int, spatial_dims: int = 3):
# s0 is upsampled 2x from s1, representing feature sizes at two resolutions.
# in_channel * s0 (activation) + 3 * out_channel * s1 (convolution, concatenation, normalization)
# s0 = s1 * 2^(spatial_dims) = output_size / out_channel * 2^(spatial_dims)
- self.ram_cost = in_channel / out_channel * 2 ** self._spatial_dims + 3
+ self.ram_cost = in_channel / out_channel * 2**self._spatial_dims + 3


class MixedOp(nn.Module):
@@ -330,7 +330,7 @@ def __init__(
# define downsample stems before DiNTS search
if use_downsample:
self.stem_down[str(res_idx)] = StemTS(
- nn.Upsample(scale_factor=1 / (2 ** res_idx), mode=mode, align_corners=True),
+ nn.Upsample(scale_factor=1 / (2**res_idx), mode=mode, align_corners=True),
conv_type(
in_channels=in_channels,
out_channels=self.filter_nums[res_idx],
@@ -373,7 +373,7 @@ def __init__(

else:
self.stem_down[str(res_idx)] = StemTS(
- nn.Upsample(scale_factor=1 / (2 ** res_idx), mode=mode, align_corners=True),
+ nn.Upsample(scale_factor=1 / (2**res_idx), mode=mode, align_corners=True),
conv_type(
in_channels=in_channels,
out_channels=self.filter_nums[res_idx],
@@ -789,7 +789,7 @@ def get_ram_cost_usage(self, in_size, full: bool = False):
image_size = np.array(in_size[-self._spatial_dims :])
sizes = []
for res_idx in range(self.num_depths):
- sizes.append(batch_size * self.filter_nums[res_idx] * (image_size // (2 ** res_idx)).prod())
+ sizes.append(batch_size * self.filter_nums[res_idx] * (image_size // (2**res_idx)).prod())
sizes = torch.tensor(sizes).to(torch.float32).to(self.device) / (2 ** (int(self.use_downsample)))
probs_a, arch_code_prob_a = self.get_prob_a(child=False)
cell_prob = F.softmax(self.log_alpha_c, dim=-1)
@@ -807,7 +807,7 @@ def get_ram_cost_usage(self, in_size, full: bool = False):
* (1 + (ram_cost[blk_idx, path_idx] * cell_prob[blk_idx, path_idx]).sum())
* sizes[self.arch_code2out[path_idx]]
)
- return usage * 32 / 8 / 1024 ** 2
+ return usage * 32 / 8 / 1024**2

def get_topology_entropy(self, probs):
"""
2 changes: 1 addition & 1 deletion monai/networks/nets/highresnet.py
@@ -168,7 +168,7 @@ def __init__(
# residual blocks
for (idx, params) in enumerate(layer_params[1:-2]): # res blocks except the 1st and last two conv layers.
_in_chns, _out_chns = _out_chns, params["n_features"]
- _dilation = 2 ** idx
+ _dilation = 2**idx
for _ in range(params["repeat"]):
blocks.append(
HighResBlock(
6 changes: 3 additions & 3 deletions monai/networks/nets/regunet.py
@@ -92,7 +92,7 @@ def __init__(
raise AssertionError
self.encode_kernel_sizes: List[int] = encode_kernel_sizes

- self.num_channels = [self.num_channel_initial * (2 ** d) for d in range(self.depth + 1)]
+ self.num_channels = [self.num_channel_initial * (2**d) for d in range(self.depth + 1)]
self.min_extract_level = min(self.extract_levels)

# init layers
@@ -310,14 +310,14 @@ def __init__(
encode_kernel_sizes: Union[int, List[int]] = 3,
):
for size in image_size:
- if size % (2 ** depth) != 0:
+ if size % (2**depth) != 0:
raise ValueError(
f"given depth {depth}, "
f"all input spatial dimension must be divisible by {2 ** depth}, "
f"got input of size {image_size}"
)
self.image_size = image_size
- self.decode_size = [size // (2 ** depth) for size in image_size]
+ self.decode_size = [size // (2**depth) for size in image_size]
super().__init__(
spatial_dims=spatial_dims,
in_channels=in_channels,
6 changes: 3 additions & 3 deletions monai/networks/nets/segresnet.py
@@ -102,7 +102,7 @@ def _make_down_layers(self):
down_layers = nn.ModuleList()
blocks_down, spatial_dims, filters, norm = (self.blocks_down, self.spatial_dims, self.init_filters, self.norm)
for i in range(len(blocks_down)):
- layer_in_channels = filters * 2 ** i
+ layer_in_channels = filters * 2**i
pre_conv = (
get_conv_layer(spatial_dims, layer_in_channels // 2, layer_in_channels, stride=2)
if i > 0
@@ -299,12 +299,12 @@ def _get_vae_loss(self, net_input: torch.Tensor, vae_input: torch.Tensor):
if self.vae_estimate_std:
z_sigma = self.vae_fc2(x_vae)
z_sigma = F.softplus(z_sigma)
- vae_reg_loss = 0.5 * torch.mean(z_mean ** 2 + z_sigma ** 2 - torch.log(1e-8 + z_sigma ** 2) - 1)
+ vae_reg_loss = 0.5 * torch.mean(z_mean**2 + z_sigma**2 - torch.log(1e-8 + z_sigma**2) - 1)

x_vae = z_mean + z_sigma * z_mean_rand
else:
z_sigma = self.vae_default_std
- vae_reg_loss = torch.mean(z_mean ** 2)
+ vae_reg_loss = torch.mean(z_mean**2)

x_vae = z_mean + z_sigma * z_mean_rand

2 changes: 1 addition & 1 deletion monai/networks/utils.py
@@ -270,7 +270,7 @@ def pixelshuffle(
dim, factor = spatial_dims, scale_factor
input_size = list(x.size())
batch_size, channels = input_size[:2]
- scale_divisor = factor ** dim
+ scale_divisor = factor**dim

if channels % scale_divisor != 0:
raise ValueError(
4 changes: 2 additions & 2 deletions monai/transforms/intensity/array.py
@@ -182,9 +182,9 @@ def _add_noise(self, img: NdarrayOrTensor, mean: float, std: float):
if isinstance(img, torch.Tensor):
n1 = torch.tensor(self._noise1, device=img.device)
n2 = torch.tensor(self._noise2, device=img.device)
- return torch.sqrt((img + n1) ** 2 + n2 ** 2)
+ return torch.sqrt((img + n1) ** 2 + n2**2)

- return np.sqrt((img + self._noise1) ** 2 + self._noise2 ** 2)
+ return np.sqrt((img + self._noise1) ** 2 + self._noise2**2)

def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
2 changes: 1 addition & 1 deletion monai/transforms/smooth_field/array.py
@@ -232,7 +232,7 @@ def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTen
# everything below here is to be computed using the destination type (numpy, tensor, etc.)

img = (img - img_min) / (img_rng + 1e-10) # rescale to unit values
- img = img ** rfield # contrast is changed by raising image data to a power, in this case the field
+ img = img**rfield # contrast is changed by raising image data to a power, in this case the field

out = (img * img_rng) + img_min # rescale back to the original image value range

5 changes: 3 additions & 2 deletions monai/transforms/utils_create_transform_ims.py
@@ -244,7 +244,8 @@ def update_docstring(code_path, transform_name):
contents.insert(image_line + 1, " :alt: example of " + transform_name + "\n")

# check that we've only added two lines
- assert len(contents) == len(contents_orig) + 2
+ if len(contents) != len(contents_orig) + 2:
+     raise AssertionError

# write the updated doc to overwrite the original
with open(code_path, "w") as f:
@@ -382,7 +383,7 @@ def get_images(data, is_label=False):
# we might need to panel the images. this happens if a transform produces e.g. 4 output images.
# In this case, we create a 2-by-2 grid from them. Output will be a list containing n_orthog_views,
# each element being either the image (if num_samples is 1) or the panelled image.
- nrows = int(np.floor(num_samples ** 0.5))
+ nrows = int(np.floor(num_samples**0.5))
for view in range(num_orthog_views):
result = np.asarray([d[view] for d in data])
nindex, height, width = result.shape
6 changes: 3 additions & 3 deletions tests/test_lmdbdataset.py
@@ -57,7 +57,7 @@
SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]),
],
(128, 128, 128),
{"pickle_protocol": 2, "lmdb_kwargs": {"map_size": 100 * 1024 ** 2}},
{"pickle_protocol": 2, "lmdb_kwargs": {"map_size": 100 * 1024**2}},
]

TEST_CASE_6 = [
@@ -66,7 +66,7 @@
SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]),
],
(128, 128, 128),
{"db_name": "testdb", "lmdb_kwargs": {"map_size": 100 * 1024 ** 2}},
{"db_name": "testdb", "lmdb_kwargs": {"map_size": 100 * 1024**2}},
]

TEST_CASE_7 = [
@@ -75,7 +75,7 @@
SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]),
],
(128, 128, 128),
{"db_name": "testdb", "lmdb_kwargs": {"map_size": 2 * 1024 ** 2}},
{"db_name": "testdb", "lmdb_kwargs": {"map_size": 2 * 1024**2}},
]


2 changes: 1 addition & 1 deletion tests/test_tile_on_grid.py
@@ -107,7 +107,7 @@ def make_image(

tiles = np.stack(tiles_list, axis=0) # type: ignore

- if (filter_mode == "min" or filter_mode == "max") and len(tiles) > tile_count ** 2:
+ if (filter_mode == "min" or filter_mode == "max") and len(tiles) > tile_count**2:
tiles = tiles[np.argsort(tiles.sum(axis=(1, 2, 3)))]

return imlarge, tiles
2 changes: 1 addition & 1 deletion tests/test_tile_on_grid_dict.py
@@ -116,7 +116,7 @@ def make_image(

tiles = np.stack(tiles_list, axis=0) # type: ignore

- if (filter_mode == "min" or filter_mode == "max") and len(tiles) > tile_count ** 2:
+ if (filter_mode == "min" or filter_mode == "max") and len(tiles) > tile_count**2:
tiles = tiles[np.argsort(tiles.sum(axis=(1, 2, 3)))]

return imlarge, tiles