From 46c1a4cb7834d3fde81ab29cbdbf287047365a74 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Wed, 24 Feb 2021 15:02:32 +0000 Subject: [PATCH 001/457] Enhance csrc/resample (#1471) * update openmp flag Signed-off-by: Wenqi Li * improves boundtype docs Signed-off-by: Wenqi Li * update setup.py Signed-off-by: Wenqi Li * input validation 1d Signed-off-by: Wenqi Li * fixes typos Signed-off-by: Wenqi Li * fixes typos Signed-off-by: Wenqi Li * merge upstream changes Signed-off-by: Wenqi Li * tests enums Signed-off-by: Wenqi Li * init. test grid pull Signed-off-by: Wenqi Li * update Signed-off-by: Wenqi Li * test grid_pull Signed-off-by: Wenqi Li * fixes min test Signed-off-by: Wenqi Li * adds device tests Signed-off-by: Wenqi Li * bwd tests Signed-off-by: Wenqi Li --- monai/csrc/ext.cpp | 22 +- monai/csrc/resample/pushpull.h | 22 +- monai/csrc/resample/pushpull_cpu.cpp | 1220 ++++++++++++------ monai/csrc/resample/pushpull_cuda.cu | 1222 +++++++++++++------ monai/csrc/utils/common_utils.h | 33 +- monai/csrc/utils/resample_utils.h | 2 + monai/networks/layers/spatial_transforms.py | 193 +-- runtests.sh | 1 + setup.py | 4 +- tests/test_enum_bound_interp.py | 73 ++ tests/test_grid_pull.py | 94 ++ tests/testing_data/1D_BP_bwd.txt | 224 ++++ tests/testing_data/1D_BP_fwd.txt | 56 + tests/testing_data/cpp_resample_answers.py | 41 + 14 files changed, 2355 insertions(+), 852 deletions(-) create mode 100644 tests/test_enum_bound_interp.py create mode 100644 tests/test_grid_pull.py create mode 100644 tests/testing_data/1D_BP_bwd.txt create mode 100644 tests/testing_data/1D_BP_fwd.txt create mode 100644 tests/testing_data/cpp_resample_answers.py diff --git a/monai/csrc/ext.cpp b/monai/csrc/ext.cpp index 2e0644bc78..b4bb0f2c04 100644 --- a/monai/csrc/ext.cpp +++ b/monai/csrc/ext.cpp @@ -29,14 +29,20 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { // resample bound mode py::enum_(m, "BoundType") - .value("replicate", monai::BoundType::Replicate) - .value("dct1", monai::BoundType::DCT1) - .value("dct2", monai::BoundType::DCT2) - .value("dst1", monai::BoundType::DST1) - .value("dst2", monai::BoundType::DST2) - .value("dft", monai::BoundType::DFT) - .value("sliding", monai::BoundType::Sliding) - .value("zero", monai::BoundType::Zero) + .value("replicate", monai::BoundType::Replicate, "a a a | a b c d | d d d") + .value("nearest", monai::BoundType::Replicate, "a a a | a b c d | d d d") + .value("dct1", monai::BoundType::DCT1, "d c b | a b c d | c b a") + .value("mirror", monai::BoundType::DCT1, "d c b | a b c d | c b a") + .value("dct2", monai::BoundType::DCT2, "c b a | a b c d | d c b") + .value("reflect", monai::BoundType::DCT2, "c b a | a b c d | d c b") + .value("dst1", monai::BoundType::DST1, "-b -a 0 | a b c d | 0 -d -c") + .value("antimirror", monai::BoundType::DST1, "-b -a 0 | a b c d | 0 -d -c") + .value("dst2", monai::BoundType::DST2, "-c -b -a | a b c d | -d -c -b") + .value("antireflect", monai::BoundType::DST2, "-c -b -a | a b c d | -d -c -b") + .value("dft", monai::BoundType::DFT, "b c d | a b c d | a b c") + .value("wrap", monai::BoundType::DFT, "b c d | a b c d | a b c") + // .value("sliding", monai::BoundType::Sliding) + .value("zero", monai::BoundType::Zero, "0 0 0 | a b c d | 0 0 0") .export_values(); // resample interpolation mode diff --git a/monai/csrc/resample/pushpull.h b/monai/csrc/resample/pushpull.h index 45fd5ce564..1c20cc0114 100644 --- a/monai/csrc/resample/pushpull.h +++ b/monai/csrc/resample/pushpull.h @@ -69,8 +69,8 @@ at::Tensor grid_pull( CHECK_STRIDED(grid_opt) 
CHECK_SAME_DEVICE(input_opt, grid_opt) CHECK_SAME_DTYPE(input_opt, grid_opt) - CHECK_SPATIAL_2D_OR_3D(input) - CHECK_SPATIAL_2D_OR_3D(grid) + CHECK_SPATIAL_1D_2D_OR_3D(input) + CHECK_SPATIAL_1D_2D_OR_3D(grid) CHECK_GRID_COMPONENT(grid, grid.dim()) CHECK_SPATIAL_NOT_EMPTY(input) CHECK_SPATIAL_NOT_EMPTY(grid) @@ -165,8 +165,8 @@ at::Tensor grid_push( CHECK_STRIDED(grid_opt) CHECK_SAME_DEVICE(input_opt, grid_opt) CHECK_SAME_DTYPE(input_opt, grid_opt) - CHECK_SPATIAL_2D_OR_3D(input) - CHECK_SPATIAL_2D_OR_3D(grid) + CHECK_SPATIAL_1D_2D_OR_3D(input) + CHECK_SPATIAL_1D_2D_OR_3D(grid) CHECK_GRID_COMPONENT(grid, grid.dim()) CHECK_SPATIAL_NOT_EMPTY(input) CHECK_SPATIAL_NOT_EMPTY(grid) @@ -175,7 +175,10 @@ at::Tensor grid_push( CHECK_VEC_NOT_EMPTY(interpolation_mode); if (source_size.empty()) { - auto size = c10::IntArrayRef({input.size(2), input.size(3), input.dim() == 5 ? input.size(4) : 1}); + auto size = c10::IntArrayRef( + {input.dim() >= 3 ? input.size(2) : 1, + input.dim() >= 4 ? input.size(3) : 1, + input.dim() >= 5 ? input.size(4) : 1}); if (input.is_cuda()) #ifdef WITH_CUDA return cuda::pushpull( @@ -295,14 +298,15 @@ at::Tensor grid_count( CHECK_DEFINED(grid) auto grid_opt = grid.options(); CHECK_STRIDED(grid_opt) - CHECK_SPATIAL_2D_OR_3D(grid) + CHECK_SPATIAL_1D_2D_OR_3D(grid) CHECK_GRID_COMPONENT(grid, grid.dim()) CHECK_SPATIAL_NOT_EMPTY(grid) CHECK_VEC_NOT_EMPTY(bound_mode); CHECK_VEC_NOT_EMPTY(interpolation_mode); if (source_size.empty()) { - auto size = c10::IntArrayRef({grid.size(1), grid.size(2), grid.dim() == 5 ? grid.size(3) : 1}); + auto size = c10::IntArrayRef( + {grid.dim() >= 3 ? grid.size(2) : 1, grid.dim() >= 4 ? grid.size(3) : 1, grid.dim() >= 5 ? grid.size(4) : 1}); if (grid.is_cuda()) #ifdef WITH_CUDA return cuda::pushpull( @@ -422,8 +426,8 @@ at::Tensor grid_grad( CHECK_STRIDED(grid_opt) CHECK_SAME_DEVICE(input_opt, grid_opt) CHECK_SAME_DTYPE(input_opt, grid_opt) - CHECK_SPATIAL_2D_OR_3D(input) - CHECK_SPATIAL_2D_OR_3D(grid) + CHECK_SPATIAL_1D_2D_OR_3D(input) + CHECK_SPATIAL_1D_2D_OR_3D(grid) CHECK_GRID_COMPONENT(grid, grid.dim()) CHECK_SPATIAL_NOT_EMPTY(input) CHECK_SPATIAL_NOT_EMPTY(grid) diff --git a/monai/csrc/resample/pushpull_cpu.cpp b/monai/csrc/resample/pushpull_cpu.cpp index 40743a6cf1..dd10dd76ee 100644 --- a/monai/csrc/resample/pushpull_cpu.cpp +++ b/monai/csrc/resample/pushpull_cpu.cpp @@ -18,13 +18,14 @@ limitations under the License. // It handles boundary conditions and interpolation orders defined in // `utils/resample_utils.h` and `utils/resample_utils.h`. // These parameters can be specified per dimension. -// Isotorpic 0-th and 1-st order interpolation have their own (faster) +// Isotropic 0-th and 1-st order interpolation have their own (faster) // implementations. Sliding boundary conditions are also implemented // separately. // TODO: // . [DONE] generic 3d // . [DONE] generic 2d +// . [DONE] generic 1d // . sliding nearest 3d // . sliding nearest 2d // . sliding linear 3d @@ -37,6 +38,7 @@ limitations under the License. // . input bound/inter are always vectors -> clean unused constructors #include +#include #include #include "bounds_common.h" #include "interpolation_common.h" @@ -44,7 +46,7 @@ limitations under the License. 
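// Illustrative sketch, not part of this patch: the index folding implied
// by the new BoundType docstrings in ext.cpp. The real logic lives in
// bound::index / bound::sign (bounds_common.h); the helper name here is
// hypothetical. For "reflect"/DCT2, the docstring "c b a | a b c d | d c b"
// says an out-of-bound index bounces off the edge with the border sample
// repeated, i.e. the signal "a b c d d c b a" tiles with period 2*n:
static inline int64_t reflect_index_dct2(int64_t i, int64_t n) {
  if (n == 1)
    return 0; // a single sample extends to a constant signal
  int64_t period = 2 * n;
  i = ((i % period) + period) % period; // wrap into [0, 2*n)
  return i < n ? i : period - 1 - i; // fold the mirrored half back
}
// With n = 4 ("a b c d"): i = -1 -> 0 (a), i = 4 -> 3 (d), i = 5 -> 2 (c).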
//#include // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// CPU/GPU -specific parameters +// CPU-specific parameters #include namespace { // This parameter specifies the minimum number of voxels that should be @@ -74,18 +76,27 @@ MONAI_NAMESPACE_DEVICE { // cpu namespace { // anonymous namespace > everything inside has internal linkage // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // GENERIC PUSHPULL CLASS + // INDEXING UTILS // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // This class implements the bulk of the code. - // /!\ No type and shape checking is performed here. - template - class PushPullImpl { + // This class reads and sets all the parameters that will later be used + // by the algorithm in PushPullImpl. All of this is done outside of the + // implementation class so that we do not depend on generic types. The + // point is to pre-allocate all necessary tensors so that we can check + // if they're all compatible with 32 bit math. If it's the case, we can + // dispatch to a 32b cuda implementation, which might increase + // performance. Else, we use 64 bit math to compute offsets. + // (On CPU, we always use 64 bit offsets because it doesn't make a huge + // difference. It would be different if we had a vectorized + // implementation as in PyTorch). + class PushPullAllocator { public: + static constexpr int64_t max_int32 = std::numeric_limits::max(); + // ~~~ CONSTRUCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MONAI_HOST - PushPullImpl( + PushPullAllocator( int dim, BoundVectorRef bound, InterpolationVectorRef interpolation, @@ -125,101 +136,418 @@ MONAI_NAMESPACE_DEVICE { // cpu iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } - MONAI_HOST - PushPullImpl( - int dim, - BoundType bound, - InterpolationVectorRef interpolation, - bool extrapolate, - bool do_pull, - bool do_push, - bool do_count, - bool do_grad, - bool do_sgrad) - : dim(dim), - bound0(bound), - bound1(bound), - bound2(bound), - interpolation0(interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), - interpolation1( - interpolation.size() > 1 ? interpolation[1] - : interpolation.size() > 0 ? interpolation[0] - : InterpolationType::Linear), - interpolation2( - interpolation.size() > 2 ? interpolation[2] - : interpolation.size() > 1 ? interpolation[1] - : interpolation.size() > 0 ? interpolation[0] - : InterpolationType::Linear), - extrapolate(extrapolate), - do_pull(do_pull), - do_push(do_push), - do_count(do_count), - do_grad(do_grad), - do_sgrad(do_sgrad) { - iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; + // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + // Usually used for pull: + // - do_pull -> return source[grid] + // - do_push -> fails + // - do_grad -> return J(source)[grid] + // - do_sgrad -> return H(source)[grid] + MONAI_HOST void ioset(const Tensor& source, const Tensor& grid) { + init_all(); + init_source(source); + init_grid(grid); + init_output(); } - MONAI_HOST - PushPullImpl( - int dim, - BoundVectorRef bound, - InterpolationType interpolation, - bool extrapolate, - bool do_pull, - bool do_push, - bool do_count, - bool do_grad, - bool do_sgrad) - : dim(dim), - bound0(bound.size() > 0 ? bound[0] : BoundType::Replicate), - bound1( - bound.size() > 1 ? bound[1] - : bound.size() > 0 ? bound[0] - : BoundType::Replicate), - bound2( - bound.size() > 2 ? bound[2] - : bound.size() > 1 ? 
bound[1] - : bound.size() > 0 ? bound[0] - : BoundType::Replicate), - interpolation0(interpolation), - interpolation1(interpolation), - interpolation2(interpolation), - extrapolate(extrapolate), - do_pull(do_pull), - do_push(do_push), - do_count(do_count), - do_grad(do_grad), - do_sgrad(do_sgrad) { - iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; + // Usually used for pull_backward: + // - do_pull -> return source[grid] + // - do_push -> return push(target, grid, source.shape) + // - do_grad -> return J(source)[grid] + // - do_sgrad -> return H(source)[grid] + MONAI_HOST void ioset(const Tensor& source, const Tensor& grid, const Tensor& target) { + init_all(); + init_source(source); + init_grid(grid); + init_target(target); + init_output(); } - MONAI_HOST - PushPullImpl( - int dim, - BoundType bound, - InterpolationType interpolation, - bool extrapolate, - bool do_pull, - bool do_push, - bool do_count, - bool do_grad, - bool do_sgrad) - : dim(dim), - bound0(bound), - bound1(bound), - bound2(bound), - interpolation0(interpolation), - interpolation1(interpolation), - interpolation2(interpolation), - extrapolate(extrapolate), - do_pull(do_pull), - do_push(do_push), - do_count(do_count), - do_grad(do_grad), - do_sgrad(do_sgrad) { - iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; + // Usually used for push: + // - do_pull -> fails + // - do_push -> return push(target, grid, source_size) + // - do_grad -> fails + // - do_sgrad -> fails + MONAI_HOST void ioset(IntArrayRef source_size, const Tensor& grid, const Tensor& target) { + init_all(); + init_source(source_size); + init_grid(grid); + init_target(target); + init_output(); } + // Usually used for count: + // - do_pull -> fails + // - do_push -> return push(ones, grid, source_size) + // - do_grad -> fails + // - do_sgrad -> fails + MONAI_HOST void ioset(IntArrayRef source_size, const Tensor& grid) { + init_all(); + init_source(source_size); + init_grid(grid); + init_output(); + } + + // We just check that all tensors that we own are compatible with 32b math + bool canUse32BitIndexMath(int64_t max_elem = max_int32) const { + return src_32b_ok && trgt_32b_ok && grid_32b_ok && grad_32b_ok && out_32b_ok; + } + + private: + // Copied from aten/src/ATen/native/IndexingUtils.cpp in PyTorch 1.6. + // It is used to decide to which pointer type we should dispatch to. + // Basically, we need to make sure that the "furthest" element we need + // to reach is less than max_elem away. 
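// Worked example (shapes assumed for illustration): a contiguous float
// tensor of shape (N, C, X, Y, Z) = (2, 3, 128, 128, 128) has strides
// (6291456, 2097152, 16384, 128, 1); the furthest element then sits at
//   1*6291456 + 2*2097152 + 127*(16384 + 128 + 1) = 12582911 = numel - 1,
// comfortably below 2^31 - 1, so 32 bit offsets suffice for this tensor.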
+ static bool tensorCanUse32BitIndexMath(const Tensor& t, int64_t max_elem = max_int32) { + int64_t elements = t.numel(); + if (elements >= max_elem) { + return false; + } + if (elements == 0) { + return max_elem > 0; + } + + int64_t offset = 0; + int64_t linearId = elements - 1; + + // NOTE: Assumes all strides are positive, which is true for now + for (int i = t.dim() - 1; i >= 0; --i) { + int64_t curDimIndex = linearId % t.size(i); + int64_t curDimOffset = curDimIndex * t.stride(i); + offset += curDimOffset; + linearId /= t.size(i); + } + + if (offset >= max_elem) { + return false; + } + + return true; + } + + // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + MONAI_HOST void init_all(); + MONAI_HOST void init_source(const Tensor& source); + MONAI_HOST void init_source(IntArrayRef source_size); + MONAI_HOST void init_grid(const Tensor& grid); + MONAI_HOST void init_target(const Tensor& target); + MONAI_HOST void init_output(); + + // ~~~ OPTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + int dim; // dimensionality (2 or 3) + BoundType bound0; // boundary condition // x|W + BoundType bound1; // boundary condition // y|H + BoundType bound2; // boundary condition // z|D + InterpolationType interpolation0; // interpolation order // x|W + InterpolationType interpolation1; // interpolation order // y|H + InterpolationType interpolation2; // interpolation order // z|D + bool iso; // isotropic interpolation? + bool extrapolate; // compute out-of-bound values + bool do_pull; // sample a volume + bool do_push; // splat a volume + bool do_count; // splatting weights (= jacobian determinant) + bool do_grad; // backprop: gradient of grid // pull + bool do_sgrad; // sample spatial gradients + + // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + std::deque output; + TensorOptions src_opt; + TensorOptions grid_opt; + TensorOptions trgt_opt; + int64_t N; + int64_t C; + int64_t src_X; + int64_t src_Y; + int64_t src_Z; + int64_t trgt_X; + int64_t trgt_Y; + int64_t trgt_Z; + int64_t trgt_K; + int64_t src_sN; + int64_t src_sC; + int64_t src_sX; + int64_t src_sY; + int64_t src_sZ; + bool src_32b_ok; + void* src_ptr; + int64_t trgt_sN; + int64_t trgt_sC; + int64_t trgt_sX; + int64_t trgt_sY; + int64_t trgt_sZ; + int64_t trgt_sK; + bool trgt_32b_ok; + void* trgt_ptr; + int64_t grid_sN; + int64_t grid_sC; + int64_t grid_sX; + int64_t grid_sY; + int64_t grid_sZ; + bool grid_32b_ok; + void* grid_ptr; + int64_t out_sN; + int64_t out_sC; + int64_t out_sX; + int64_t out_sY; + int64_t out_sZ; + int64_t out_sK; // gradient dimension + bool out_32b_ok; + void* out_ptr; + int64_t grad_sN; + int64_t grad_sC; + int64_t grad_sX; + int64_t grad_sY; + int64_t grad_sZ; + bool grad_32b_ok; + void* grad_ptr; + + // Allow PushPullImpl's constructor to access PushPullAllocator's + // private members. 
+ template + friend class PushPullImpl; + }; + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // INITIALISATION + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + MONAI_HOST + void PushPullAllocator::init_all() { + src_opt = grid_opt = trgt_opt = TensorOptions(); + N = C = 1L; + src_X = src_Y = src_Z = 1L; + trgt_X = trgt_Y = trgt_Z = 1L; + trgt_K = 0L; + src_sN = src_sC = src_sX = src_sY = src_sZ = 0L; + grid_sN = grid_sC = grid_sX = grid_sY = grid_sZ = 0L; + grad_sN = grad_sC = grad_sX = grad_sY = grad_sZ = 0L; + trgt_sN = trgt_sC = trgt_sX = trgt_sY = trgt_sZ = trgt_sK = 0L; + out_sN = out_sC = out_sX = out_sY = out_sZ = out_sK = 0L; + src_ptr = trgt_ptr = grid_ptr = out_ptr = grad_ptr = static_cast(0); + src_32b_ok = trgt_32b_ok = grid_32b_ok = out_32b_ok = grad_32b_ok = true; + } + + MONAI_HOST + void PushPullAllocator::init_source(const Tensor& source) { + N = source.size(0); + C = source.size(1); + src_X = source.size(2); + src_Y = dim < 2 ? 1L : source.size(3); + src_Z = dim < 3 ? 1L : source.size(4); + src_sN = source.stride(0); + src_sC = source.stride(1); + src_sX = source.stride(2); + src_sY = dim < 2 ? 0L : source.stride(3); + src_sZ = dim < 3 ? 0L : source.stride(4); + src_ptr = source.data_ptr(); + src_opt = source.options(); + src_32b_ok = tensorCanUse32BitIndexMath(source); + } + + MONAI_HOST + void PushPullAllocator::init_source(IntArrayRef source_size) { + src_X = source_size[0]; + src_Y = dim < 2 ? 1L : source_size[1]; + src_Z = dim < 3 ? 1L : source_size[2]; + } + + MONAI_HOST + void PushPullAllocator::init_grid(const Tensor& grid) { + N = grid.size(0); + trgt_X = grid.size(1); + trgt_Y = dim < 2 ? 1L : grid.size(2); + trgt_Z = dim < 3 ? 1L : grid.size(3); + grid_sN = grid.stride(0); + grid_sX = grid.stride(1); + grid_sY = dim < 2 ? 0L : grid.stride(2); + grid_sZ = dim < 3 ? 0L : grid.stride(3); + grid_sC = grid.stride(dim == 1 ? 2 : dim == 2 ? 3 : 4); + grid_ptr = grid.data_ptr(); + grid_opt = grid.options(); + grid_32b_ok = tensorCanUse32BitIndexMath(grid); + } + + MONAI_HOST + void PushPullAllocator::init_target(const Tensor& target) { + N = target.size(0); + C = target.size(1); + trgt_X = target.size(2); + trgt_Y = dim < 2 ? 1L : target.size(3); + trgt_Z = dim < 3 ? 1L : target.size(4); + trgt_K = target.dim() == dim + 3 ? target.size(dim == 1 ? 3 : dim == 2 ? 4 : 5) : 0L; + trgt_sN = target.stride(0); + trgt_sC = target.stride(1); + trgt_sX = target.stride(2); + trgt_sY = dim < 2 ? 0L : target.stride(3); + trgt_sZ = dim < 3 ? 0L : target.stride(4); + trgt_sK = target.dim() == dim + 3 ? target.stride(dim == 1 ? 3 : dim == 2 ? 4 : 5) : 0L; + trgt_ptr = target.data_ptr(); + trgt_opt = target.options(); + trgt_32b_ok = tensorCanUse32BitIndexMath(target); + } + + MONAI_HOST + void PushPullAllocator::init_output() { + output.clear(); + if (do_pull) { + if (dim == 1) + output.push_back(at::empty({N, C, trgt_X}, src_opt)); + else if (dim == 2) + output.push_back(at::empty({N, C, trgt_X, trgt_Y}, src_opt)); + else + output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z}, src_opt)); + auto pull = output.back(); + out_sN = pull.stride(0); + out_sC = pull.stride(1); + out_sX = pull.stride(2); + out_sY = dim < 2 ? 0L : pull.stride(3); + out_sZ = dim < 3 ? 
0L : pull.stride(4); + out_sK = 0L; + out_ptr = pull.data_ptr(); + out_32b_ok = tensorCanUse32BitIndexMath(pull); + } else if (do_sgrad) { + if (dim == 1) + output.push_back(at::empty({N, C, trgt_X, 1}, src_opt)); + else if (dim == 2) + output.push_back(at::empty({N, C, trgt_X, trgt_Y, 2}, src_opt)); + else + output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z, 3}, src_opt)); + auto sgrad = output.back(); + out_sN = sgrad.stride(0); + out_sC = sgrad.stride(1); + out_sX = sgrad.stride(2); + out_sY = dim < 2 ? 0L : sgrad.stride(3); + out_sZ = dim < 3 ? 0L : sgrad.stride(4); + out_sK = sgrad.stride(dim == 1 ? 3 : dim == 2 ? 4 : 5); + out_ptr = sgrad.data_ptr(); + out_32b_ok = tensorCanUse32BitIndexMath(sgrad); + + if (iso && interpolation0 == InterpolationType::Nearest) + sgrad.zero_(); + if (iso && interpolation0 == InterpolationType::Linear && dim == 1) + sgrad.zero_(); + } else if (do_push) { + if (dim == 1) + output.push_back(at::zeros({N, C, src_X}, trgt_opt)); + else if (dim == 2) + output.push_back(at::zeros({N, C, src_X, src_Y}, trgt_opt)); + else + output.push_back(at::zeros({N, C, src_X, src_Y, src_Z}, trgt_opt)); + auto push = output.back(); + out_sN = push.stride(0); + out_sC = push.stride(1); + out_sX = push.stride(2); + out_sY = dim < 2 ? 0L : push.stride(3); + out_sZ = dim < 3 ? 0L : push.stride(4); + out_sK = 0L; + out_ptr = push.data_ptr(); + out_32b_ok = tensorCanUse32BitIndexMath(push); + } else if (do_count) { + if (dim == 1) + output.push_back(at::zeros({N, 1, src_X}, grid_opt)); + else if (dim == 2) + output.push_back(at::zeros({N, 1, src_X, src_Y}, grid_opt)); + else + output.push_back(at::zeros({N, 1, src_X, src_Y, src_Z}, grid_opt)); + auto count = output.back(); + out_sN = count.stride(0); + out_sC = count.stride(1); + out_sX = count.stride(2); + out_sY = dim < 2 ? 0L : count.stride(3); + out_sZ = dim < 3 ? 0L : count.stride(4); + out_sK = 0L; + out_ptr = count.data_ptr(); + out_32b_ok = tensorCanUse32BitIndexMath(count); + } + if (do_grad) { + if (dim == 1) + output.push_back(at::zeros({N, trgt_X, 1}, grid_opt)); + else if (dim == 2) + output.push_back(at::zeros({N, trgt_X, trgt_Y, 2}, grid_opt)); + else + output.push_back(at::zeros({N, trgt_X, trgt_Y, trgt_Z, 3}, grid_opt)); + auto grad = output.back(); + grad_sN = grad.stride(0); + grad_sX = grad.stride(1); + grad_sY = dim < 2 ? 0L : grad.stride(2); + grad_sZ = dim < 3 ? 0L : grad.stride(3); + grad_sC = grad.stride(dim == 1 ? 2 : dim == 2 ? 3 : 4); + grad_ptr = grad.data_ptr(); + out_32b_ok = tensorCanUse32BitIndexMath(grad); + + if (iso && interpolation0 == InterpolationType::Nearest) + grad.zero_(); + } + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // GENERIC PUSHPULL CLASS + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // This class implements the bulk of the code. + // /!\ No type and shape checking is performed here. 
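// How the two classes fit together: a sketch mirroring the dispatch at
// the bottom of this file (on CPU the offset type is always int64_t):
//
//   PushPullAllocator info(grid.dim() - 2, bound, interpolation, extrapolate,
//                          do_pull, do_push, do_count, do_grad, do_sgrad);
//   info.ioset(source, grid); // pull-style: record strides, allocate outputs
//   AT_DISPATCH_FLOATING_TYPES(grid.scalar_type(), "pushpull", [&] {
//     PushPullImpl<scalar_t, int64_t> algo(info); // copy sizes/strides/pointers
//     algo.loop();                                // visit every target voxel
//     return algo.output;
//   });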
+ + template + class PushPullImpl { + public: + // ~~~ CONSTRUCTOR ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + PushPullImpl(const PushPullAllocator& info) + : output(info.output), + dim(info.dim), + bound0(info.bound0), + bound1(info.bound1), + bound2(info.bound2), + interpolation0(info.interpolation0), + interpolation1(info.interpolation1), + interpolation2(info.interpolation1), + iso(info.iso), + extrapolate(info.extrapolate), + do_pull(info.do_pull), + do_push(info.do_push), + do_count(info.do_count), + do_grad(info.do_grad), + do_sgrad(info.do_sgrad), + N(static_cast(info.N)), + C(static_cast(info.C)), + src_X(static_cast(info.src_X)), + src_Y(static_cast(info.src_Y)), + src_Z(static_cast(info.src_Z)), + trgt_X(static_cast(info.trgt_X)), + trgt_Y(static_cast(info.trgt_Y)), + trgt_Z(static_cast(info.trgt_Z)), + trgt_K(static_cast(info.trgt_K)), + src_sN(static_cast(info.src_sN)), + src_sC(static_cast(info.src_sC)), + src_sX(static_cast(info.src_sX)), + src_sY(static_cast(info.src_sY)), + src_sZ(static_cast(info.src_sZ)), + src_ptr(static_cast(info.src_ptr)), + trgt_sN(static_cast(info.trgt_sN)), + trgt_sC(static_cast(info.trgt_sC)), + trgt_sX(static_cast(info.trgt_sX)), + trgt_sY(static_cast(info.trgt_sY)), + trgt_sZ(static_cast(info.trgt_sZ)), + trgt_sK(static_cast(info.trgt_sK)), + trgt_ptr(static_cast(info.trgt_ptr)), + grid_sN(static_cast(info.grid_sN)), + grid_sC(static_cast(info.grid_sC)), + grid_sX(static_cast(info.grid_sX)), + grid_sY(static_cast(info.grid_sY)), + grid_sZ(static_cast(info.grid_sZ)), + grid_ptr(static_cast(info.grid_ptr)), + out_sN(static_cast(info.out_sN)), + out_sC(static_cast(info.out_sC)), + out_sX(static_cast(info.out_sX)), + out_sY(static_cast(info.out_sY)), + out_sZ(static_cast(info.out_sZ)), + out_sK(static_cast(info.out_sK)), + out_ptr(static_cast(info.out_ptr)), + grad_sN(static_cast(info.grad_sN)), + grad_sC(static_cast(info.grad_sC)), + grad_sX(static_cast(info.grad_sX)), + grad_sY(static_cast(info.grad_sY)), + grad_sZ(static_cast(info.grad_sZ)), + grad_ptr(static_cast(info.grad_ptr)) {} + // ~~~ PUBLIC VALUE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ std::deque output; @@ -247,39 +575,8 @@ MONAI_NAMESPACE_DEVICE { // cpu // } // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - MONAI_HOST void ioset // Pull - (const Tensor& source, const Tensor& grid) { - init_all(); - init_source(source); - init_grid(grid); - init_output(); - } - - MONAI_HOST void ioset(const Tensor& source, const Tensor& grid, const Tensor& target) { - init_all(); - init_source(source); - init_grid(grid); - init_target(target); - init_output(); - } - - MONAI_HOST void ioset // Push - (IntArrayRef source_size, const Tensor& grid, const Tensor& target) { - init_all(); - init_source(source_size); - init_grid(grid); - init_target(target); - init_output(); - } - - MONAI_HOST void ioset // Count - (IntArrayRef source_size, const Tensor& grid) { - init_all(); - init_source(source_size); - init_grid(grid); - init_output(); - } + // Loop over all voxels void loop() const; MONAI_HOST MONAI_DEVICE int64_t voxcount() const { @@ -288,14 +585,18 @@ MONAI_NAMESPACE_DEVICE { // cpu private: // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - MONAI_HOST void init_all(); - MONAI_HOST void init_source(const Tensor& source); - MONAI_HOST void init_source(IntArrayRef source_size); - MONAI_HOST void init_grid(const Tensor& grid); - MONAI_HOST void init_target(const Tensor& target); - MONAI_HOST void init_output(); + MONAI_DEVICE void 
check1d(offset_t w, offset_t n) const; MONAI_DEVICE void check2d(offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void check3d(offset_t w, offset_t h, offset_t d, offset_t n) const; + MONAI_DEVICE void interpolate1d(scalar_t x, offset_t w, offset_t n) const; + MONAI_DEVICE void interpolate1d_nearest(scalar_t x, offset_t w, offset_t n) const; + MONAI_DEVICE void interpolate1d_linear(scalar_t x, offset_t w, offset_t n) const; + MONAI_DEVICE void interpolate1d_sliding(scalar_t x, offset_t w, offset_t n) const { /*TODO*/ + } + MONAI_DEVICE void interpolate1d_sliding_nearest(scalar_t x, offset_t w, offset_t n) const { /*TODO*/ + } + MONAI_DEVICE void interpolate1d_sliding_linear(scalar_t x, offset_t w, offset_t n) const { /*TODO*/ + } MONAI_DEVICE void interpolate2d(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void interpolate2d_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void interpolate2d_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; @@ -370,9 +671,6 @@ MONAI_NAMESPACE_DEVICE { // cpu bool do_sgrad; // sample spatial gradients // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - TensorOptions src_opt; - TensorOptions grid_opt; - TensorOptions trgt_opt; offset_t N; offset_t C; offset_t src_X; @@ -402,174 +700,24 @@ MONAI_NAMESPACE_DEVICE { // cpu offset_t grid_sZ; scalar_t* grid_ptr; offset_t out_sN; - offset_t out_sC; - offset_t out_sX; - offset_t out_sY; - offset_t out_sZ; - offset_t out_sK; // gradient dimension - scalar_t* out_ptr; - offset_t grad_sN; - offset_t grad_sC; - offset_t grad_sX; - offset_t grad_sY; - offset_t grad_sZ; - scalar_t* grad_ptr; - }; - - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // INITIALISATION - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - template - void PushPullImpl::init_all() { - src_opt = grid_opt = trgt_opt = TensorOptions(); - N = C = static_cast(1); - src_X = src_Y = src_Z = static_cast(1); - trgt_X = trgt_Y = trgt_Z = trgt_K = static_cast(1); - src_sN = src_sC = src_sX = src_sY = src_sZ = static_cast(0); - grid_sN = grid_sC = grid_sX = grid_sY = grid_sZ = static_cast(0); - grad_sN = grad_sC = grad_sX = grad_sY = grad_sZ = static_cast(0); - trgt_sN = trgt_sC = trgt_sX = trgt_sY = trgt_sZ = trgt_sK = static_cast(0); - out_sN = out_sC = out_sX = out_sY = out_sZ = out_sK = static_cast(0); - src_ptr = trgt_ptr = grid_ptr = out_ptr = grad_ptr = static_cast(0); - } - - template - MONAI_HOST void PushPullImpl::init_source(const Tensor& source) { - N = source.size(0); - C = source.size(1); - src_X = source.size(2); - src_Y = source.size(3); - src_Z = dim == 2 ? static_cast(1) : source.size(4); - src_sN = source.stride(0); - src_sC = source.stride(1); - src_sX = source.stride(2); - src_sY = source.stride(3); - src_sZ = dim == 2 ? static_cast(0) : source.stride(4); - src_ptr = source.data_ptr(); - src_opt = source.options(); - } - - template - MONAI_HOST void PushPullImpl::init_source(IntArrayRef source_size) { - src_X = source_size[0]; - src_Y = source_size[1]; - src_Z = dim == 2 ? static_cast(1) : source_size[2]; - } - - template - MONAI_HOST void PushPullImpl::init_grid(const Tensor& grid) { - N = grid.size(0); - trgt_X = grid.size(1); - trgt_Y = grid.size(2); - trgt_Z = dim == 2 ? static_cast(1) : grid.size(3); - grid_sN = grid.stride(0); - grid_sX = grid.stride(1); - grid_sY = grid.stride(2); - grid_sZ = dim == 2 ? 
static_cast(0) : grid.stride(3); - grid_sC = grid.stride(dim == 2 ? 3 : 4); - grid_ptr = grid.data_ptr(); - grid_opt = grid.options(); - } - - template - MONAI_HOST void PushPullImpl::init_target(const Tensor& target) { - N = target.size(0); - C = target.size(1); - trgt_X = target.size(2); - trgt_Y = target.size(3); - trgt_Z = dim == 2 ? static_cast(1) : target.size(4); - trgt_K = target.dim() == dim + 3 ? target.size(dim == 2 ? 4 : 5) : static_cast(1); - trgt_sN = target.stride(0); - trgt_sC = target.stride(1); - trgt_sX = target.stride(2); - trgt_sY = target.stride(3); - trgt_sZ = dim == 2 ? static_cast(0) : target.stride(4); - trgt_sK = target.dim() == dim + 3 ? target.stride(dim == 2 ? 4 : 5) : static_cast(0); - trgt_ptr = target.data_ptr(); - trgt_opt = target.options(); - } - - template - MONAI_HOST void PushPullImpl::init_output() { - output.clear(); - if (do_pull) { - if (dim == 2) - output.push_back(at::empty({N, C, trgt_X, trgt_Y}, src_opt)); - else - output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z}, src_opt)); - auto pull = output.back(); - out_sN = pull.stride(0); - out_sC = pull.stride(1); - out_sX = pull.stride(2); - out_sY = pull.stride(3); - out_sZ = dim == 2 ? static_cast(0) : pull.stride(4); - out_sK = static_cast(0); - out_ptr = pull.template data_ptr(); - } else if (do_sgrad) { - if (dim == 2) - output.push_back(at::empty({N, C, trgt_X, trgt_Y, 2}, src_opt)); - else - output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z, 3}, src_opt)); - auto sgrad = output.back(); - out_sN = sgrad.stride(0); - out_sC = sgrad.stride(1); - out_sX = sgrad.stride(2); - out_sY = sgrad.stride(3); - out_sZ = dim == 2 ? static_cast(0) : sgrad.stride(4); - out_sK = sgrad.stride(dim == 2 ? 4 : 5); - out_ptr = sgrad.template data_ptr(); - - if (iso && interpolation0 == InterpolationType::Nearest) - sgrad.zero_(); - } else if (do_push) { - if (dim == 2) - output.push_back(at::zeros({N, C, src_X, src_Y}, trgt_opt)); - else - output.push_back(at::zeros({N, C, src_X, src_Y, src_Z}, trgt_opt)); - auto push = output.back(); - out_sN = push.stride(0); - out_sC = push.stride(1); - out_sX = push.stride(2); - out_sY = push.stride(3); - out_sZ = dim == 2 ? static_cast(0) : push.stride(4); - out_sK = static_cast(0); - out_ptr = push.template data_ptr(); - } else if (do_count) { - if (dim == 2) - output.push_back(at::zeros({N, 1, src_X, src_Y}, grid_opt)); - else - output.push_back(at::zeros({N, 1, src_X, src_Y, src_Z}, grid_opt)); - auto count = output.back(); - out_sN = count.stride(0); - out_sC = count.stride(1); - out_sX = count.stride(2); - out_sY = count.stride(3); - out_sZ = dim == 2 ? static_cast(0) : count.stride(4); - out_sK = static_cast(0); - out_ptr = count.template data_ptr(); - } - if (do_grad) { - if (dim == 2) - output.push_back(at::zeros({N, src_X, src_Y, 2}, grid_opt)); - else - output.push_back(at::zeros({N, src_X, src_Y, src_Z, 3}, grid_opt)); - auto grad = output.back(); - grad_sN = grad.stride(0); - grad_sX = grad.stride(1); - grad_sY = grad.stride(2); - grad_sZ = dim == 2 ? static_cast(0) : grad.stride(3); - grad_sC = grad.stride(dim == 2 ? 
3 : 4); - grad_ptr = grad.template data_ptr(); - - if (iso && interpolation0 == InterpolationType::Nearest) - grad.zero_(); - } - } + offset_t out_sC; + offset_t out_sX; + offset_t out_sY; + offset_t out_sZ; + offset_t out_sK; // gradient dimension + scalar_t* out_ptr; + offset_t grad_sN; + offset_t grad_sC; + offset_t grad_sX; + offset_t grad_sY; + offset_t grad_sZ; + scalar_t* grad_ptr; + }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LOOP // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // This bit loops over all target voxels. We therefore need to // convert linear indices to multivariate indices. The way I do it // might not be optimal. @@ -586,7 +734,10 @@ MONAI_NAMESPACE_DEVICE { // cpu // parallelize across voxels. at::parallel_for(0, N, 0, [&](offset_t start, offset_t end) { for (offset_t n = start; n < end; ++n) { - if (dim == 2) { + if (dim == 1) { + for (offset_t w = 0; w < trgt_X; ++w) + check1d(w, n); + } else if (dim == 2) { for (offset_t h = 0; h < trgt_Y; ++h) for (offset_t w = 0; w < trgt_X; ++w) check2d(w, h, n); @@ -600,8 +751,8 @@ MONAI_NAMESPACE_DEVICE { // cpu }); return; } -#endif +#endif // Parallelize across voxels offset_t trgt_NXYZ = trgt_Z * trgt_Y * trgt_X * N; offset_t trgt_XYZ = trgt_Z * trgt_Y * trgt_X; @@ -615,7 +766,9 @@ MONAI_NAMESPACE_DEVICE { // cpu h = (i / trgt_Z) % trgt_Y; d = i % trgt_Z; - if (dim == 2) + if (dim == 1) + check1d(w, n); + else if (dim == 2) check2d(w, h, n); else check3d(w, h, d, n); @@ -631,6 +784,59 @@ MONAI_NAMESPACE_DEVICE { // cpu // 1) read the [x,y,z] source coordinate for the current target voxel // 3) check if the source coordinate is in bounds + template + MONAI_DEVICE void PushPullImpl::check3d(offset_t w, offset_t h, offset_t d, offset_t n) const { + // get the corresponding input x, y, z co-ordinates from grid + scalar_t* grid_ptr_NXYZ = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY + d * grid_sZ; + scalar_t x = *grid_ptr_NXYZ; + scalar_t y = grid_ptr_NXYZ[grid_sC]; + scalar_t z = grid_ptr_NXYZ[grid_sC * 2]; + + // Check if out-of-bound + if (!(extrapolate || + (inbounds(x, src_X, static_cast(TINY)) && inbounds(y, src_Y, static_cast(TINY)) && + inbounds(z, src_Z, static_cast(TINY))))) { + if (do_pull || do_sgrad) { + scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; + for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) { + *out_ptr_NCXYZ = static_cast(0); + if (do_sgrad) { + out_ptr_NCXYZ[out_sK] = static_cast(0); + out_ptr_NCXYZ[out_sK * 2] = static_cast(0); + } + } + } + if (do_grad) { + scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; + (*grad_ptr_NXYZ) = static_cast(0); + grad_ptr_NXYZ[grad_sC] = static_cast(0); + grad_ptr_NXYZ[grad_sC * 2] = static_cast(0); + } + return; + } + + // Next step + if (bound0 == BoundType::Sliding) { + if (iso) + switch (static_cast(interpolation0)) { + case 0: + return interpolate3d_sliding_nearest(x, y, z, w, h, d, n); + case 1: + return interpolate3d_sliding_trilinear(x, y, z, w, h, d, n); + } + return interpolate3d_sliding(x, y, z, w, h, d, n); + } else { + if (iso) + switch (static_cast(interpolation0)) { + case 0: + return interpolate3d_nearest(x, y, z, w, h, d, n); + case 1: + return interpolate3d_trilinear(x, y, z, w, h, d, n); + } + return interpolate3d(x, y, z, w, h, d, n); + } + } + template MONAI_DEVICE void PushPullImpl::check2d(offset_t w, offset_t h, offset_t n) const { // get the corresponding input x, y, z co-ordinates from 
grid @@ -642,7 +848,7 @@ MONAI_NAMESPACE_DEVICE { // cpu if (!(extrapolate || (inbounds(x, src_X, static_cast(TINY)) && inbounds(y, src_Y, static_cast(TINY))))) { if (do_pull || do_sgrad) { - scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sZ + h * out_sY; + scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) { *out_ptr_NCXY = static_cast(0); if (do_sgrad) @@ -680,32 +886,25 @@ MONAI_NAMESPACE_DEVICE { // cpu } template - MONAI_DEVICE void PushPullImpl::check3d(offset_t w, offset_t h, offset_t d, offset_t n) const { + MONAI_DEVICE void PushPullImpl::check1d(offset_t w, offset_t n) const { // get the corresponding input x, y, z co-ordinates from grid - scalar_t* grid_ptr_NXYZ = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY + d * grid_sZ; - scalar_t x = *grid_ptr_NXYZ; - scalar_t y = grid_ptr_NXYZ[grid_sC]; - scalar_t z = grid_ptr_NXYZ[grid_sC * 2]; + scalar_t* grid_ptr_NX = grid_ptr + n * grid_sN + w * grid_sX; + scalar_t x = *grid_ptr_NX; // Check if out-of-bound - if (!(extrapolate || - (inbounds(x, src_X, static_cast(TINY)) && inbounds(y, src_Y, static_cast(TINY)) && - inbounds(z, src_Z, static_cast(TINY))))) { + if (!(extrapolate || inbounds(x, src_X, static_cast(TINY)))) { if (do_pull || do_sgrad) { - scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; - for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) { - *out_ptr_NCXYZ = static_cast(0); - if (do_sgrad) { - out_ptr_NCXYZ[out_sK] = static_cast(0); - out_ptr_NCXYZ[out_sK * 2] = static_cast(0); - } + scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC) { + *out_ptr_NCX = static_cast(0); + if (do_sgrad) + out_ptr_NCX[out_sK] = static_cast(0); } } if (do_grad) { - scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; - (*grad_ptr_NXYZ) = static_cast(0); - grad_ptr_NXYZ[grad_sC] = static_cast(0); - grad_ptr_NXYZ[grad_sC * 2] = static_cast(0); + scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX; + (*grad_ptr_NX) = static_cast(0); + grad_ptr_NX[grad_sC] = static_cast(0); } return; } @@ -715,20 +914,20 @@ MONAI_NAMESPACE_DEVICE { // cpu if (iso) switch (static_cast(interpolation0)) { case 0: - return interpolate3d_sliding_nearest(x, y, z, w, h, d, n); + return interpolate1d_sliding_nearest(x, w, n); case 1: - return interpolate3d_sliding_trilinear(x, y, z, w, h, d, n); + return interpolate1d_sliding_linear(x, w, n); } - return interpolate3d_sliding(x, y, z, w, h, d, n); + return interpolate1d_sliding(x, w, n); } else { if (iso) switch (static_cast(interpolation0)) { case 0: - return interpolate3d_nearest(x, y, z, w, h, d, n); + return interpolate1d_nearest(x, w, n); case 1: - return interpolate3d_trilinear(x, y, z, w, h, d, n); + return interpolate1d_linear(x, w, n); } - return interpolate3d(x, y, z, w, h, d, n); + return interpolate1d(x, w, n); } } @@ -763,7 +962,7 @@ MONAI_NAMESPACE_DEVICE { // cpu if (trgt_ptr && (do_push || do_grad)) for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC) { target[c] = *trgt_ptr_NCXYZ; - if (trgt_K > 1) { + if (trgt_K > 0) { target[c + C] = trgt_ptr_NCXYZ[trgt_sK]; target[c + C * 2] = trgt_ptr_NCXYZ[trgt_sK * 2]; } @@ -881,7 +1080,7 @@ MONAI_NAMESPACE_DEVICE { // cpu // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. 
push/pull scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) @@ -904,7 +1103,7 @@ MONAI_NAMESPACE_DEVICE { // cpu // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. pull/push scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot = static_cast(0); @@ -973,7 +1172,7 @@ MONAI_NAMESPACE_DEVICE { // cpu if (trgt_ptr && (do_push || do_grad)) for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC) { target[c] = *trgt_ptr_NCXY; - if (trgt_K > 1) { + if (trgt_K > 0) { target[c + C] = trgt_ptr_NCXY[trgt_sK]; } } @@ -1066,7 +1265,7 @@ MONAI_NAMESPACE_DEVICE { // cpu // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. push/pull scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) @@ -1088,7 +1287,7 @@ MONAI_NAMESPACE_DEVICE { // cpu // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. pull/push scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot = static_cast(0); @@ -1125,6 +1324,150 @@ MONAI_NAMESPACE_DEVICE { // cpu } } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // GENERIC INTERPOLATION 1D + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + MONAI_DEVICE void PushPullImpl::interpolate1d(scalar_t x, offset_t w, offset_t n) const { + // Get corner pixel values from (x, y) + offset_t bx0, bx1; + interpolation::bounds(interpolation0, x, bx0, bx1); + offset_t dbx = bx1 - bx0; + + // Pre-compute offsets and target value + scalar_t* src_ptr_NC0 = src_ptr + n * src_sN; + scalar_t* out_ptr_NC0 = out_ptr + n * out_sN; + scalar_t* out_ptr_NCX0 = out_ptr + n * out_sN + w * out_sX; + scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX; + scalar_t target[2 * MONAI_MAX_NUM_CHANNELS]; + if (trgt_ptr && (do_push || do_grad)) + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC) { + target[c] = *trgt_ptr_NCX; + if (trgt_K > 0) { + target[c + C] = trgt_ptr_NCX[trgt_sK]; + } + } + + // Initialize output + scalar_t* out_ptr_NCX = out_ptr_NCX0; + if (do_pull || do_sgrad) { + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC) { + *out_ptr_NCX = static_cast(0); + if (do_sgrad) { + out_ptr_NCX[out_sK] = static_cast(0); + } + } + } + + // Pre-compute indices/weights/grad + scalar_t wx[8]; // B-spline weights + scalar_t gx[8]; // B-spline derivatives + scalar_t hx[8]; // B-spline 2nd derivatives + offset_t ix[8]; // Warped indices + uint8_t sx[8]; // Warped indices + + { + scalar_t *owx = static_cast(wx), *ogx = static_cast(gx), *ohx = static_cast(hx); + offset_t* oix = static_cast(ix); + uint8_t* osx = static_cast(sx); + for (offset_t bx = bx0; bx <= bx1; ++bx) { + scalar_t dx = x - bx; + *(owx++) = interpolation::fastweight(interpolation0, dx); + if (do_grad || do_sgrad) + *(ogx++) = interpolation::fastgrad(interpolation0, dx); + if (do_grad && trgt_sK > 1) + *(ohx++) = interpolation::fasthess(interpolation0, dx); + *(osx++) = bound::sign(bound0, bx, src_X); + *(oix++) = bound::index(bound0, bx, src_X); + } + } + + // Convolve coefficients with basis functions + scalar_t ogx; + ogx = static_cast(0); + for (offset_t i = 0; i <= dbx; ++i) { + offset_t oox = ix[i] * out_sX; + offset_t osx = ix[i] * src_sX; + uint8_t sxx = sx[i]; + scalar_t wxx = wx[i]; + scalar_t gxx = gx[i]; + scalar_t hxx = hx[i]; + + // 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_pull) { + scalar_t* src_ptr_NC = src_ptr_NC0; + scalar_t* out_ptr_NCX = out_ptr_NCX0; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) + *out_ptr_NCX += bound::get(src_ptr_NC, osx, sxx) * wxx; + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_sgrad) { + scalar_t* src_ptr_NC = src_ptr_NC0; + scalar_t* out_ptr_NCX = out_ptr_NCX0; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) { + scalar_t src = bound::get(src_ptr_NC, osx, sxx); + *out_ptr_NCX += src * gxx; + } + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_push) { + if (trgt_K == 0) { + // Diff w.r.t. push/pull + scalar_t* out_ptr_NC = out_ptr_NC0; + for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) + bound::add(out_ptr_NC, oox, wxx * target[c], sxx); + } else { + // Diff w.r.t. sgrad + scalar_t* out_ptr_NC = out_ptr_NC0; + for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) { + scalar_t val = gxx * target[c]; + bound::add(out_ptr_NC, oox, val, sxx); + } + } + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_count) { + bound::add(out_ptr_NC0, oox, wxx, sxx); + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_grad) { + if (trgt_K == 0) { + // Diff w.r.t. pull/push + scalar_t* src_ptr_NC = src_ptr_NC0; + scalar_t dot = static_cast(0); + for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { + scalar_t src = bound::get(src_ptr_NC, osx, sxx); + dot += (trgt_ptr ? src * target[c] : src); + // trgt_ptr == 0 in the backward pass of 'count' + } + ogx += gxx * dot; + } else { + // Diff w.r.t. sgrad + scalar_t* src_ptr_NC = src_ptr_NC0; + scalar_t dot; + dot = static_cast(0); + for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { + scalar_t src = bound::get(src_ptr_NC, osx, sxx); + dot += src * target[c]; + } + ogx += hxx * dot; + } + } + + } // x + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_grad) { + scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX; + (*grad_ptr_NX) = ogx; + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1214,7 +1557,7 @@ MONAI_NAMESPACE_DEVICE { // cpu scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; - if (trgt_K == 1) { + if (trgt_K == 0) { // backward w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; @@ -1376,7 +1719,7 @@ MONAI_NAMESPACE_DEVICE { // cpu o111 = ix1 * out_sX + iy1 * out_sY + iz1 * out_sZ; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* out_ptr_NC = out_ptr + n * out_sN; - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. 
push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt = *trgt_ptr_NCXYZ; @@ -1461,7 +1804,6 @@ MONAI_NAMESPACE_DEVICE { // cpu scalar_t w10 = dx1 * dy0; scalar_t w01 = dx0 * dy1; scalar_t w11 = dx1 * dy1; - ; // Sign (/!\ compute sign before warping indices) int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X); @@ -1500,7 +1842,7 @@ MONAI_NAMESPACE_DEVICE { // cpu scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; - if (trgt_K == 1) { + if (trgt_K == 0) { // backward w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; @@ -1547,9 +1889,9 @@ MONAI_NAMESPACE_DEVICE { // cpu } } - scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; - (*grad_ptr_NXYZ) = gx; - grad_ptr_NXYZ[grad_sC] = gy; + scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; + (*grad_ptr_NXY) = gx; + grad_ptr_NXY[grad_sC] = gy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { @@ -1591,7 +1933,7 @@ MONAI_NAMESPACE_DEVICE { // cpu o11 = ix1 * out_sX + iy1 * out_sY; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* out_ptr_NC = out_ptr + n * out_sN; - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt = *trgt_ptr_NCXY; @@ -1632,6 +1974,123 @@ MONAI_NAMESPACE_DEVICE { // cpu } } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // LINEAR INTERPOLATION 1D + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + MONAI_DEVICE void PushPullImpl::interpolate1d_linear(scalar_t x, offset_t w, offset_t n) const { + // Get corner pixel values from (x) + offset_t ix0 = static_cast(std::floor(x)); + + // Interpolation weights (inversely proportional to distance) + scalar_t w1 = x - ix0; + scalar_t w0 = 1. - w1; + + // Sign (/!\ compute sign before warping indices) + int8_t s1 = bound::sign(bound0, ix0 + 1, src_X); + int8_t s0 = bound::sign(bound0, ix0, src_X); + + // Warp indices + offset_t ix1; + ix1 = bound::index(bound0, ix0 + 1, src_X); + ix0 = bound::index(bound0, ix0, src_X); + + // Offsets into source volume + offset_t o0, o1; + if (do_pull || do_grad || do_sgrad) { + o0 = ix0 * src_sX; + o1 = ix1 * src_sX; + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_grad) { + if (trgt_K == 0) { + // backward w.r.t. push/pull + + o0 = ix0 * src_sX; + o1 = ix1 * src_sX; + scalar_t gx = static_cast(0); + scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX; + scalar_t* src_ptr_NC = src_ptr + n * src_sN; + + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, src_ptr_NC += src_sC) { + scalar_t src; + scalar_t trgt = trgt_ptr ? *trgt_ptr_NCX : static_cast(1); + // ^ trgt_ptr == 0 during the backward pass of count + src = bound::get(src_ptr_NC, o0, s0); + if (trgt_ptr) + src *= trgt; + gx -= src; + src = bound::get(src_ptr_NC, o1, s1); + if (trgt_ptr) + src *= trgt; + gx += src; + } + + scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX; + (*grad_ptr_NX) = gx; + } else { + // backward w.r.t. 
sgrad + // -> zero (make sure this is done at initialization) + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_pull) { + o0 = ix0 * src_sX; + o1 = ix1 * src_sX; + scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX; + scalar_t* src_ptr_NC = src_ptr + n * src_sN; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) { + *out_ptr_NCX = bound::get(src_ptr_NC, o0, s0) * w0 + bound::get(src_ptr_NC, o1, s1) * w1; + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_sgrad) { + o0 = ix0 * src_sX; + o1 = ix1 * src_sX; + scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX; + scalar_t* src_ptr_NC = src_ptr + n * src_sN; + + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) { + *out_ptr_NCX = bound::get(src_ptr_NC, o1, s1) - bound::get(src_ptr_NC, o0, s0); + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_push) { + // Offsets into 'push' volume + o0 = ix0 * out_sX; + o1 = ix1 * out_sX; + scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX; + scalar_t* out_ptr_NC = out_ptr + n * out_sN; + if (trgt_K == 0) { + // Diff w.r.t. push/pull + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) { + scalar_t trgt = *trgt_ptr_NCX; + bound::add(out_ptr_NC, o0, w0 * trgt, s0); + bound::add(out_ptr_NC, o1, w1 * trgt, s1); + } + } else { + // Diff w.r.t. sgrad + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) { + scalar_t trgt0 = *trgt_ptr_NCX; + bound::add(out_ptr_NC, o0, -trgt0, s0); + bound::add(out_ptr_NC, o1, trgt0, s1); + } + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_count) { + // Offsets into 'push' volume + o0 = ix0 * out_sX; + o1 = ix1 * out_sX; + + scalar_t* out_ptr_N = out_ptr + n * out_sN; + bound::add(out_ptr_N, o0, w0, s0); + bound::add(out_ptr_N, o1, w1, s1); + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // NEAREST NEIGHBOR INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1666,7 +2125,7 @@ MONAI_NAMESPACE_DEVICE { // cpu scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXYZ = bound::get(src_ptr_NC, o, s); - } else if (do_push && trgt_K == 1) { + } else if (do_push && trgt_K == 0) { offset_t o = iz * out_sZ + iy * out_sY + ix * out_sX; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* out_ptr_NC = out_ptr + n * out_sN; @@ -1709,7 +2168,7 @@ MONAI_NAMESPACE_DEVICE { // cpu scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXY = bound::get(src_ptr_NC, o, s); - } else if (do_push && trgt_K == 1) { + } else if (do_push && trgt_K == 0) { offset_t o = iy * out_sY + ix * out_sX; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* out_ptr_NC = out_ptr + n * out_sN; @@ -1722,10 +2181,48 @@ MONAI_NAMESPACE_DEVICE { // cpu bound::add(out_ptr_NC, o, static_cast(1), s); } } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // NEAREST NEIGHBOR INTERPOLATION 1D + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + MONAI_DEVICE void PushPullImpl::interpolate1d_nearest(scalar_t x, offset_t w, offset_t n) const { 
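// Nearest-neighbour sampling in 1D: round the voxel-space coordinate x to
// the closest source index, remap it through the boundary condition
// (sign + folded index), then either read one sample (pull), splat the
// target value (push), or accumulate a unit weight (count). There is no
// grad/sgrad branch: the nearest-neighbour kernel is piecewise constant,
// so those outputs were already zeroed in init_output().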
+ offset_t i = static_cast(std::round(x)); + + // Boundary condition (/!\ compute sign before warping indices) + int8_t s = bound::sign(bound0, i, src_X); + i = bound::index(bound0, i, src_X); + + if (do_pull) { + offset_t o = i * src_sX; + scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX; + scalar_t* src_ptr_NC = src_ptr + n * src_sN; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) + *out_ptr_NCX = bound::get(src_ptr_NC, o, s); + } else if (do_push && trgt_K == 0) { + offset_t o = i * out_sX; + scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX; + scalar_t* out_ptr_NC = out_ptr + n * out_sN; + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) + bound::add(out_ptr_NC, o, *trgt_ptr_NCX, s); + } else if (do_count) { + offset_t o = i * out_sX; + scalar_t* out_ptr_NC = out_ptr + n * out_sN; + for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) + bound::add(out_ptr_NC, o, static_cast(1), s); + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 3D + SLIDING BOUNDARY // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // TODO + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // CUDA KERNEL (MUST BE OUT OF CLASS) + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + } // namespace // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1757,8 +2254,6 @@ MONAI_NAMESPACE_DEVICE { // cpu PUSHPULL_INSTANTIATE1(BoundType); \ PUSHPULL_INSTANTIATE1(BoundVectorRef) - // ~~~ CPU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // Two arguments (source, grid) // > `bound` and `interpolation` can be single arguments or vectors. template @@ -1773,12 +2268,14 @@ MONAI_NAMESPACE_DEVICE { // cpu bool do_count, bool do_grad, bool do_sgrad) { + PushPullAllocator info( + grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); + info.ioset(source, grid); + return AT_DISPATCH_FLOATING_TYPES(grid.scalar_type(), "pushpull", [&] { - PushPullImpl f( - grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); - f.ioset(source, grid); - f.loop(); - return f.output; + PushPullImpl algo(info); + algo.loop(); + return algo.output; }); } @@ -1798,17 +2295,18 @@ MONAI_NAMESPACE_DEVICE { // cpu bool do_count, bool do_grad, bool do_sgrad) { + PushPullAllocator info( + grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); + info.ioset(source, grid, target); + return AT_DISPATCH_FLOATING_TYPES(grid.scalar_type(), "pushpull", [&] { - PushPullImpl f( - grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); - f.ioset(source, grid, target); - f.loop(); - return f.output; + PushPullImpl algo(info); + algo.loop(); + return algo.output; }); } PUSHPULL_INSTANTIATE; -} // namespace - +} // namespace cpu } // namespace monai diff --git a/monai/csrc/resample/pushpull_cuda.cu b/monai/csrc/resample/pushpull_cuda.cu index ecfeb562ab..38d34ffe98 100644 --- a/monai/csrc/resample/pushpull_cuda.cu +++ b/monai/csrc/resample/pushpull_cuda.cu @@ -25,6 +25,7 @@ limitations under the License. // TODO: // . [DONE] generic 3d // . [DONE] generic 2d +// . [DONE] generic 1d // . sliding nearest 3d // . sliding nearest 2d // . sliding linear 3d @@ -37,6 +38,7 @@ limitations under the License. // . 
input bound/inter are always vectors -> clean unused constructors #include +#include #include #include "bounds_common.h" #include "interpolation_common.h" @@ -71,18 +73,27 @@ MONAI_NAMESPACE_DEVICE { // cuda namespace { // anonymous namespace > everything inside has internal linkage // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // GENERIC PUSHPULL CLASS + // INDEXING UTILS // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // This class implements the bulk of the code. - // /!\ No type and shape checking is performed here. - template - class PushPullImpl { + // This class reads and sets all the parameters that will later be used + // by the algorithm in PushPullImpl. All of this is done outside of the + // implementation class so that we do not depend on generic types. The + // point is to pre-allocate all necessary tensors so that we can check + // if they're all compatible with 32 bit math. If it's the case, we can + // dispatch to a 32b cuda implementation, which might increase + // performance. Else, we use 64 bit math to compute offsets. + // (On CPU, we always use 64 bit offsets because it doesn't make a huge + // difference. It would be different if we had a vectorized + // implementation as in PyTorch). + class PushPullAllocator { public: + static constexpr int64_t max_int32 = std::numeric_limits::max(); + // ~~~ CONSTRUCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MONAI_HOST - PushPullImpl( + PushPullAllocator( int dim, BoundVectorRef bound, InterpolationVectorRef interpolation, @@ -122,100 +133,417 @@ MONAI_NAMESPACE_DEVICE { // cuda iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } - MONAI_HOST - PushPullImpl( - int dim, - BoundType bound, - InterpolationVectorRef interpolation, - bool extrapolate, - bool do_pull, - bool do_push, - bool do_count, - bool do_grad, - bool do_sgrad) - : dim(dim), - bound0(bound), - bound1(bound), - bound2(bound), - interpolation0(interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), - interpolation1( - interpolation.size() > 1 ? interpolation[1] - : interpolation.size() > 0 ? interpolation[0] - : InterpolationType::Linear), - interpolation2( - interpolation.size() > 2 ? interpolation[2] - : interpolation.size() > 1 ? interpolation[1] - : interpolation.size() > 0 ? interpolation[0] - : InterpolationType::Linear), - extrapolate(extrapolate), - do_pull(do_pull), - do_push(do_push), - do_count(do_count), - do_grad(do_grad), - do_sgrad(do_sgrad) { - iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; + // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + // Usually used for pull: + // - do_pull -> return source[grid] + // - do_push -> fails + // - do_grad -> return J(source)[grid] + // - do_sgrad -> return H(source)[grid] + MONAI_HOST void ioset(const Tensor& source, const Tensor& grid) { + init_all(); + init_source(source); + init_grid(grid); + init_output(); } - MONAI_HOST - PushPullImpl( - int dim, - BoundVectorRef bound, - InterpolationType interpolation, - bool extrapolate, - bool do_pull, - bool do_push, - bool do_count, - bool do_grad, - bool do_sgrad) - : dim(dim), - bound0(bound.size() > 0 ? bound[0] : BoundType::Replicate), - bound1( - bound.size() > 1 ? bound[1] - : bound.size() > 0 ? bound[0] - : BoundType::Replicate), - bound2( - bound.size() > 2 ? bound[2] - : bound.size() > 1 ? bound[1] - : bound.size() > 0 ? 
bound[0] - : BoundType::Replicate), - interpolation0(interpolation), - interpolation1(interpolation), - interpolation2(interpolation), - extrapolate(extrapolate), - do_pull(do_pull), - do_push(do_push), - do_count(do_count), - do_grad(do_grad), - do_sgrad(do_sgrad) { - iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; + // Usually used for pull_backward: + // - do_pull -> return source[grid] + // - do_push -> return push(target, grid, source.shape) + // - do_grad -> return J(source)[grid] + // - do_sgrad -> return H(source)[grid] + MONAI_HOST void ioset(const Tensor& source, const Tensor& grid, const Tensor& target) { + init_all(); + init_source(source); + init_grid(grid); + init_target(target); + init_output(); } - MONAI_HOST - PushPullImpl( - int dim, - BoundType bound, - InterpolationType interpolation, - bool extrapolate, - bool do_pull, - bool do_push, - bool do_count, - bool do_grad, - bool do_sgrad) - : dim(dim), - bound0(bound), - bound1(bound), - bound2(bound), - interpolation0(interpolation), - interpolation1(interpolation), - interpolation2(interpolation), - extrapolate(extrapolate), - do_pull(do_pull), - do_push(do_push), - do_count(do_count), - do_grad(do_grad), - do_sgrad(do_sgrad) { - iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; + // Usually used for push: + // - do_pull -> fails + // - do_push -> return push(target, grid, source_size) + // - do_grad -> fails + // - do_sgrad -> fails + MONAI_HOST void ioset(IntArrayRef source_size, const Tensor& grid, const Tensor& target) { + init_all(); + init_source(source_size); + init_grid(grid); + init_target(target); + init_output(); + } + + // Usually used for count: + // - do_pull -> fails + // - do_push -> return push(ones, grid, source_size) + // - do_grad -> fails + // - do_sgrad -> fails + MONAI_HOST void ioset(IntArrayRef source_size, const Tensor& grid) { + init_all(); + init_source(source_size); + init_grid(grid); + init_output(); + } + + // We just check that all tensors that we own are compatible with 32b math + bool canUse32BitIndexMath(int64_t max_elem = max_int32) const { + return src_32b_ok && trgt_32b_ok && grid_32b_ok && grad_32b_ok && out_32b_ok; + } + + private: + // Copied from aten/src/ATen/native/IndexingUtils.cpp in PyTorch 1.6. + // It is used to decide to which pointer type we should dispatch to. + // Basically, we need to make sure that the "furthest" element we need + // to reach is less than max_elem away. 
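+  // For instance (an illustrative shape, not taken from this patch): a
+  // contiguous tensor of shape (1, 1, 1300, 1300, 1300) holds about 2.2e9
+  // elements, which exceeds 2^31 - 1, so its offsets need 64 bit indexing
+  // even though every individual dimension fits comfortably in 32 bits.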
+  static bool tensorCanUse32BitIndexMath(const Tensor& t, int64_t max_elem = max_int32) {
+    int64_t elements = t.numel();
+    if (elements >= max_elem) {
+      return false;
+    }
+    if (elements == 0) {
+      return max_elem > 0;
+    }
+
+    int64_t offset = 0;
+    int64_t linearId = elements - 1;
+
+    // NOTE: Assumes all strides are positive, which is true for now
+    for (int i = t.dim() - 1; i >= 0; --i) {
+      int64_t curDimIndex = linearId % t.size(i);
+      int64_t curDimOffset = curDimIndex * t.stride(i);
+      offset += curDimOffset;
+      linearId /= t.size(i);
+    }
+
+    if (offset >= max_elem) {
+      return false;
+    }
+
+    return true;
+  }
+
+  // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  MONAI_HOST void init_all();
+  MONAI_HOST void init_source(const Tensor& source);
+  MONAI_HOST void init_source(IntArrayRef source_size);
+  MONAI_HOST void init_grid(const Tensor& grid);
+  MONAI_HOST void init_target(const Tensor& target);
+  MONAI_HOST void init_output();
+
+  // ~~~ OPTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  int dim; // dimensionality (1, 2 or 3)
+  BoundType bound0; // boundary condition // x|W
+  BoundType bound1; // boundary condition // y|H
+  BoundType bound2; // boundary condition // z|D
+  InterpolationType interpolation0; // interpolation order // x|W
+  InterpolationType interpolation1; // interpolation order // y|H
+  InterpolationType interpolation2; // interpolation order // z|D
+  bool iso; // isotropic interpolation?
+  bool extrapolate; // compute out-of-bound values
+  bool do_pull; // sample a volume
+  bool do_push; // splat a volume
+  bool do_count; // splatting weights (= jacobian determinant)
+  bool do_grad; // backprop: gradient of grid // pull
+  bool do_sgrad; // sample spatial gradients
+
+  // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  std::deque<Tensor> output;
+  TensorOptions src_opt;
+  TensorOptions grid_opt;
+  TensorOptions trgt_opt;
+  int64_t N;
+  int64_t C;
+  int64_t src_X;
+  int64_t src_Y;
+  int64_t src_Z;
+  int64_t trgt_X;
+  int64_t trgt_Y;
+  int64_t trgt_Z;
+  int64_t trgt_K;
+  int64_t src_sN;
+  int64_t src_sC;
+  int64_t src_sX;
+  int64_t src_sY;
+  int64_t src_sZ;
+  bool src_32b_ok;
+  void* src_ptr;
+  int64_t trgt_sN;
+  int64_t trgt_sC;
+  int64_t trgt_sX;
+  int64_t trgt_sY;
+  int64_t trgt_sZ;
+  int64_t trgt_sK;
+  bool trgt_32b_ok;
+  void* trgt_ptr;
+  int64_t grid_sN;
+  int64_t grid_sC;
+  int64_t grid_sX;
+  int64_t grid_sY;
+  int64_t grid_sZ;
+  bool grid_32b_ok;
+  void* grid_ptr;
+  int64_t out_sN;
+  int64_t out_sC;
+  int64_t out_sX;
+  int64_t out_sY;
+  int64_t out_sZ;
+  int64_t out_sK; // gradient dimension
+  bool out_32b_ok;
+  void* out_ptr;
+  int64_t grad_sN;
+  int64_t grad_sC;
+  int64_t grad_sX;
+  int64_t grad_sY;
+  int64_t grad_sZ;
+  bool grad_32b_ok;
+  void* grad_ptr;
+
+  // Allow PushPullImpl's constructor to access PushPullAllocator's
+  // private members.
+ template + friend class PushPullImpl; + }; + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // INITIALISATION + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + MONAI_HOST + void PushPullAllocator::init_all() { + src_opt = grid_opt = trgt_opt = TensorOptions(); + N = C = 1L; + src_X = src_Y = src_Z = 1L; + trgt_X = trgt_Y = trgt_Z = 1L; + trgt_K = 0L; + src_sN = src_sC = src_sX = src_sY = src_sZ = 0L; + grid_sN = grid_sC = grid_sX = grid_sY = grid_sZ = 0L; + grad_sN = grad_sC = grad_sX = grad_sY = grad_sZ = 0L; + trgt_sN = trgt_sC = trgt_sX = trgt_sY = trgt_sZ = trgt_sK = 0L; + out_sN = out_sC = out_sX = out_sY = out_sZ = out_sK = 0L; + src_ptr = trgt_ptr = grid_ptr = out_ptr = grad_ptr = static_cast(0); + src_32b_ok = trgt_32b_ok = grid_32b_ok = out_32b_ok = grad_32b_ok = true; + } + + MONAI_HOST + void PushPullAllocator::init_source(const Tensor& source) { + N = source.size(0); + C = source.size(1); + src_X = source.size(2); + src_Y = dim < 2 ? 1L : source.size(3); + src_Z = dim < 3 ? 1L : source.size(4); + src_sN = source.stride(0); + src_sC = source.stride(1); + src_sX = source.stride(2); + src_sY = dim < 2 ? 0L : source.stride(3); + src_sZ = dim < 3 ? 0L : source.stride(4); + src_ptr = source.data_ptr(); + src_opt = source.options(); + src_32b_ok = tensorCanUse32BitIndexMath(source); + } + + MONAI_HOST + void PushPullAllocator::init_source(IntArrayRef source_size) { + src_X = source_size[0]; + src_Y = dim < 2 ? 1L : source_size[1]; + src_Z = dim < 3 ? 1L : source_size[2]; + } + + MONAI_HOST + void PushPullAllocator::init_grid(const Tensor& grid) { + N = grid.size(0); + trgt_X = grid.size(1); + trgt_Y = dim < 2 ? 1L : grid.size(2); + trgt_Z = dim < 3 ? 1L : grid.size(3); + grid_sN = grid.stride(0); + grid_sX = grid.stride(1); + grid_sY = dim < 2 ? 0L : grid.stride(2); + grid_sZ = dim < 3 ? 0L : grid.stride(3); + grid_sC = grid.stride(dim == 1 ? 2 : dim == 2 ? 3 : 4); + grid_ptr = grid.data_ptr(); + grid_opt = grid.options(); + grid_32b_ok = tensorCanUse32BitIndexMath(grid); + } + + MONAI_HOST + void PushPullAllocator::init_target(const Tensor& target) { + N = target.size(0); + C = target.size(1); + trgt_X = target.size(2); + trgt_Y = dim < 2 ? 1L : target.size(3); + trgt_Z = dim < 3 ? 1L : target.size(4); + trgt_K = target.dim() == dim + 3 ? target.size(dim == 1 ? 3 : dim == 2 ? 4 : 5) : 0L; + trgt_sN = target.stride(0); + trgt_sC = target.stride(1); + trgt_sX = target.stride(2); + trgt_sY = dim < 2 ? 0L : target.stride(3); + trgt_sZ = dim < 3 ? 0L : target.stride(4); + trgt_sK = target.dim() == dim + 3 ? target.stride(dim == 1 ? 3 : dim == 2 ? 4 : 5) : 0L; + trgt_ptr = target.data_ptr(); + trgt_opt = target.options(); + trgt_32b_ok = tensorCanUse32BitIndexMath(target); + } + + MONAI_HOST + void PushPullAllocator::init_output() { + output.clear(); + if (do_pull) { + if (dim == 1) + output.push_back(at::empty({N, C, trgt_X}, src_opt)); + else if (dim == 2) + output.push_back(at::empty({N, C, trgt_X, trgt_Y}, src_opt)); + else + output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z}, src_opt)); + auto pull = output.back(); + out_sN = pull.stride(0); + out_sC = pull.stride(1); + out_sX = pull.stride(2); + out_sY = dim < 2 ? 0L : pull.stride(3); + out_sZ = dim < 3 ? 
0L : pull.stride(4);
+      out_sK = 0L;
+      out_ptr = pull.data_ptr();
+      out_32b_ok = tensorCanUse32BitIndexMath(pull);
+    } else if (do_sgrad) {
+      if (dim == 1)
+        output.push_back(at::empty({N, C, trgt_X, 1}, src_opt));
+      else if (dim == 2)
+        output.push_back(at::empty({N, C, trgt_X, trgt_Y, 2}, src_opt));
+      else
+        output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z, 3}, src_opt));
+      auto sgrad = output.back();
+      out_sN = sgrad.stride(0);
+      out_sC = sgrad.stride(1);
+      out_sX = sgrad.stride(2);
+      out_sY = dim < 2 ? 0L : sgrad.stride(3);
+      out_sZ = dim < 3 ? 0L : sgrad.stride(4);
+      out_sK = sgrad.stride(dim == 1 ? 3 : dim == 2 ? 4 : 5);
+      out_ptr = sgrad.data_ptr();
+      out_32b_ok = tensorCanUse32BitIndexMath(sgrad);
+
+      if (iso && interpolation0 == InterpolationType::Nearest)
+        sgrad.zero_();
+      if (iso && interpolation0 == InterpolationType::Linear && dim == 1)
+        sgrad.zero_();
+    } else if (do_push) {
+      if (dim == 1)
+        output.push_back(at::zeros({N, C, src_X}, trgt_opt));
+      else if (dim == 2)
+        output.push_back(at::zeros({N, C, src_X, src_Y}, trgt_opt));
+      else
+        output.push_back(at::zeros({N, C, src_X, src_Y, src_Z}, trgt_opt));
+      auto push = output.back();
+      out_sN = push.stride(0);
+      out_sC = push.stride(1);
+      out_sX = push.stride(2);
+      out_sY = dim < 2 ? 0L : push.stride(3);
+      out_sZ = dim < 3 ? 0L : push.stride(4);
+      out_sK = 0L;
+      out_ptr = push.data_ptr();
+      out_32b_ok = tensorCanUse32BitIndexMath(push);
+    } else if (do_count) {
+      if (dim == 1)
+        output.push_back(at::zeros({N, 1, src_X}, grid_opt));
+      else if (dim == 2)
+        output.push_back(at::zeros({N, 1, src_X, src_Y}, grid_opt));
+      else
+        output.push_back(at::zeros({N, 1, src_X, src_Y, src_Z}, grid_opt));
+      auto count = output.back();
+      out_sN = count.stride(0);
+      out_sC = count.stride(1);
+      out_sX = count.stride(2);
+      out_sY = dim < 2 ? 0L : count.stride(3);
+      out_sZ = dim < 3 ? 0L : count.stride(4);
+      out_sK = 0L;
+      out_ptr = count.data_ptr();
+      out_32b_ok = tensorCanUse32BitIndexMath(count);
+    }
+    if (do_grad) {
+      if (dim == 1)
+        output.push_back(at::zeros({N, trgt_X, 1}, grid_opt));
+      else if (dim == 2)
+        output.push_back(at::zeros({N, trgt_X, trgt_Y, 2}, grid_opt));
+      else
+        output.push_back(at::zeros({N, trgt_X, trgt_Y, trgt_Z, 3}, grid_opt));
+      auto grad = output.back();
+      grad_sN = grad.stride(0);
+      grad_sX = grad.stride(1);
+      grad_sY = dim < 2 ? 0L : grad.stride(2);
+      grad_sZ = dim < 3 ? 0L : grad.stride(3);
+      grad_sC = grad.stride(dim == 1 ? 2 : dim == 2 ? 3 : 4);
+      grad_ptr = grad.data_ptr();
+      grad_32b_ok = tensorCanUse32BitIndexMath(grad);
+
+      if (iso && interpolation0 == InterpolationType::Nearest)
+        grad.zero_();
+    }
+  }
+
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // GENERIC PUSHPULL CLASS
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // This class implements the bulk of the code.
+  // /!\ No type and shape checking is performed here.
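+  // An illustrative sketch of the intended call sequence (mirroring the
+  // dispatch functions at the end of this file; not additional API):
+  //   PushPullAllocator info(dim, bound, interpolation, extrapolate, ...);
+  //   info.ioset(source, grid);  // pre-allocates outputs on the host
+  //   if (info.canUse32BitIndexMath())
+  //     launch a PushPullImpl<scalar_t, int> built from `info`;
+  //   else
+  //     launch a PushPullImpl<scalar_t, int64_t> built from `info`;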
+
+  template <typename scalar_t, typename offset_t>
+  class PushPullImpl {
+   public:
+    // ~~~ CONSTRUCTOR ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    PushPullImpl(const PushPullAllocator& info)
+        : output(info.output),
+          dim(info.dim),
+          bound0(info.bound0),
+          bound1(info.bound1),
+          bound2(info.bound2),
+          interpolation0(info.interpolation0),
+          interpolation1(info.interpolation1),
+          interpolation2(info.interpolation2),
+          iso(info.iso),
+          extrapolate(info.extrapolate),
+          do_pull(info.do_pull),
+          do_push(info.do_push),
+          do_count(info.do_count),
+          do_grad(info.do_grad),
+          do_sgrad(info.do_sgrad),
+          N(static_cast<offset_t>(info.N)),
+          C(static_cast<offset_t>(info.C)),
+          src_X(static_cast<offset_t>(info.src_X)),
+          src_Y(static_cast<offset_t>(info.src_Y)),
+          src_Z(static_cast<offset_t>(info.src_Z)),
+          trgt_X(static_cast<offset_t>(info.trgt_X)),
+          trgt_Y(static_cast<offset_t>(info.trgt_Y)),
+          trgt_Z(static_cast<offset_t>(info.trgt_Z)),
+          trgt_K(static_cast<offset_t>(info.trgt_K)),
+          src_sN(static_cast<offset_t>(info.src_sN)),
+          src_sC(static_cast<offset_t>(info.src_sC)),
+          src_sX(static_cast<offset_t>(info.src_sX)),
+          src_sY(static_cast<offset_t>(info.src_sY)),
+          src_sZ(static_cast<offset_t>(info.src_sZ)),
+          src_ptr(static_cast<scalar_t*>(info.src_ptr)),
+          trgt_sN(static_cast<offset_t>(info.trgt_sN)),
+          trgt_sC(static_cast<offset_t>(info.trgt_sC)),
+          trgt_sX(static_cast<offset_t>(info.trgt_sX)),
+          trgt_sY(static_cast<offset_t>(info.trgt_sY)),
+          trgt_sZ(static_cast<offset_t>(info.trgt_sZ)),
+          trgt_sK(static_cast<offset_t>(info.trgt_sK)),
+          trgt_ptr(static_cast<scalar_t*>(info.trgt_ptr)),
+          grid_sN(static_cast<offset_t>(info.grid_sN)),
+          grid_sC(static_cast<offset_t>(info.grid_sC)),
+          grid_sX(static_cast<offset_t>(info.grid_sX)),
+          grid_sY(static_cast<offset_t>(info.grid_sY)),
+          grid_sZ(static_cast<offset_t>(info.grid_sZ)),
+          grid_ptr(static_cast<scalar_t*>(info.grid_ptr)),
+          out_sN(static_cast<offset_t>(info.out_sN)),
+          out_sC(static_cast<offset_t>(info.out_sC)),
+          out_sX(static_cast<offset_t>(info.out_sX)),
+          out_sY(static_cast<offset_t>(info.out_sY)),
+          out_sZ(static_cast<offset_t>(info.out_sZ)),
+          out_sK(static_cast<offset_t>(info.out_sK)),
+          out_ptr(static_cast<scalar_t*>(info.out_ptr)),
+          grad_sN(static_cast<offset_t>(info.grad_sN)),
+          grad_sC(static_cast<offset_t>(info.grad_sC)),
+          grad_sX(static_cast<offset_t>(info.grad_sX)),
+          grad_sY(static_cast<offset_t>(info.grad_sY)),
+          grad_sZ(static_cast<offset_t>(info.grad_sZ)),
+          grad_ptr(static_cast<scalar_t*>(info.grad_ptr)) {}

     // ~~~ PUBLIC VALUE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -244,39 +572,9 @@ MONAI_NAMESPACE_DEVICE { // cuda
     //   }

     // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    MONAI_HOST void ioset // Pull
-    (const Tensor& source, const Tensor& grid) {
-      init_all();
-      init_source(source);
-      init_grid(grid);
-      init_output();
-    }
-
-    MONAI_HOST void ioset(const Tensor& source, const Tensor& grid, const Tensor& target) {
-      init_all();
-      init_source(source);
-      init_grid(grid);
-      init_target(target);
-      init_output();
-    }
-
-    MONAI_HOST void ioset // Push
-    (IntArrayRef source_size, const Tensor& grid, const Tensor& target) {
-      init_all();
-      init_source(source_size);
-      init_grid(grid);
-      init_target(target);
-      init_output();
-    }
-
-    MONAI_HOST void ioset // Count
-    (IntArrayRef source_size, const Tensor& grid) {
-      init_all();
-      init_source(source_size);
-      init_grid(grid);
-      init_output();
-    }
+
+    // Loop over voxels that belong to one CUDA block
+    // This function is called by the CUDA kernel
     MONAI_DEVICE void loop(int threadIdx, int blockIdx, int blockDim, int gridDim) const;

     MONAI_HOST MONAI_DEVICE int64_t voxcount() const {
@@ -285,14 +583,18 @@ MONAI_NAMESPACE_DEVICE { // cuda

    private:
     // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    MONAI_HOST void init_all();
-    MONAI_HOST void init_source(const Tensor& source);
-    MONAI_HOST void init_source(IntArrayRef source_size);
-    MONAI_HOST void init_grid(const Tensor&
grid); - MONAI_HOST void init_target(const Tensor& target); - MONAI_HOST void init_output(); + MONAI_DEVICE void check1d(offset_t w, offset_t n) const; MONAI_DEVICE void check2d(offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void check3d(offset_t w, offset_t h, offset_t d, offset_t n) const; + MONAI_DEVICE void interpolate1d(scalar_t x, offset_t w, offset_t n) const; + MONAI_DEVICE void interpolate1d_nearest(scalar_t x, offset_t w, offset_t n) const; + MONAI_DEVICE void interpolate1d_linear(scalar_t x, offset_t w, offset_t n) const; + MONAI_DEVICE void interpolate1d_sliding(scalar_t x, offset_t w, offset_t n) const { /*TODO*/ + } + MONAI_DEVICE void interpolate1d_sliding_nearest(scalar_t x, offset_t w, offset_t n) const { /*TODO*/ + } + MONAI_DEVICE void interpolate1d_sliding_linear(scalar_t x, offset_t w, offset_t n) const { /*TODO*/ + } MONAI_DEVICE void interpolate2d(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void interpolate2d_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void interpolate2d_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; @@ -367,9 +669,6 @@ MONAI_NAMESPACE_DEVICE { // cuda bool do_sgrad; // sample spatial gradients // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - TensorOptions src_opt; - TensorOptions grid_opt; - TensorOptions trgt_opt; offset_t N; offset_t C; offset_t src_X; @@ -396,173 +695,22 @@ MONAI_NAMESPACE_DEVICE { // cuda offset_t grid_sC; offset_t grid_sX; offset_t grid_sY; - offset_t grid_sZ; - scalar_t* grid_ptr; - offset_t out_sN; - offset_t out_sC; - offset_t out_sX; - offset_t out_sY; - offset_t out_sZ; - offset_t out_sK; // gradient dimension - scalar_t* out_ptr; - offset_t grad_sN; - offset_t grad_sC; - offset_t grad_sX; - offset_t grad_sY; - offset_t grad_sZ; - scalar_t* grad_ptr; - }; - - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // INITIALISATION - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - template - void PushPullImpl::init_all() { - src_opt = grid_opt = trgt_opt = TensorOptions(); - N = C = static_cast(1); - src_X = src_Y = src_Z = static_cast(1); - trgt_X = trgt_Y = trgt_Z = trgt_K = static_cast(1); - src_sN = src_sC = src_sX = src_sY = src_sZ = static_cast(0); - grid_sN = grid_sC = grid_sX = grid_sY = grid_sZ = static_cast(0); - grad_sN = grad_sC = grad_sX = grad_sY = grad_sZ = static_cast(0); - trgt_sN = trgt_sC = trgt_sX = trgt_sY = trgt_sZ = trgt_sK = static_cast(0); - out_sN = out_sC = out_sX = out_sY = out_sZ = out_sK = static_cast(0); - src_ptr = trgt_ptr = grid_ptr = out_ptr = grad_ptr = static_cast(0); - } - - template - MONAI_HOST void PushPullImpl::init_source(const Tensor& source) { - N = source.size(0); - C = source.size(1); - src_X = source.size(2); - src_Y = source.size(3); - src_Z = dim == 2 ? static_cast(1) : source.size(4); - src_sN = source.stride(0); - src_sC = source.stride(1); - src_sX = source.stride(2); - src_sY = source.stride(3); - src_sZ = dim == 2 ? static_cast(0) : source.stride(4); - src_ptr = source.data_ptr(); - src_opt = source.options(); - } - - template - MONAI_HOST void PushPullImpl::init_source(IntArrayRef source_size) { - src_X = source_size[0]; - src_Y = source_size[1]; - src_Z = dim == 2 ? static_cast(1) : source_size[2]; - } - - template - MONAI_HOST void PushPullImpl::init_grid(const Tensor& grid) { - N = grid.size(0); - trgt_X = grid.size(1); - trgt_Y = grid.size(2); - trgt_Z = dim == 2 ? 
static_cast(1) : grid.size(3); - grid_sN = grid.stride(0); - grid_sX = grid.stride(1); - grid_sY = grid.stride(2); - grid_sZ = dim == 2 ? static_cast(0) : grid.stride(3); - grid_sC = grid.stride(dim == 2 ? 3 : 4); - grid_ptr = grid.data_ptr(); - grid_opt = grid.options(); - } - - template - MONAI_HOST void PushPullImpl::init_target(const Tensor& target) { - N = target.size(0); - C = target.size(1); - trgt_X = target.size(2); - trgt_Y = target.size(3); - trgt_Z = dim == 2 ? static_cast(1) : target.size(4); - trgt_K = target.dim() == dim + 3 ? target.size(dim == 2 ? 4 : 5) : static_cast(1); - trgt_sN = target.stride(0); - trgt_sC = target.stride(1); - trgt_sX = target.stride(2); - trgt_sY = target.stride(3); - trgt_sZ = dim == 2 ? static_cast(0) : target.stride(4); - trgt_sK = target.dim() == dim + 3 ? target.stride(dim == 2 ? 4 : 5) : static_cast(0); - trgt_ptr = target.data_ptr(); - trgt_opt = target.options(); - } - - template - MONAI_HOST void PushPullImpl::init_output() { - output.clear(); - if (do_pull) { - if (dim == 2) - output.push_back(at::empty({N, C, trgt_X, trgt_Y}, src_opt)); - else - output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z}, src_opt)); - auto pull = output.back(); - out_sN = pull.stride(0); - out_sC = pull.stride(1); - out_sX = pull.stride(2); - out_sY = pull.stride(3); - out_sZ = dim == 2 ? static_cast(0) : pull.stride(4); - out_sK = static_cast(0); - out_ptr = pull.template data_ptr(); - } else if (do_sgrad) { - if (dim == 2) - output.push_back(at::empty({N, C, trgt_X, trgt_Y, 2}, src_opt)); - else - output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z, 3}, src_opt)); - auto sgrad = output.back(); - out_sN = sgrad.stride(0); - out_sC = sgrad.stride(1); - out_sX = sgrad.stride(2); - out_sY = sgrad.stride(3); - out_sZ = dim == 2 ? static_cast(0) : sgrad.stride(4); - out_sK = sgrad.stride(dim == 2 ? 4 : 5); - out_ptr = sgrad.template data_ptr(); - - if (iso && interpolation0 == InterpolationType::Nearest) - sgrad.zero_(); - } else if (do_push) { - if (dim == 2) - output.push_back(at::zeros({N, C, src_X, src_Y}, trgt_opt)); - else - output.push_back(at::zeros({N, C, src_X, src_Y, src_Z}, trgt_opt)); - auto push = output.back(); - out_sN = push.stride(0); - out_sC = push.stride(1); - out_sX = push.stride(2); - out_sY = push.stride(3); - out_sZ = dim == 2 ? static_cast(0) : push.stride(4); - out_sK = static_cast(0); - out_ptr = push.template data_ptr(); - } else if (do_count) { - if (dim == 2) - output.push_back(at::zeros({N, 1, src_X, src_Y}, grid_opt)); - else - output.push_back(at::zeros({N, 1, src_X, src_Y, src_Z}, grid_opt)); - auto count = output.back(); - out_sN = count.stride(0); - out_sC = count.stride(1); - out_sX = count.stride(2); - out_sY = count.stride(3); - out_sZ = dim == 2 ? static_cast(0) : count.stride(4); - out_sK = static_cast(0); - out_ptr = count.template data_ptr(); - } - if (do_grad) { - if (dim == 2) - output.push_back(at::zeros({N, src_X, src_Y, 2}, grid_opt)); - else - output.push_back(at::zeros({N, src_X, src_Y, src_Z, 3}, grid_opt)); - auto grad = output.back(); - grad_sN = grad.stride(0); - grad_sX = grad.stride(1); - grad_sY = grad.stride(2); - grad_sZ = dim == 2 ? static_cast(0) : grad.stride(3); - grad_sC = grad.stride(dim == 2 ? 
3 : 4); - grad_ptr = grad.template data_ptr(); - - if (iso && interpolation0 == InterpolationType::Nearest) - grad.zero_(); - } - } + offset_t grid_sZ; + scalar_t* grid_ptr; + offset_t out_sN; + offset_t out_sC; + offset_t out_sX; + offset_t out_sY; + offset_t out_sZ; + offset_t out_sK; // gradient dimension + scalar_t* out_ptr; + offset_t grad_sN; + offset_t grad_sC; + offset_t grad_sX; + offset_t grad_sY; + offset_t grad_sZ; + scalar_t* grad_ptr; + }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LOOP @@ -583,7 +731,9 @@ MONAI_NAMESPACE_DEVICE { // cuda h = (i / trgt_Z) % trgt_Y; d = i % trgt_Z; - if (dim == 2) + if (dim == 1) + check1d(w, n); + else if (dim == 2) check2d(w, h, n); else check3d(w, h, d, n); @@ -598,6 +748,59 @@ MONAI_NAMESPACE_DEVICE { // cuda // 1) read the [x,y,z] source coordinate for the current target voxel // 3) check if the source coordinate is in bounds + template + MONAI_DEVICE void PushPullImpl::check3d(offset_t w, offset_t h, offset_t d, offset_t n) const { + // get the corresponding input x, y, z co-ordinates from grid + scalar_t* grid_ptr_NXYZ = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY + d * grid_sZ; + scalar_t x = *grid_ptr_NXYZ; + scalar_t y = grid_ptr_NXYZ[grid_sC]; + scalar_t z = grid_ptr_NXYZ[grid_sC * 2]; + + // Check if out-of-bound + if (!(extrapolate || + (inbounds(x, src_X, static_cast(TINY)) && inbounds(y, src_Y, static_cast(TINY)) && + inbounds(z, src_Z, static_cast(TINY))))) { + if (do_pull || do_sgrad) { + scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; + for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) { + *out_ptr_NCXYZ = static_cast(0); + if (do_sgrad) { + out_ptr_NCXYZ[out_sK] = static_cast(0); + out_ptr_NCXYZ[out_sK * 2] = static_cast(0); + } + } + } + if (do_grad) { + scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; + (*grad_ptr_NXYZ) = static_cast(0); + grad_ptr_NXYZ[grad_sC] = static_cast(0); + grad_ptr_NXYZ[grad_sC * 2] = static_cast(0); + } + return; + } + + // Next step + if (bound0 == BoundType::Sliding) { + if (iso) + switch (static_cast(interpolation0)) { + case 0: + return interpolate3d_sliding_nearest(x, y, z, w, h, d, n); + case 1: + return interpolate3d_sliding_trilinear(x, y, z, w, h, d, n); + } + return interpolate3d_sliding(x, y, z, w, h, d, n); + } else { + if (iso) + switch (static_cast(interpolation0)) { + case 0: + return interpolate3d_nearest(x, y, z, w, h, d, n); + case 1: + return interpolate3d_trilinear(x, y, z, w, h, d, n); + } + return interpolate3d(x, y, z, w, h, d, n); + } + } + template MONAI_DEVICE void PushPullImpl::check2d(offset_t w, offset_t h, offset_t n) const { // get the corresponding input x, y, z co-ordinates from grid @@ -609,7 +812,7 @@ MONAI_NAMESPACE_DEVICE { // cuda if (!(extrapolate || (inbounds(x, src_X, static_cast(TINY)) && inbounds(y, src_Y, static_cast(TINY))))) { if (do_pull || do_sgrad) { - scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sZ + h * out_sY; + scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) { *out_ptr_NCXY = static_cast(0); if (do_sgrad) @@ -647,32 +850,25 @@ MONAI_NAMESPACE_DEVICE { // cuda } template - MONAI_DEVICE void PushPullImpl::check3d(offset_t w, offset_t h, offset_t d, offset_t n) const { + MONAI_DEVICE void PushPullImpl::check1d(offset_t w, offset_t n) const { // get the corresponding input x, y, z co-ordinates from grid - scalar_t* grid_ptr_NXYZ 
= grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY + d * grid_sZ; - scalar_t x = *grid_ptr_NXYZ; - scalar_t y = grid_ptr_NXYZ[grid_sC]; - scalar_t z = grid_ptr_NXYZ[grid_sC * 2]; + scalar_t* grid_ptr_NX = grid_ptr + n * grid_sN + w * grid_sX; + scalar_t x = *grid_ptr_NX; // Check if out-of-bound - if (!(extrapolate || - (inbounds(x, src_X, static_cast(TINY)) && inbounds(y, src_Y, static_cast(TINY)) && - inbounds(z, src_Z, static_cast(TINY))))) { + if (!(extrapolate || inbounds(x, src_X, static_cast(TINY)))) { if (do_pull || do_sgrad) { - scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; - for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) { - *out_ptr_NCXYZ = static_cast(0); - if (do_sgrad) { - out_ptr_NCXYZ[out_sK] = static_cast(0); - out_ptr_NCXYZ[out_sK * 2] = static_cast(0); - } + scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC) { + *out_ptr_NCX = static_cast(0); + if (do_sgrad) + out_ptr_NCX[out_sK] = static_cast(0); } } if (do_grad) { - scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; - (*grad_ptr_NXYZ) = static_cast(0); - grad_ptr_NXYZ[grad_sC] = static_cast(0); - grad_ptr_NXYZ[grad_sC * 2] = static_cast(0); + scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX; + (*grad_ptr_NX) = static_cast(0); + grad_ptr_NX[grad_sC] = static_cast(0); } return; } @@ -682,20 +878,20 @@ MONAI_NAMESPACE_DEVICE { // cuda if (iso) switch (static_cast(interpolation0)) { case 0: - return interpolate3d_sliding_nearest(x, y, z, w, h, d, n); + return interpolate1d_sliding_nearest(x, w, n); case 1: - return interpolate3d_sliding_trilinear(x, y, z, w, h, d, n); + return interpolate1d_sliding_linear(x, w, n); } - return interpolate3d_sliding(x, y, z, w, h, d, n); + return interpolate1d_sliding(x, w, n); } else { if (iso) switch (static_cast(interpolation0)) { case 0: - return interpolate3d_nearest(x, y, z, w, h, d, n); + return interpolate1d_nearest(x, w, n); case 1: - return interpolate3d_trilinear(x, y, z, w, h, d, n); + return interpolate1d_linear(x, w, n); } - return interpolate3d(x, y, z, w, h, d, n); + return interpolate1d(x, w, n); } } @@ -730,7 +926,7 @@ MONAI_NAMESPACE_DEVICE { // cuda if (trgt_ptr && (do_push || do_grad)) for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC) { target[c] = *trgt_ptr_NCXYZ; - if (trgt_K > 1) { + if (trgt_K > 0) { target[c + C] = trgt_ptr_NCXYZ[trgt_sK]; target[c + C * 2] = trgt_ptr_NCXYZ[trgt_sK * 2]; } @@ -848,7 +1044,7 @@ MONAI_NAMESPACE_DEVICE { // cuda // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. push/pull scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) @@ -871,7 +1067,7 @@ MONAI_NAMESPACE_DEVICE { // cuda // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. 
pull/push scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot = static_cast(0); @@ -940,7 +1136,7 @@ MONAI_NAMESPACE_DEVICE { // cuda if (trgt_ptr && (do_push || do_grad)) for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC) { target[c] = *trgt_ptr_NCXY; - if (trgt_K > 1) { + if (trgt_K > 0) { target[c + C] = trgt_ptr_NCXY[trgt_sK]; } } @@ -1033,7 +1229,7 @@ MONAI_NAMESPACE_DEVICE { // cuda // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. push/pull scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) @@ -1055,7 +1251,7 @@ MONAI_NAMESPACE_DEVICE { // cuda // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. pull/push scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot = static_cast(0); @@ -1092,6 +1288,150 @@ MONAI_NAMESPACE_DEVICE { // cuda } } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // GENERIC INTERPOLATION 1D + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + MONAI_DEVICE void PushPullImpl::interpolate1d(scalar_t x, offset_t w, offset_t n) const { + // Get corner pixel values from (x, y) + offset_t bx0, bx1; + interpolation::bounds(interpolation0, x, bx0, bx1); + offset_t dbx = bx1 - bx0; + + // Pre-compute offsets and target value + scalar_t* src_ptr_NC0 = src_ptr + n * src_sN; + scalar_t* out_ptr_NC0 = out_ptr + n * out_sN; + scalar_t* out_ptr_NCX0 = out_ptr + n * out_sN + w * out_sX; + scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX; + scalar_t target[2 * MONAI_MAX_NUM_CHANNELS]; + if (trgt_ptr && (do_push || do_grad)) + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC) { + target[c] = *trgt_ptr_NCX; + if (trgt_K > 0) { + target[c + C] = trgt_ptr_NCX[trgt_sK]; + } + } + + // Initialize output + scalar_t* out_ptr_NCX = out_ptr_NCX0; + if (do_pull || do_sgrad) { + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC) { + *out_ptr_NCX = static_cast(0); + if (do_sgrad) { + out_ptr_NCX[out_sK] = static_cast(0); + } + } + } + + // Pre-compute indices/weights/grad + scalar_t wx[8]; // B-spline weights + scalar_t gx[8]; // B-spline derivatives + scalar_t hx[8]; // B-spline 2nd derivatives + offset_t ix[8]; // Warped indices + uint8_t sx[8]; // Warped indices + + { + scalar_t *owx = static_cast(wx), *ogx = static_cast(gx), *ohx = static_cast(hx); + offset_t* oix = static_cast(ix); + uint8_t* osx = static_cast(sx); + for (offset_t bx = bx0; bx <= bx1; ++bx) { + scalar_t dx = x - bx; + *(owx++) = interpolation::fastweight(interpolation0, dx); + if (do_grad || do_sgrad) + *(ogx++) = interpolation::fastgrad(interpolation0, dx); + if (do_grad && trgt_sK > 1) + *(ohx++) = interpolation::fasthess(interpolation0, dx); + *(osx++) = bound::sign(bound0, bx, src_X); + *(oix++) = bound::index(bound0, bx, src_X); + } + } + + // Convolve coefficients with basis functions + scalar_t ogx; + ogx = static_cast(0); + for (offset_t i = 0; i <= dbx; ++i) { + offset_t oox = ix[i] * out_sX; + offset_t osx = ix[i] * src_sX; + uint8_t sxx = sx[i]; + scalar_t wxx = wx[i]; + scalar_t gxx = gx[i]; + scalar_t hxx = hx[i]; + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_pull) { + scalar_t* src_ptr_NC = src_ptr_NC0; + scalar_t* out_ptr_NCX = out_ptr_NCX0; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) + *out_ptr_NCX += bound::get(src_ptr_NC, osx, 
sxx) * wxx; + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_sgrad) { + scalar_t* src_ptr_NC = src_ptr_NC0; + scalar_t* out_ptr_NCX = out_ptr_NCX0; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) { + scalar_t src = bound::get(src_ptr_NC, osx, sxx); + *out_ptr_NCX += src * gxx; + } + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_push) { + if (trgt_K == 0) { + // Diff w.r.t. push/pull + scalar_t* out_ptr_NC = out_ptr_NC0; + for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) + bound::add(out_ptr_NC, oox, wxx * target[c], sxx); + } else { + // Diff w.r.t. sgrad + scalar_t* out_ptr_NC = out_ptr_NC0; + for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) { + scalar_t val = gxx * target[c]; + bound::add(out_ptr_NC, oox, val, sxx); + } + } + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_count) { + bound::add(out_ptr_NC0, oox, wxx, sxx); + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_grad) { + if (trgt_K == 0) { + // Diff w.r.t. pull/push + scalar_t* src_ptr_NC = src_ptr_NC0; + scalar_t dot = static_cast(0); + for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { + scalar_t src = bound::get(src_ptr_NC, osx, sxx); + dot += (trgt_ptr ? src * target[c] : src); + // trgt_ptr == 0 in the backward pass of 'count' + } + ogx += gxx * dot; + } else { + // Diff w.r.t. sgrad + scalar_t* src_ptr_NC = src_ptr_NC0; + scalar_t dot; + dot = static_cast(0); + for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { + scalar_t src = bound::get(src_ptr_NC, osx, sxx); + dot += src * target[c]; + } + ogx += hxx * dot; + } + } + + } // x + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_grad) { + scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX; + (*grad_ptr_NX) = ogx; + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1181,7 +1521,7 @@ MONAI_NAMESPACE_DEVICE { // cuda scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; - if (trgt_K == 1) { + if (trgt_K == 0) { // backward w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; @@ -1343,7 +1683,7 @@ MONAI_NAMESPACE_DEVICE { // cuda o111 = ix1 * out_sX + iy1 * out_sY + iz1 * out_sZ; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* out_ptr_NC = out_ptr + n * out_sN; - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt = *trgt_ptr_NCXYZ; @@ -1428,7 +1768,6 @@ MONAI_NAMESPACE_DEVICE { // cuda scalar_t w10 = dx1 * dy0; scalar_t w01 = dx0 * dy1; scalar_t w11 = dx1 * dy1; - ; // Sign (/!\ compute sign before warping indices) int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X); @@ -1467,7 +1806,7 @@ MONAI_NAMESPACE_DEVICE { // cuda scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; - if (trgt_K == 1) { + if (trgt_K == 0) { // backward w.r.t. 
push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; @@ -1514,9 +1853,9 @@ MONAI_NAMESPACE_DEVICE { // cuda } } - scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; - (*grad_ptr_NXYZ) = gx; - grad_ptr_NXYZ[grad_sC] = gy; + scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; + (*grad_ptr_NXY) = gx; + grad_ptr_NXY[grad_sC] = gy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { @@ -1558,7 +1897,7 @@ MONAI_NAMESPACE_DEVICE { // cuda o11 = ix1 * out_sX + iy1 * out_sY; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* out_ptr_NC = out_ptr + n * out_sN; - if (trgt_K == 1) { + if (trgt_K == 0) { // Diff w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt = *trgt_ptr_NCXY; @@ -1599,6 +1938,123 @@ MONAI_NAMESPACE_DEVICE { // cuda } } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // LINEAR INTERPOLATION 1D + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + MONAI_DEVICE void PushPullImpl::interpolate1d_linear(scalar_t x, offset_t w, offset_t n) const { + // Get corner pixel values from (x) + offset_t ix0 = static_cast(std::floor(x)); + + // Interpolation weights (inversely proportional to distance) + scalar_t w1 = x - ix0; + scalar_t w0 = 1. - w1; + + // Sign (/!\ compute sign before warping indices) + int8_t s1 = bound::sign(bound0, ix0 + 1, src_X); + int8_t s0 = bound::sign(bound0, ix0, src_X); + + // Warp indices + offset_t ix1; + ix1 = bound::index(bound0, ix0 + 1, src_X); + ix0 = bound::index(bound0, ix0, src_X); + + // Offsets into source volume + offset_t o0, o1; + if (do_pull || do_grad || do_sgrad) { + o0 = ix0 * src_sX; + o1 = ix1 * src_sX; + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_grad) { + if (trgt_K == 0) { + // backward w.r.t. push/pull + + o0 = ix0 * src_sX; + o1 = ix1 * src_sX; + scalar_t gx = static_cast(0); + scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX; + scalar_t* src_ptr_NC = src_ptr + n * src_sN; + + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, src_ptr_NC += src_sC) { + scalar_t src; + scalar_t trgt = trgt_ptr ? *trgt_ptr_NCX : static_cast(1); + // ^ trgt_ptr == 0 during the backward pass of count + src = bound::get(src_ptr_NC, o0, s0); + if (trgt_ptr) + src *= trgt; + gx -= src; + src = bound::get(src_ptr_NC, o1, s1); + if (trgt_ptr) + src *= trgt; + gx += src; + } + + scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX; + (*grad_ptr_NX) = gx; + } else { + // backward w.r.t. 
sgrad + // -> zero (make sure this is done at initialization) + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (do_pull) { + o0 = ix0 * src_sX; + o1 = ix1 * src_sX; + scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX; + scalar_t* src_ptr_NC = src_ptr + n * src_sN; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) { + *out_ptr_NCX = bound::get(src_ptr_NC, o0, s0) * w0 + bound::get(src_ptr_NC, o1, s1) * w1; + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_sgrad) { + o0 = ix0 * src_sX; + o1 = ix1 * src_sX; + scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX; + scalar_t* src_ptr_NC = src_ptr + n * src_sN; + + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) { + *out_ptr_NCX = bound::get(src_ptr_NC, o1, s1) - bound::get(src_ptr_NC, o0, s0); + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_push) { + // Offsets into 'push' volume + o0 = ix0 * out_sX; + o1 = ix1 * out_sX; + scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX; + scalar_t* out_ptr_NC = out_ptr + n * out_sN; + if (trgt_K == 0) { + // Diff w.r.t. push/pull + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) { + scalar_t trgt = *trgt_ptr_NCX; + bound::add(out_ptr_NC, o0, w0 * trgt, s0); + bound::add(out_ptr_NC, o1, w1 * trgt, s1); + } + } else { + // Diff w.r.t. sgrad + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) { + scalar_t trgt0 = *trgt_ptr_NCX; + bound::add(out_ptr_NC, o0, -trgt0, s0); + bound::add(out_ptr_NC, o1, trgt0, s1); + } + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + else if (do_count) { + // Offsets into 'push' volume + o0 = ix0 * out_sX; + o1 = ix1 * out_sX; + + scalar_t* out_ptr_N = out_ptr + n * out_sN; + bound::add(out_ptr_N, o0, w0, s0); + bound::add(out_ptr_N, o1, w1, s1); + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // NEAREST NEIGHBOR INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1633,7 +2089,7 @@ MONAI_NAMESPACE_DEVICE { // cuda scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXYZ = bound::get(src_ptr_NC, o, s); - } else if (do_push && trgt_K == 1) { + } else if (do_push && trgt_K == 0) { offset_t o = iz * out_sZ + iy * out_sY + ix * out_sX; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* out_ptr_NC = out_ptr + n * out_sN; @@ -1676,7 +2132,7 @@ MONAI_NAMESPACE_DEVICE { // cuda scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXY = bound::get(src_ptr_NC, o, s); - } else if (do_push && trgt_K == 1) { + } else if (do_push && trgt_K == 0) { offset_t o = iy * out_sY + ix * out_sX; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* out_ptr_NC = out_ptr + n * out_sN; @@ -1689,6 +2145,39 @@ MONAI_NAMESPACE_DEVICE { // cuda bound::add(out_ptr_NC, o, static_cast(1), s); } } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // NEAREST NEIGHBOR INTERPOLATION 1D + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + MONAI_DEVICE void PushPullImpl::interpolate1d_nearest(scalar_t x, offset_t w, offset_t n) const 
{ + offset_t i = static_cast(std::round(x)); + + // Boundary condition (/!\ compute sign before warping indices) + int8_t s = bound::sign(bound0, i, src_X); + i = bound::index(bound0, i, src_X); + + if (do_pull) { + offset_t o = i * src_sX; + scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX; + scalar_t* src_ptr_NC = src_ptr + n * src_sN; + for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) + *out_ptr_NCX = bound::get(src_ptr_NC, o, s); + } else if (do_push && trgt_K == 0) { + offset_t o = i * out_sX; + scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX; + scalar_t* out_ptr_NC = out_ptr + n * out_sN; + for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) + bound::add(out_ptr_NC, o, *trgt_ptr_NCX, s); + } else if (do_count) { + offset_t o = i * out_sX; + scalar_t* out_ptr_NC = out_ptr + n * out_sN; + for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) + bound::add(out_ptr_NC, o, static_cast(1), s); + } + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 3D + SLIDING BOUNDARY // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1736,8 +2225,6 @@ MONAI_NAMESPACE_DEVICE { // cuda PUSHPULL_INSTANTIATE1(BoundType); \ PUSHPULL_INSTANTIATE1(BoundVectorRef) - // ~~~ CUDA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // Two arguments (source, grid) // > `bound` and `interpolation` can be single arguments or vectors. template @@ -1752,12 +2239,20 @@ MONAI_NAMESPACE_DEVICE { // cuda bool do_count, bool do_grad, bool do_sgrad) { + PushPullAllocator info( + grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); + info.ioset(source, grid); + return AT_DISPATCH_FLOATING_TYPES_AND_HALF(grid.scalar_type(), "pushpull", [&] { - PushPullImpl f( - grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); - f.ioset(source, grid); - pushpull_kernel<<>>(f); - return f.output; + if (info.canUse32BitIndexMath()) { + PushPullImpl algo(info); + pushpull_kernel<<>>(algo); + return algo.output; + } else { + PushPullImpl algo(info); + pushpull_kernel<<>>(algo); + return algo.output; + } }); } @@ -1777,17 +2272,24 @@ MONAI_NAMESPACE_DEVICE { // cuda bool do_count, bool do_grad, bool do_sgrad) { + PushPullAllocator info( + grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); + info.ioset(source, grid, target); + return AT_DISPATCH_FLOATING_TYPES_AND_HALF(grid.scalar_type(), "pushpull", [&] { - PushPullImpl f( - grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); - f.ioset(source, grid, target); - pushpull_kernel<<>>(f); - return f.output; + if (info.canUse32BitIndexMath()) { + PushPullImpl algo(info); + pushpull_kernel<<>>(algo); + return algo.output; + } else { + PushPullImpl algo(info); + pushpull_kernel<<>>(algo); + return algo.output; + } }); } PUSHPULL_INSTANTIATE; -} // namespace - +} // namespace gpu } // namespace monai diff --git a/monai/csrc/utils/common_utils.h b/monai/csrc/utils/common_utils.h index 882312acb3..4d09377e65 100644 --- a/monai/csrc/utils/common_utils.h +++ b/monai/csrc/utils/common_utils.h @@ -26,10 +26,10 @@ limitations under the License. 
value.layout() == at::kStrided, \ "(): expected " #value "to have torch.strided layout, but it has ", \ value.layout()); -#define CHECK_SPATIAL_2D_OR_3D(value) \ - TORCH_CHECK( \ - (value.dim() == 4 || value.dim() == 5), \ - "(): expected 4D or 5D " #value " but got input with sizes ", \ +#define CHECK_SPATIAL_1D_2D_OR_3D(value) \ + TORCH_CHECK( \ + (value.dim() == 3 || value.dim() == 4 || value.dim() == 5), \ + "(): expected 3D, 4D or 5D " #value " but got input with sizes ", \ value.sizes()); #define CHECK_GRID_COMPONENT(value, dim) \ TORCH_CHECK( \ @@ -42,18 +42,18 @@ limitations under the License. #define CHECK_SAME_DEVICE(value1, value2) \ TORCH_CHECK( \ value1.device() == value2.device(), \ - "(): expected " #value2 " and " #value2 \ + "(): expected " #value1 " and " #value2 \ " to be on same device, " \ - "but " #value2 " is on ", \ + "but " #value1 " is on ", \ value1.device(), \ " and " #value2 " is on ", \ value2.device()); #define CHECK_SAME_DTYPE(value1, value2) \ TORCH_CHECK( \ value1.dtype() == value2.dtype(), \ - "(): expected " #value2 " and " #value2 \ + "(): expected " #value1 " and " #value2 \ " to have the same dtype, " \ - "but " #value2 " has ", \ + "but " #value1 " has ", \ value1.dtype(), \ " and " #value2 " has ", \ value2.dtype()); @@ -67,14 +67,15 @@ limitations under the License. i, \ " being empty"); \ } -#define CHECK_GRID_TARGET_COMPAT(value1, value2) \ - TORCH_CHECK( \ - value2.size(0) == value1.size(0) && value2.size(2) == value1.size(1) && value2.size(3) == value1.size(2) && \ - (value2.dim() == 4 || value2.size(4) == value1.size(3)), \ - "(): expected " #value2 " and " #value1 \ - " to have same batch, width, height and (optionally) depth sizes, but got " #value2 " with sizes ", \ - value2.sizes(), \ - " and " #value1 " with sizes ", \ +#define CHECK_GRID_TARGET_COMPAT(value1, value2) \ + TORCH_CHECK( \ + value2.size(0) == value1.size(0) && (value2.dim() <= 2 || value2.size(2) == value1.size(1)) && \ + (value2.dim() <= 3 || value2.size(3) == value1.size(2)) && \ + (value2.dim() <= 4 || value2.size(4) == value1.size(3)), \ + "(): expected " #value2 " and " #value1 \ + " to have same batch, width, height and (optionally) depth sizes, but got " #value2 " with sizes ", \ + value2.sizes(), \ + " and " #value1 " with sizes ", \ value1.sizes()); #define CHECK_SPATIAL_LENGTH(value, dim) \ TORCH_CHECK(((int64_t)(value.size()) == dim - 2), "(): expected ", dim, #value " elements but got ", value.size()); diff --git a/monai/csrc/utils/resample_utils.h b/monai/csrc/utils/resample_utils.h index 4735d13ca1..bbdf258b4c 100644 --- a/monai/csrc/utils/resample_utils.h +++ b/monai/csrc/utils/resample_utils.h @@ -62,7 +62,9 @@ namespace monai { template static inline void cpuAtomicAdd(scalar_t* ptr, offset_t offset, scalar_t value) { #if AT_PARALLEL_OPENMP +#if _OPENMP #pragma omp atomic +#endif #endif ptr[offset] += value; } diff --git a/monai/networks/layers/spatial_transforms.py b/monai/networks/layers/spatial_transforms.py index 175fd05694..03031f3340 100644 --- a/monai/networks/layers/spatial_transforms.py +++ b/monai/networks/layers/spatial_transforms.py @@ -35,17 +35,15 @@ def forward(ctx, input, grid, interpolation, bound, extrapolate): @staticmethod def backward(ctx, grad): - var = ctx.saved_variables + if not (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]): + return None, None, None, None, None + var = ctx.saved_tensors opt = ctx.opt - grad_input = grad_grid = None grads = _C.grid_pull_backward(grad, *var, *opt) if ctx.needs_input_grad[0]: - grad_input = 
grads[0] - if ctx.needs_input_grad[1]: - grad_grid = grads[1] - elif ctx.needs_input_grad[1]: - grad_grid = grads[0] - return grad_input, grad_grid, None, None, None + return grads[0], grads[1] if ctx.needs_input_grad[1] else None, None, None, None + if ctx.needs_input_grad[1]: + return None, grads[0], None, None, None def grid_pull(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", bound="zero", extrapolate: bool = True): @@ -60,7 +58,9 @@ def grid_pull(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", b - 2 or 'quadratic' or InterpolationType.quadratic - 3 or 'cubic' or InterpolationType.cubic - 4 or 'fourth' or InterpolationType.fourth - - etc. + - 5 or 'fifth' or InterpolationType.fifth + - 6 or 'sixth' or InterpolationType.sixth + - 7 or 'seventh' or InterpolationType.seventh A list of values can be provided, in the order [W, H, D], to specify dimension-specific interpolation orders. @@ -68,14 +68,13 @@ def grid_pull(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", b `bound` can be an int, a string or a BoundType. Possible values are:: - - 0 or 'replicate' or BoundType.replicate - - 1 or 'dct1' or BoundType.dct1 - - 2 or 'dct2' or BoundType.dct2 - - 3 or 'dst1' or BoundType.dst1 - - 4 or 'dst2' or BoundType.dst2 - - 5 or 'dft' or BoundType.dft - - 6 or 'sliding' or BoundType.sliding [not implemented] - - 7 or 'zero' or BoundType.zero + - 0 or 'replicate' or 'nearest' or BoundType.replicate + - 1 or 'dct1' or 'mirror' or BoundType.dct1 + - 2 or 'dct2' or 'reflect' or BoundType.dct2 + - 3 or 'dst1' or 'antimirror' or BoundType.dst1 + - 4 or 'dst2' or 'antireflect' or BoundType.dst2 + - 5 or 'dft' or 'wrap' or BoundType.dft + - 7 or 'zero' or BoundType.zero A list of values can be provided, in the order [W, H, D], to specify dimension-specific boundary conditions. @@ -87,15 +86,17 @@ def grid_pull(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", b - `dct2` corresponds to Neumann boundary conditions (symmetric) - `dst2` corresponds to Dirichlet boundary conditions (antisymmetric) - See: - https://en.wikipedia.org/wiki/Discrete_cosine_transform - https://en.wikipedia.org/wiki/Discrete_sine_transform + See Also: + - https://en.wikipedia.org/wiki/Discrete_cosine_transform + - https://en.wikipedia.org/wiki/Discrete_sine_transform + - ``help(monai._C.BoundType)`` + - ``help(monai._C.InterpolationType)`` Args: input: Input image. `(B, C, Wi, Hi, Di)`. - grid: Deformation field. `(B, Wo, Ho, Do, 2|3)`. + grid: Deformation field. `(B, Wo, Ho, Do, 1|2|3)`. interpolation (int or list[int] , optional): Interpolation order. - Defaults to `1`. + Defaults to `'linear'`. bound (BoundType, or list[BoundType], optional): Boundary conditions. Defaults to `'zero'`. extrapolate: Extrapolate out-of-bound data. 
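
A minimal usage sketch for the hunk above (illustrative only, not part of the
patch; the shapes are made up, and it assumes the compiled `monai._C`
extension and that `grid` stores voxel coordinates rather than normalized
ones):

    import torch
    from monai.networks.layers import grid_pull

    img = torch.rand(1, 1, 32, 32, 32)        # (B, C, Wi, Hi, Di)
    grid = torch.rand(1, 16, 16, 16, 3) * 31  # (B, Wo, Ho, Do, 3)
    out = grid_pull(img, grid, interpolation="linear", bound="dct2")
    print(out.shape)  # torch.Size([1, 1, 16, 16, 16])
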
@@ -106,11 +107,10 @@ def grid_pull(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", b """ # Convert parameters - bound = ensure_tuple(bound) - interpolation = ensure_tuple(interpolation) - bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in bound] + bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in ensure_tuple(bound)] interpolation = [ - _C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i) for i in interpolation + _C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i) + for i in ensure_tuple(interpolation) ] return _GridPull.apply(input, grid, interpolation, bound, extrapolate) @@ -129,17 +129,15 @@ def forward(ctx, input, grid, shape, interpolation, bound, extrapolate): @staticmethod def backward(ctx, grad): - var = ctx.saved_variables + if not (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]): + return None, None, None, None, None, None + var = ctx.saved_tensors opt = ctx.opt - grad_input = grad_grid = None grads = _C.grid_push_backward(grad, *var, *opt) if ctx.needs_input_grad[0]: - grad_input = grads[0] - if ctx.needs_input_grad[1]: - grad_grid = grads[1] - elif ctx.needs_input_grad[1]: - grad_grid = grads[0] - return grad_input, grad_grid, None, None, None, None + return grads[0], grads[1] if ctx.needs_input_grad[1] else None, None, None, None, None + if ctx.needs_input_grad[1]: + return None, grads[0], None, None, None, None def grid_push( @@ -156,7 +154,9 @@ def grid_push( - 2 or 'quadratic' or InterpolationType.quadratic - 3 or 'cubic' or InterpolationType.cubic - 4 or 'fourth' or InterpolationType.fourth - - etc. + - 5 or 'fifth' or InterpolationType.fifth + - 6 or 'sixth' or InterpolationType.sixth + - 7 or 'seventh' or InterpolationType.seventh A list of values can be provided, in the order `[W, H, D]`, to specify dimension-specific interpolation orders. @@ -164,14 +164,13 @@ def grid_push( `bound` can be an int, a string or a BoundType. Possible values are:: - - 0 or 'replicate' or BoundType.replicate - - 1 or 'dct1' or BoundType.dct1 - - 2 or 'dct2' or BoundType.dct2 - - 3 or 'dst1' or BoundType.dst1 - - 4 or 'dst2' or BoundType.dst2 - - 5 or 'dft' or BoundType.dft - - 6 or 'sliding' or BoundType.sliding [not implemented] - - 7 or 'zero' or BoundType.zero + - 0 or 'replicate' or 'nearest' or BoundType.replicate + - 1 or 'dct1' or 'mirror' or BoundType.dct1 + - 2 or 'dct2' or 'reflect' or BoundType.dct2 + - 3 or 'dst1' or 'antimirror' or BoundType.dst1 + - 4 or 'dst2' or 'antireflect' or BoundType.dst2 + - 5 or 'dft' or 'wrap' or BoundType.dft + - 7 or 'zero' or BoundType.zero A list of values can be provided, in the order `[W, H, D]`, to specify dimension-specific boundary conditions. @@ -183,17 +182,19 @@ def grid_push( - `dct2` corresponds to Neumann boundary conditions (symmetric) - `dst2` corresponds to Dirichlet boundary conditions (antisymmetric) - See also: + See Also: - https://en.wikipedia.org/wiki/Discrete_cosine_transform - https://en.wikipedia.org/wiki/Discrete_sine_transform + - ``help(monai._C.BoundType)`` + - ``help(monai._C.InterpolationType)`` Args: input: Input image `(B, C, Wi, Hi, Di)`. - grid: Deformation field `(B, Wi, Hi, Di, 2|3)`. + grid: Deformation field `(B, Wi, Hi, Di, 1|2|3)`. shape: Shape of the source image. interpolation (int or list[int] , optional): Interpolation order. - Defaults to `1`. + Defaults to `'linear'`. bound (BoundType, or list[BoundType], optional): Boundary conditions. 
Defaults to `'zero'`. extrapolate: Extrapolate out-of-bound data. @@ -204,11 +205,10 @@ def grid_push( """ # Convert parameters - bound = ensure_tuple(bound) - interpolation = ensure_tuple(interpolation) - bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in bound] + bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in ensure_tuple(bound)] interpolation = [ - _C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i) for i in interpolation + _C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i) + for i in ensure_tuple(interpolation) ] if shape is None: @@ -230,12 +230,11 @@ def forward(ctx, grid, shape, interpolation, bound, extrapolate): @staticmethod def backward(ctx, grad): - var = ctx.saved_variables - opt = ctx.opt - grad_grid = None if ctx.needs_input_grad[0]: - grad_grid = _C.grid_count_backward(grad, *var, *opt) - return grad_grid, None, None, None, None + var = ctx.saved_tensors + opt = ctx.opt + return _C.grid_count_backward(grad, *var, *opt), None, None, None, None + return None, None, None, None, None def grid_count(grid: torch.Tensor, shape=None, interpolation="linear", bound="zero", extrapolate: bool = True): @@ -252,7 +251,9 @@ def grid_count(grid: torch.Tensor, shape=None, interpolation="linear", bound="ze - 2 or 'quadratic' or InterpolationType.quadratic - 3 or 'cubic' or InterpolationType.cubic - 4 or 'fourth' or InterpolationType.fourth - - etc. + - 5 or 'fifth' or InterpolationType.fifth + - 6 or 'sixth' or InterpolationType.sixth + - 7 or 'seventh' or InterpolationType.seventh A list of values can be provided, in the order [W, H, D], to specify dimension-specific interpolation orders. @@ -260,14 +261,13 @@ def grid_count(grid: torch.Tensor, shape=None, interpolation="linear", bound="ze `bound` can be an int, a string or a BoundType. Possible values are:: - - 0 or 'replicate' or BoundType.replicate - - 1 or 'dct1' or BoundType.dct1 - - 2 or 'dct2' or BoundType.dct2 - - 3 or 'dst1' or BoundType.dst1 - - 4 or 'dst2' or BoundType.dst2 - - 5 or 'dft' or BoundType.dft - - 6 or 'sliding' or BoundType.sliding [not implemented] - - 7 or 'zero' or BoundType.zero + - 0 or 'replicate' or 'nearest' or BoundType.replicate + - 1 or 'dct1' or 'mirror' or BoundType.dct1 + - 2 or 'dct2' or 'reflect' or BoundType.dct2 + - 3 or 'dst1' or 'antimirror' or BoundType.dst1 + - 4 or 'dst2' or 'antireflect' or BoundType.dst2 + - 5 or 'dft' or 'wrap' or BoundType.dft + - 7 or 'zero' or BoundType.zero A list of values can be provided, in the order [W, H, D], to specify dimension-specific boundary conditions. @@ -283,12 +283,14 @@ def grid_count(grid: torch.Tensor, shape=None, interpolation="linear", bound="ze - https://en.wikipedia.org/wiki/Discrete_cosine_transform - https://en.wikipedia.org/wiki/Discrete_sine_transform + - ``help(monai._C.BoundType)`` + - ``help(monai._C.InterpolationType)`` Args: grid: Deformation field `(B, Wi, Hi, Di, 2|3)`. shape: shape of the source image. interpolation (int or list[int] , optional): Interpolation order. - Defaults to `1`. + Defaults to `'linear'`. bound (BoundType, or list[BoundType], optional): Boundary conditions. Defaults to `'zero'`. extrapolate (bool, optional): Extrapolate out-of-bound data. 
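
Since `grid_push` is intended as the adjoint of `grid_pull`, one quick sanity
check is the inner-product identity <push(t, g), s> = <t, pull(s, g)>. A
minimal sketch (illustrative only, not part of the patch; shapes are made up):

    import torch
    from monai.networks.layers import grid_pull, grid_push

    s = torch.rand(1, 1, 8, 8, 8, dtype=torch.double)  # source image
    t = torch.rand(1, 1, 4, 4, 4, dtype=torch.double)  # target image
    g = torch.rand(1, 4, 4, 4, 3, dtype=torch.double) * 7
    lhs = (grid_push(t, g, shape=(8, 8, 8)) * s).sum()
    rhs = (t * grid_pull(s, g)).sum()
    # the two scalars should agree up to floating point rounding
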
@@ -299,11 +301,10 @@ def grid_count(grid: torch.Tensor, shape=None, interpolation="linear", bound="ze """ # Convert parameters - bound = ensure_tuple(bound) - interpolation = ensure_tuple(interpolation) - bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in bound] + bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in ensure_tuple(bound)] interpolation = [ - _C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i) for i in interpolation + _C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i) + for i in ensure_tuple(interpolation) ] if shape is None: @@ -325,18 +326,15 @@ def forward(ctx, input, grid, interpolation, bound, extrapolate): @staticmethod def backward(ctx, grad): - var = ctx.saved_variables + if not (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]): + return None, None, None, None, None + var = ctx.saved_tensors opt = ctx.opt - grad_input = grad_grid = None - if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: - grads = _C.grid_grad_backward(grad, *var, *opt) - if ctx.needs_input_grad[0]: - grad_input = grads[0] - if ctx.needs_input_grad[1]: - grad_grid = grads[1] - elif ctx.needs_input_grad[1]: - grad_grid = grads[0] - return grad_input, grad_grid, None, None, None + grads = _C.grid_grad_backward(grad, *var, *opt) + if ctx.needs_input_grad[0]: + return grads[0], grads[1] if ctx.needs_input_grad[1] else None, None, None, None + if ctx.needs_input_grad[1]: + return None, grads[0], None, None, None def grid_grad(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", bound="zero", extrapolate: bool = True): @@ -351,7 +349,9 @@ def grid_grad(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", b - 2 or 'quadratic' or InterpolationType.quadratic - 3 or 'cubic' or InterpolationType.cubic - 4 or 'fourth' or InterpolationType.fourth - - etc. + - 5 or 'fifth' or InterpolationType.fifth + - 6 or 'sixth' or InterpolationType.sixth + - 7 or 'seventh' or InterpolationType.seventh A list of values can be provided, in the order [W, H, D], to specify dimension-specific interpolation orders. @@ -359,14 +359,13 @@ def grid_grad(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", b `bound` can be an int, a string or a BoundType. Possible values are:: - - 0 or 'replicate' or BoundType.replicate - - 1 or 'dct1' or BoundType.dct1 - - 2 or 'dct2' or BoundType.dct2 - - 3 or 'dst1' or BoundType.dst1 - - 4 or 'dst2' or BoundType.dst2 - - 5 or 'dft' or BoundType.dft - - 6 or 'sliding' or BoundType.sliding [not implemented] - - 7 or 'zero' or BoundType.zero + - 0 or 'replicate' or 'nearest' or BoundType.replicate + - 1 or 'dct1' or 'mirror' or BoundType.dct1 + - 2 or 'dct2' or 'reflect' or BoundType.dct2 + - 3 or 'dst1' or 'antimirror' or BoundType.dst1 + - 4 or 'dst2' or 'antireflect' or BoundType.dst2 + - 5 or 'dft' or 'wrap' or BoundType.dft + - 7 or 'zero' or BoundType.zero A list of values can be provided, in the order [W, H, D], to specify dimension-specific boundary conditions. 
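The `[W, H, D]` lists described above work the same way for all four samplers (`grid_pull`, `grid_push`, `grid_count`, `grid_grad`). A hypothetical sketch, assuming the extension is built: a 3D pull in which each spatial axis gets its own interpolation order and boundary condition, with the identity grid constructed the same way as `make_grid` in the new test file:

    import torch

    from monai.networks.layers import grid_pull

    img = torch.rand(1, 1, 8, 8, 8)  # (B, C, W, H, D)
    # identity sampling grid of shape (B, W, H, D, 3), one coordinate per spatial axis
    axes = [torch.arange(8, dtype=torch.float32) for _ in range(3)]
    grid = torch.stack(torch.meshgrid(*axes), dim=-1)[None]
    # one mode per dimension, given in [W, H, D] order
    out = grid_pull(img, grid, interpolation=["linear", "quadratic", "cubic"], bound=["dct2", "wrap", "zero"])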
@@ -378,30 +377,32 @@ def grid_grad(input: torch.Tensor, grid: torch.Tensor, interpolation="linear", b - `dct2` corresponds to Neumann boundary conditions (symmetric) - `dst2` corresponds to Dirichlet boundary conditions (antisymmetric) - See also: + See Also: - https://en.wikipedia.org/wiki/Discrete_cosine_transform - https://en.wikipedia.org/wiki/Discrete_sine_transform + - ``help(monai._C.BoundType)`` + - ``help(monai._C.InterpolationType)`` + Args: input: Input image. `(B, C, Wi, Hi, Di)`. grid: Deformation field. `(B, Wo, Ho, Do, 2|3)`. interpolation (int or list[int] , optional): Interpolation order. - Defaults to `1`. + Defaults to `'linear'`. bound (BoundType, or list[BoundType], optional): Boundary conditions. Defaults to `'zero'`. extrapolate: Extrapolate out-of-bound data. Defaults to `True`. Returns: - output (torch.Tensor): Sampled gradients (B, C, Wo, Ho, Do, 2|3). + output (torch.Tensor): Sampled gradients (B, C, Wo, Ho, Do, 1|2|3). """ # Convert parameters - bound = ensure_tuple(bound) - interpolation = ensure_tuple(interpolation) - bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in bound] + bound = [_C.BoundType.__members__[b] if isinstance(b, str) else _C.BoundType(b) for b in ensure_tuple(bound)] interpolation = [ - _C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i) for i in interpolation + _C.InterpolationType.__members__[i] if isinstance(i, str) else _C.InterpolationType(i) + for i in ensure_tuple(interpolation) ] return _GridGrad.apply(input, grid, interpolation, bound, extrapolate) diff --git a/runtests.sh b/runtests.sh index 76692e731b..1395ccdcfd 100755 --- a/runtests.sh +++ b/runtests.sh @@ -159,6 +159,7 @@ function clean_py { find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".mypy_cache" -exec rm -r "{}" + find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".pytype" -exec rm -r "{}" + find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".coverage" -exec rm -r "{}" + + find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "__pycache__" -exec rm -r "{}" + } function torch_validate { diff --git a/setup.py b/setup.py index 9b20df845a..426866428c 100644 --- a/setup.py +++ b/setup.py @@ -26,6 +26,7 @@ FORCE_CUDA = os.getenv("FORCE_CUDA", "0") == "1" # flag ignored if BUILD_MONAI is False BUILD_CPP = BUILD_CUDA = False +TORCH_VERSION = 0 try: import torch @@ -35,14 +36,13 @@ BUILD_CPP = True from torch.utils.cpp_extension import CUDA_HOME, CUDAExtension - BUILD_CUDA = (torch.cuda.is_available() and (CUDA_HOME is not None)) or FORCE_CUDA + BUILD_CUDA = (CUDA_HOME is not None) if torch.cuda.is_available() else FORCE_CUDA _pt_version = pkg_resources.parse_version(torch.__version__).release # type: ignore[attr-defined] if _pt_version is None or len(_pt_version) < 3: raise AssertionError("unknown torch version") TORCH_VERSION = int(_pt_version[0]) * 10000 + int(_pt_version[1]) * 100 + int(_pt_version[2]) except (ImportError, TypeError, AssertionError, AttributeError) as e: - TORCH_VERSION = 0 warnings.warn(f"extension build skipped: {e}") finally: if not RUN_BUILD: diff --git a/tests/test_enum_bound_interp.py b/tests/test_enum_bound_interp.py new file mode 100644 index 0000000000..f788f8ba17 --- /dev/null +++ b/tests/test_enum_bound_interp.py @@ -0,0 +1,73 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from monai.utils import optional_import +from tests.utils import skip_if_no_cpp_extension + +b, _ = optional_import("monai._C", name="BoundType") +p, _ = optional_import("monai._C", name="InterpolationType") + + +@skip_if_no_cpp_extension +class TestEnumBoundInterp(unittest.TestCase): + def test_bound(self): + self.assertEqual(str(b.replicate), "BoundType.replicate") + self.assertEqual(str(b.nearest), "BoundType.replicate") + self.assertEqual(str(b.dct1), "BoundType.dct1") + self.assertEqual(str(b.mirror), "BoundType.dct1") + self.assertEqual(str(b.dct2), "BoundType.dct2") + self.assertEqual(str(b.reflect), "BoundType.dct2") + self.assertEqual(str(b.dst1), "BoundType.dst1") + self.assertEqual(str(b.antimirror), "BoundType.dst1") + self.assertEqual(str(b.dst2), "BoundType.dst2") + self.assertEqual(str(b.antireflect), "BoundType.dst2") + self.assertEqual(str(b.dft), "BoundType.dft") + self.assertEqual(str(b.wrap), "BoundType.dft") + self.assertEqual(str(b.zero), "BoundType.zero") + + self.assertEqual(int(b.replicate), 0) + self.assertEqual(int(b.nearest), 0) + self.assertEqual(int(b.dct1), 1) + self.assertEqual(int(b.mirror), 1) + self.assertEqual(int(b.dct2), 2) + self.assertEqual(int(b.reflect), 2) + self.assertEqual(int(b.dst1), 3) + self.assertEqual(int(b.antimirror), 3) + self.assertEqual(int(b.dst2), 4) + self.assertEqual(int(b.antireflect), 4) + self.assertEqual(int(b.dft), 5) + self.assertEqual(int(b.wrap), 5) + self.assertEqual(int(b.zero), 7) + + def test_interp(self): + self.assertEqual(str(p.nearest), "InterpolationType.nearest") + self.assertEqual(str(p.linear), "InterpolationType.linear") + self.assertEqual(str(p.quadratic), "InterpolationType.quadratic") + self.assertEqual(str(p.cubic), "InterpolationType.cubic") + self.assertEqual(str(p.fourth), "InterpolationType.fourth") + self.assertEqual(str(p.fifth), "InterpolationType.fifth") + self.assertEqual(str(p.sixth), "InterpolationType.sixth") + self.assertEqual(str(p.seventh), "InterpolationType.seventh") + + self.assertEqual(int(p.nearest), 0) + self.assertEqual(int(p.linear), 1) + self.assertEqual(int(p.quadratic), 2) + self.assertEqual(int(p.cubic), 3) + self.assertEqual(int(p.fourth), 4) + self.assertEqual(int(p.fifth), 5) + self.assertEqual(int(p.sixth), 6) + self.assertEqual(int(p.seventh), 7) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_grid_pull.py b/tests/test_grid_pull.py new file mode 100644 index 0000000000..9e4d2e8237 --- /dev/null +++ b/tests/test_grid_pull.py @@ -0,0 +1,94 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.networks.layers import grid_pull +from monai.utils import optional_import +from tests.testing_data.cpp_resample_answers import Expected_1D_GP_bwd, Expected_1D_GP_fwd +from tests.utils import skip_if_no_cpp_extension + +BType, has_b_type = optional_import("monai._C", name="BoundType") +PType, has_p_type = optional_import("monai._C", name="InterpolationType") + + +def make_grid(shape, dtype=None, device=None, requires_grad=True): + ranges = [torch.arange(float(s), dtype=dtype, device=device, requires_grad=requires_grad) for s in shape] + grid = torch.stack(torch.meshgrid(*ranges), dim=-1) + return grid[None] + + +# 1D combinations of bounds/interpolations +bounds = set(BType.__members__.values()) if has_b_type else [] +interps = set(PType.__members__.values()) if has_p_type else [] +device = "cuda" if torch.cuda.is_available() else "cpu" +TEST_1D_GP = [] +for bound in bounds: + for interp in interps: + if not Expected_1D_GP_fwd or not Expected_1D_GP_bwd: + break # skip if the testing data are unavailable + expected_val = Expected_1D_GP_fwd.pop(0) + + for input_g in (True, False): + for grid_g in (True, False): + expected_grad = Expected_1D_GP_bwd.pop(0) + test_case = [ + { + "input": torch.arange(10, dtype=torch.float, requires_grad=input_g, device=device).reshape( + (1, 1, 10) + ), + "grid": make_grid((20,), dtype=torch.float, device=device, requires_grad=grid_g) + 0.5, + "interpolation": interp, + "bound": bound, + }, + { + "val": torch.tensor([[expected_val]]), + "device": device, + "grad": torch.tensor(expected_grad), + }, + ] + TEST_1D_GP.append(test_case) + + +@skip_if_no_cpp_extension +class TestGridPull(unittest.TestCase): + @parameterized.expand(TEST_1D_GP, skip_on_empty=True) + def test_grid_pull(self, input_param, expected): + result = grid_pull(**input_param) + if input_param["input"].requires_grad: + input_param["input"].retain_grad() + if input_param["grid"].requires_grad: + input_param["grid"].retain_grad() + if input_param["input"].requires_grad or input_param["grid"].requires_grad: + result.sum().backward() + + grads = [] + if input_param["input"].requires_grad: + grads.append(input_param["input"].grad.view(-1)) + if input_param["grid"].requires_grad: + grads.append(input_param["grid"].grad.view(-1)) + if not grads: + grads = torch.tensor(0.0, device=result.device) + elif len(grads) == 1: + grads = grads[0] + else: + grads = torch.cat(grads, dim=0) + self.assertTrue("{}".format(result.device).startswith(expected["device"])) + np.testing.assert_allclose(result.detach().cpu().numpy(), expected["val"].cpu().numpy(), rtol=1e-4, atol=1e-4) + np.testing.assert_allclose(grads.detach().cpu().numpy(), expected["grad"].cpu().numpy(), rtol=1e-4, atol=1e-4) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/testing_data/1D_BP_bwd.txt b/tests/testing_data/1D_BP_bwd.txt new file mode 100644 index 0000000000..de43270e94 --- /dev/null +++ b/tests/testing_data/1D_BP_bwd.txt @@ -0,0 +1,224 @@ +0., 1., 1., 1., 1., 1., 1., 1., 1.,12., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., # InterpolationType.nearest BoundType.replicate +0., 1., 1., 1., 1., 1., 1., 1., 1.,12., # InterpolationType.nearest BoundType.replicate +0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.replicate +0., # InterpolationType.nearest BoundType.replicate +0.5, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,11.5, 1. , 1. 
, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.linear BoundType.replicate +0.5, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,11.5, # InterpolationType.linear BoundType.replicate +1.,1.,1.,1.,1.,1.,1.,1.,1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.linear BoundType.replicate +0., # InterpolationType.linear BoundType.replicate +0.5, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,11.5, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.quadratic BoundType.replicate +0.5, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,11.5, # InterpolationType.quadratic BoundType.replicate +1.,1.,1.,1.,1.,1.,1.,1.,1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.quadratic BoundType.replicate +0., # InterpolationType.quadratic BoundType.replicate +0.5208333 , 0.9791666 , 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994,11.5 , 0.875 , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.875 , 0.125 , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.cubic BoundType.replicate +0.5208333 , 0.9791666 , 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994,11.5 , # InterpolationType.cubic BoundType.replicate +0.875,1. ,1. ,1. ,1. ,1. ,1. ,1. ,0.875,0.125,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. , # InterpolationType.cubic BoundType.replicate +0., # InterpolationType.cubic BoundType.replicate +0.5416667 , 0.9583334 , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,11.5 , 0.8333334 , 1. , 1. , 1. , 1. , 1. , 0.9999999 , 1. , 0.833333 , 0.16666651, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.fourth BoundType.replicate +0.5416667, 0.9583334, 1. , 1. , 1. , 1. , 1. , 1. , 1. ,11.5 , # InterpolationType.fourth BoundType.replicate +0.8333334 ,1. ,1. ,1. ,1. ,1. ,0.9999999 ,1. ,0.833333 ,0.16666651,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. , # InterpolationType.fourth BoundType.replicate +0., # InterpolationType.fourth BoundType.replicate +5.6223959e-01,9.3802083e-01,9.9973959e-01,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.1499999e+01,7.9947913e-01,9.9739581e-01,1.0000000e+00,1.0000000e+00,9.9999994e-01,1.0000001e+00,9.9999976e-01,9.9739575e-01,7.9947948e-01,2.0052099e-01,2.6040077e-03,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.fifth BoundType.replicate +0.5622396, 0.9380208, 0.9997396, 1. , 1. , 1. , 1. , 1. , 1. ,11.499999 , # InterpolationType.fifth BoundType.replicate +0.7994791 ,0.9973958 ,1. ,1. ,0.99999994,1.0000001 ,0.99999976,0.99739575,0.7994795 ,0.20052099,0.00260401,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. , # InterpolationType.fifth BoundType.replicate +0., # InterpolationType.fifth BoundType.replicate +5.8194447e-01,9.1944444e-01,9.9861109e-01,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.1499997e+01,7.7499998e-01,9.9166673e-01,1.0000000e+00,1.0000000e+00,1.0000000e+00,9.9999982e-01,1.0000004e+00,9.9166673e-01,7.7499980e-01,2.2500010e-01,8.3333999e-03,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07, # InterpolationType.sixth BoundType.replicate +0.58194447, 0.91944444, 0.9986111 , 1. , 1. , 1. , 1. , 1. , 1. 
,11.499997 , # InterpolationType.sixth BoundType.replicate +7.7499998e-01,9.9166673e-01,1.0000000e+00,1.0000000e+00,1.0000000e+00,9.9999982e-01,1.0000004e+00,9.9166673e-01,7.7499980e-01,2.2500010e-01,8.3333999e-03,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07,1.9371510e-07, # InterpolationType.sixth BoundType.replicate +0., # InterpolationType.sixth BoundType.replicate +6.0078436e-01,9.0259641e-01,9.9662077e-01,9.9999845e-01,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.0000000e+00,1.1500004e+01,7.5551212e-01,9.8430985e-01,9.9997836e-01,9.9999994e-01,1.0000000e+00,1.0000001e+00,9.9997842e-01,9.8431003e-01,7.5551212e-01,2.4448761e-01,1.5690181e-02,2.1788481e-05,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07, # InterpolationType.seventh BoundType.replicate +0.60078436, 0.9025964 , 0.9966208 , 0.99999845, 1. , 1. , 1. , 1. , 1. ,11.500004 , # InterpolationType.seventh BoundType.replicate +7.5551212e-01,9.8430985e-01,9.9997836e-01,9.9999994e-01,1.0000000e+00,1.0000001e+00,9.9997842e-01,9.8431003e-01,7.5551212e-01,2.4448761e-01,1.5690181e-02,2.1788481e-05,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07,3.3080869e-07, # InterpolationType.seventh BoundType.replicate +0., # InterpolationType.seventh BoundType.replicate +1.,3.,3.,2.,2.,2.,2.,2.,2.,1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dct1 +1.,3.,3.,2.,2.,2.,2.,2.,2.,1., # InterpolationType.nearest BoundType.dct1 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dct1 +0., # InterpolationType.nearest BoundType.dct1 +1.5, 3. , 2.5, 2. , 2. , 2. , 2. , 2. , 2. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. , 1. , 1. , # InterpolationType.linear BoundType.dct1 +1.5,3. ,2.5,2. ,2. ,2. ,2. ,2. ,2. ,1. , # InterpolationType.linear BoundType.dct1 +1., 1., 1., 1., 1., 1., 1., 1., 1.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1., 1., 1., # InterpolationType.linear BoundType.dct1 +0., # InterpolationType.linear BoundType.dct1 +1.5, 3. , 2.5, 2. , 2. , 2. , 2. , 2. , 2. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. , 1. , 1. , # InterpolationType.quadratic BoundType.dct1 +1.5,3. ,2.5,2. ,2. ,2. ,2. ,2. ,2. ,1. , # InterpolationType.quadratic BoundType.dct1 +1., 1., 1., 1., 1., 1., 1., 1., 1.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1., 1., 1., # InterpolationType.quadratic BoundType.dct1 +0., # InterpolationType.quadratic BoundType.dct1 +1.5 , 2.9791667 , 2.5 , 2.0208333 , 1.9999999 , 1.9999999 , 1.9999999 , 1.9999999 , 1.9999999 , 0.99999994, 0.75 , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.75 ,-0.75 ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-0.75 , 0.75 , 1. , # InterpolationType.cubic BoundType.dct1 +1.5 ,2.9791667 ,2.5 ,2.0208333 ,1.9999999 ,1.9999999 ,1.9999999 ,1.9999999 ,1.9999999 ,0.99999994, # InterpolationType.cubic BoundType.dct1 +0.75, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.75,-0.75,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-0.75, 0.75, 1. , # InterpolationType.cubic BoundType.dct1 +0., # InterpolationType.cubic BoundType.dct1 +1.5 , 2.9583333 , 2.5 , 2.0416667 , 2. , 2. , 2. , 2. , 2. , 1. , 0.6666666 , 1. , 1. , 1. , 1. , 1. , 0.9999999 , 1. , 0.6666664 ,-0.66666675,-1. ,-1.0000001 ,-1.0000002 ,-1. ,-1.0000001 ,-1.0000001 ,-1. ,-0.6666667 , 0.6666666 , 1. 
, # InterpolationType.fourth BoundType.dct1 +1.5 ,2.9583333,2.5 ,2.0416667,2. ,2. ,2. ,2. ,2. ,1. , # InterpolationType.fourth BoundType.dct1 +0.6666666 , 1. , 1. , 1. , 1. , 1. , 0.9999999 , 1. , 0.6666664 ,-0.66666675,-1. ,-1.0000001 ,-1.0000002 ,-1. ,-1.0000001 ,-1.0000001 ,-1. ,-0.6666667 , 0.6666666 , 1. , # InterpolationType.fourth BoundType.dct1 +0., # InterpolationType.fourth BoundType.dct1 +1.4997395 , 2.9380207 , 2.5 , 2.061979 , 2.0002604 , 2. , 2. , 2. , 2. , 1. , 0.5989583 , 0.9947917 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.99479157, 0.5989587 ,-0.59895825,-0.9947917 ,-0.9999998 ,-1.0000002 ,-1. ,-0.9999998 ,-1. ,-0.9947917 ,-0.5989583 , 0.5989583 , 0.9947917 , # InterpolationType.fifth BoundType.dct1 +1.4997395,2.9380207,2.5 ,2.061979 ,2.0002604,2. ,2. ,2. ,2. ,1. , # InterpolationType.fifth BoundType.dct1 +0.5989583 , 0.9947917 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.99479157, 0.5989587 ,-0.59895825,-0.9947917 ,-0.9999998 ,-1.0000002 ,-1. ,-0.9999998 ,-1. ,-0.9947917 ,-0.5989583 , 0.5989583 , 0.9947917 , # InterpolationType.fifth BoundType.dct1 +0., # InterpolationType.fifth BoundType.dct1 +1.498611 , 2.919444 , 2.5 , 2.0805554 , 2.0013888 , 2. , 2. , 2. , 2. , 1. , 0.54999995, 0.9833334 , 1. , 1. , 1. , 0.9999998 , 1.0000004 , 0.9833334 , 0.5499998 ,-0.5499999 ,-0.9833334 ,-1.0000004 ,-1.0000001 ,-1.0000001 ,-1. ,-0.99999994,-0.98333335,-0.55 , 0.54999995, 0.9833334 , # InterpolationType.sixth BoundType.dct1 +1.498611 ,2.919444 ,2.5 ,2.0805554,2.0013888,2. ,2. ,2. ,2. ,1. , # InterpolationType.sixth BoundType.dct1 +0.54999995, 0.9833334 , 1. , 1. , 1. , 0.9999998 , 1.0000004 , 0.9833334 , 0.5499998 ,-0.5499999 ,-0.9833334 ,-1.0000004 ,-1.0000001 ,-1.0000001 ,-1. ,-0.99999994,-0.98333335,-0.55 , 0.54999995, 0.9833334 , # InterpolationType.sixth BoundType.dct1 +0., # InterpolationType.sixth BoundType.dct1 +1.4966209 , 2.9025953 , 2.5000002 , 2.097404 , 2.0033796 , 2.000002 , 2.0000002 , 2.0000002 , 2.0000002 , 1. , 0.5110243 , 0.9686197 , 0.99995667, 0.99999994, 1. , 1.0000001 , 0.9999567 , 0.96861994, 0.51102436,-0.5110245 ,-0.9686197 ,-0.99995685,-1. ,-1. ,-1.0000001 ,-0.99995655,-0.9686198 ,-0.5110243 , 0.5110243 , 0.9686197 , # InterpolationType.seventh BoundType.dct1 +1.4966209,2.9025953,2.5000002,2.097404 ,2.0033796,2.000002 ,2.0000002,2.0000002,2.0000002,1. , # InterpolationType.seventh BoundType.dct1 +0.5110243 , 0.9686197 , 0.99995667, 0.99999994, 1. , 1.0000001 , 0.9999567 , 0.96861994, 0.51102436,-0.5110245 ,-0.9686197 ,-0.99995685,-1. ,-1. 
,-1.0000001 ,-0.99995655,-0.9686198 ,-0.5110243 , 0.5110243 , 0.9686197 , # InterpolationType.seventh BoundType.dct1 +0., # InterpolationType.seventh BoundType.dct1 +2.,2.,2.,2.,2.,2.,2.,2.,2.,2.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dct2 +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.nearest BoundType.dct2 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dct2 +0., # InterpolationType.nearest BoundType.dct2 +2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1., 0., # InterpolationType.linear BoundType.dct2 +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.linear BoundType.dct2 +1., 1., 1., 1., 1., 1., 1., 1., 1., 0.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1., 0., # InterpolationType.linear BoundType.dct2 +0., # InterpolationType.linear BoundType.dct2 +2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1., 0., # InterpolationType.quadratic BoundType.dct2 +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.quadratic BoundType.dct2 +1., 1., 1., 1., 1., 1., 1., 1., 1., 0.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1.,-1., 0., # InterpolationType.quadratic BoundType.dct2 +0., # InterpolationType.quadratic BoundType.dct2 +1.9999999, 2. , 1.9999999, 1.9999999, 1.9999999, 1.9999999, 1.9999999, 1.9999999, 1.9999999, 2. , 0.875 , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.875 , 0. ,-0.875 ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-0.875 , 0. , # InterpolationType.cubic BoundType.dct2 +1.9999999,2. ,1.9999999,1.9999999,1.9999999,1.9999999,1.9999999,1.9999999,1.9999999,2. , # InterpolationType.cubic BoundType.dct2 +0.875, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.875, 0. ,-0.875,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-1. ,-0.875, 0. , # InterpolationType.cubic BoundType.dct2 +0., # InterpolationType.cubic BoundType.dct2 +2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 8.3333337e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 9.9999988e-01, 1.0000000e+00, 8.3333302e-01,-1.1920929e-07,-8.3333325e-01,-1.0000000e+00,-1.0000001e+00,-1.0000002e+00,-1.0000000e+00,-1.0000001e+00,-1.0000001e+00,-1.0000000e+00,-8.3333337e-01, 0., # InterpolationType.fourth BoundType.dct2 +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.fourth BoundType.dct2 +8.3333337e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 9.9999988e-01, 1.0000000e+00, 8.3333302e-01,-1.1920929e-07,-8.3333325e-01,-1.0000000e+00,-1.0000001e+00,-1.0000002e+00,-1.0000000e+00,-1.0000001e+00,-1.0000001e+00,-1.0000000e+00,-8.3333337e-01, 0., # InterpolationType.fourth BoundType.dct2 +0., # InterpolationType.fourth BoundType.dct2 +2.0000000e+00, 1.9999999e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 7.9687500e-01, 9.9739581e-01, 1.0000000e+00, 1.0000000e+00, 9.9999994e-01, 1.0000001e+00, 9.9999976e-01, 9.9739575e-01, 7.9687530e-01, 1.6018748e-07,-7.9687524e-01,-9.9739569e-01,-9.9999982e-01,-1.0000002e+00,-1.0000000e+00,-9.9999982e-01,-1.0000000e+00,-9.9739587e-01,-7.9687494e-01, 5.1222742e-09, # InterpolationType.fifth BoundType.dct2 +2. ,1.9999999,2. ,2. ,2. ,2. ,2. ,2. ,2. ,2. 
, # InterpolationType.fifth BoundType.dct2 +7.9687500e-01, 9.9739581e-01, 1.0000000e+00, 1.0000000e+00, 9.9999994e-01, 1.0000001e+00, 9.9999976e-01, 9.9739575e-01, 7.9687530e-01, 1.6018748e-07,-7.9687524e-01,-9.9739569e-01,-9.9999982e-01,-1.0000002e+00,-1.0000000e+00,-9.9999982e-01,-1.0000000e+00,-9.9739587e-01,-7.9687494e-01, 5.1222742e-09, # InterpolationType.fifth BoundType.dct2 +0., # InterpolationType.fifth BoundType.dct2 +2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 2.0000000e+00, 7.6666665e-01, 9.9166673e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 9.9999982e-01, 1.0000004e+00, 9.9166673e-01, 7.6666647e-01, 5.9604645e-08,-7.6666659e-01,-9.9166662e-01,-1.0000004e+00,-1.0000001e+00,-1.0000001e+00,-1.0000000e+00,-9.9999994e-01,-9.9166667e-01,-7.6666665e-01, 1.8626451e-09, # InterpolationType.sixth BoundType.dct2 +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.sixth BoundType.dct2 +7.6666665e-01, 9.9166673e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 9.9999982e-01, 1.0000004e+00, 9.9166673e-01, 7.6666647e-01, 5.9604645e-08,-7.6666659e-01,-9.9166662e-01,-1.0000004e+00,-1.0000001e+00,-1.0000001e+00,-1.0000000e+00,-9.9999994e-01,-9.9166667e-01,-7.6666665e-01, 1.8626451e-09, # InterpolationType.sixth BoundType.dct2 +0., # InterpolationType.sixth BoundType.dct2 +2.0000002e+00, 2.0000000e+00, 2.0000000e+00, 2.0000002e+00, 2.0000002e+00, 2.0000002e+00, 2.0000002e+00, 2.0000002e+00, 2.0000002e+00, 2.0000002e+00, 7.3982203e-01, 9.8428816e-01, 9.9997836e-01, 9.9999994e-01, 1.0000000e+00, 1.0000001e+00, 9.9997842e-01, 9.8428833e-01, 7.3982203e-01,-1.6936974e-07,-7.3982191e-01,-9.8428810e-01,-9.9997830e-01,-1.0000000e+00,-1.0000000e+00,-1.0000001e+00,-9.9997824e-01,-9.8428822e-01,-7.3982203e-01,-2.7284841e-09, # InterpolationType.seventh BoundType.dct2 +2.0000002,2. ,2. 
,2.0000002,2.0000002,2.0000002,2.0000002,2.0000002,2.0000002,2.0000002, # InterpolationType.seventh BoundType.dct2 +7.3982203e-01, 9.8428816e-01, 9.9997836e-01, 9.9999994e-01, 1.0000000e+00, 1.0000001e+00, 9.9997842e-01, 9.8428833e-01, 7.3982203e-01,-1.6936974e-07,-7.3982191e-01,-9.8428810e-01,-9.9997830e-01,-1.0000000e+00,-1.0000000e+00,-1.0000001e+00,-9.9997824e-01,-9.8428822e-01,-7.3982203e-01,-2.7284841e-09, # InterpolationType.seventh BoundType.dct2 +0., # InterpolationType.seventh BoundType.dct2 +-1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., # InterpolationType.nearest BoundType.dst1 +-1., 0., 0., 0., 0., 0., 0., 0., 0., 0., # InterpolationType.nearest BoundType.dst1 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dst1 +0., # InterpolationType.nearest BoundType.dst1 +0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-9.,-9., 1., 1., 1., 1., 1., 1., 1., 1., 1., # InterpolationType.linear BoundType.dst1 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.linear BoundType.dst1 +1., 1., 1., 1., 1., 1., 1., 1., 1.,-9.,-9., 1., 1., 1., 1., 1., 1., 1., 1., 1., # InterpolationType.linear BoundType.dst1 +0., # InterpolationType.linear BoundType.dst1 +0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-9.,-9., 1., 1., 1., 1., 1., 1., 1., 1., 1., # InterpolationType.quadratic BoundType.dst1 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.quadratic BoundType.dst1 +1., 1., 1., 1., 1., 1., 1., 1., 1.,-9.,-9., 1., 1., 1., 1., 1., 1., 1., 1., 1., # InterpolationType.quadratic BoundType.dst1 +0., # InterpolationType.quadratic BoundType.dst1 +0., 0.,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08, 8.7500000e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00,-2.5000000e-01,-7.7500000e+00,-7.7500000e+00,-2.5000000e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 8.7500000e-01, # InterpolationType.cubic BoundType.dst1 +0., 0.,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08, # InterpolationType.cubic BoundType.dst1 +0.875, 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-0.25 ,-7.75 ,-7.75 ,-0.25 , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.875, # InterpolationType.cubic BoundType.dst1 +0., # InterpolationType.cubic BoundType.dst1 +0., 0.,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-1.1175871e-08, 8.3333337e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 9.9999988e-01, 1.0000000e+00,-6.6666698e-01,-7.3333335e+00,-7.3333335e+00,-6.6666675e-01, 1.0000000e+00, 1.0000001e+00, 1.0000002e+00, 1.0000000e+00, 1.0000001e+00, 1.0000001e+00, 1.0000000e+00, 8.3333337e-01, # InterpolationType.fourth BoundType.dst1 +0., 0.,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-1.1175871e-08, # InterpolationType.fourth BoundType.dst1 +0.8333334 , 1. , 1. , 1. , 1. , 1. , 0.9999999 , 1. ,-0.666667 ,-7.3333335 ,-7.3333335 ,-0.66666675, 1. , 1.0000001 , 1.0000002 , 1. , 1.0000001 , 1.0000001 , 1. 
, 0.8333334 , # InterpolationType.fourth BoundType.dst1 +0., # InterpolationType.fourth BoundType.dst1 +3.9872248e-09, 0., 1.1175871e-08, 7.1886461e-09, 7.1886461e-09, 7.1886461e-09, 7.1886461e-09, 7.1886461e-09, 7.1886461e-09, 7.1886461e-09, 7.9947913e-01, 9.9739581e-01, 1.0000000e+00, 1.0000000e+00, 9.9999994e-01, 1.0000001e+00, 9.9999976e-01, 9.7395825e-01,-1.0052080e+00,-6.9687500e+00,-6.9687500e+00,-1.0052083e+00, 9.7395819e-01, 9.9999982e-01, 1.0000002e+00, 1.0000000e+00, 9.9999982e-01, 1.0000000e+00, 9.9739587e-01, 7.9947913e-01, # InterpolationType.fifth BoundType.dst1 +3.9872248e-09,0.,1.1175871e-08,7.1886461e-09,7.1886461e-09,7.1886461e-09,7.1886461e-09,7.1886461e-09,7.1886461e-09,7.1886461e-09, # InterpolationType.fifth BoundType.dst1 +0.7994791 , 0.9973958 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.97395825,-1.005208 ,-6.96875 ,-6.96875 ,-1.0052083 , 0.9739582 , 0.9999998 , 1.0000002 , 1. , 0.9999998 , 1. , 0.9973959 , 0.7994791 , # InterpolationType.fifth BoundType.dst1 +0., # InterpolationType.fifth BoundType.dst1 +4.1094609e-08, 0.,-1.4901161e-08, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09,-2.6193447e-08, 7.7499998e-01, 9.9166673e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 9.9999982e-01, 1.0000004e+00, 9.1666675e-01,-1.2500002e+00,-6.6666665e+00,-6.6666665e+00,-1.2500000e+00, 9.1666681e-01, 1.0000004e+00, 1.0000001e+00, 1.0000001e+00, 1.0000000e+00, 9.9999994e-01, 9.9166667e-01, 7.7499998e-01, # InterpolationType.sixth BoundType.dst1 +4.1094609e-08, 0.,-1.4901161e-08, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09,-2.6193447e-08, # InterpolationType.sixth BoundType.dst1 +0.775 , 0.99166673, 1. , 1. , 1. , 0.9999998 , 1.0000004 , 0.91666675,-1.2500002 ,-6.6666665 ,-6.6666665 ,-1.25 , 0.9166668 , 1.0000004 , 1.0000001 , 1.0000001 , 1. , 0.99999994, 0.9916667 , 0.775 , # InterpolationType.sixth BoundType.dst1 +0., # InterpolationType.sixth BoundType.dst1 +-9.7788870e-09, 3.7846348e-10,-7.4505806e-09, 2.3283064e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 7.5553381e-01, 9.8430985e-01, 9.9997836e-01, 9.9999994e-01, 1.0000000e+00, 1.0000001e+00, 9.9978310e-01, 8.4309906e-01,-1.4446614e+00,-6.3982205e+00,-6.3982205e+00,-1.4446614e+00, 8.4309900e-01, 9.9978304e-01, 1.0000000e+00, 1.0000000e+00, 1.0000001e+00, 9.9997824e-01, 9.8430991e-01, 7.5553381e-01, # InterpolationType.seventh BoundType.dst1 +-9.7788870e-09, 3.7846348e-10,-7.4505806e-09, 2.3283064e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, # InterpolationType.seventh BoundType.dst1 +0.7555338 , 0.98430985, 0.99997836, 0.99999994, 1. , 1.0000001 , 0.9997831 , 0.84309906,-1.4446614 ,-6.3982205 ,-6.3982205 ,-1.4446614 , 0.843099 , 0.99978304, 1. , 1. 
, 1.0000001 , 0.99997824, 0.9843099 , 0.7555338 , # InterpolationType.seventh BoundType.dst1 +0., # InterpolationType.seventh BoundType.dst1 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dst2 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dst2 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dst2 +0., # InterpolationType.nearest BoundType.dst2 + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-18., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., # InterpolationType.linear BoundType.dst2 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.linear BoundType.dst2 + 1., 1., 1., 1., 1., 1., 1., 1., 1.,-18., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., # InterpolationType.linear BoundType.dst2 +0., # InterpolationType.linear BoundType.dst2 + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-18., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., # InterpolationType.quadratic BoundType.dst2 +0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.quadratic BoundType.dst2 + 1., 1., 1., 1., 1., 1., 1., 1., 1.,-18., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., # InterpolationType.quadratic BoundType.dst2 +0., # InterpolationType.quadratic BoundType.dst2 +0., 0.,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08, 9.3132257e-09, 8.7500000e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00,-1.3750000e+00,-1.3250000e+01,-1.3750000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 8.7500000e-01, 2.5000000e-01, # InterpolationType.cubic BoundType.dst2 +0., 0.,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08,-2.0489097e-08, 9.3132257e-09, # InterpolationType.cubic BoundType.dst2 + 0.875, 1. , 1. , 1. , 1. , 1. , 1. , 1. , -1.375,-13.25 , -1.375, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.875, 0.25 , # InterpolationType.cubic BoundType.dst2 +0., # InterpolationType.cubic BoundType.dst2 +0., 0.,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-1.1175871e-08, 8.3333337e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 9.9999988e-01, 1.0000000e+00,-2.1666670e+00,-1.1666667e+01,-2.1666667e+00, 1.0000000e+00, 1.0000001e+00, 1.0000002e+00, 1.0000000e+00, 1.0000001e+00, 1.0000001e+00, 1.0000000e+00, 8.3333337e-01, 3.3333334e-01, # InterpolationType.fourth BoundType.dst2 +0., 0.,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-4.0978193e-08,-1.1175871e-08, # InterpolationType.fourth BoundType.dst2 + 0.8333334 , 1. , 1. , 1. , 1. , 1. , 0.9999999 , 1. , -2.166667 ,-11.666667 , -2.1666667 , 1. , 1.0000001 , 1.0000002 , 1. , 1.0000001 , 1.0000001 , 1. 
, 0.8333334 , 0.33333334, # InterpolationType.fourth BoundType.dst2 +0., # InterpolationType.fourth BoundType.dst2 +0., 3.7252903e-09, 1.1175871e-08, 7.1886461e-09, 7.1886461e-09, 7.1886461e-09, 7.1886461e-09, 7.1886461e-09, 7.1886461e-09, 1.0913936e-08, 8.0208331e-01, 9.9739581e-01, 1.0000000e+00, 1.0000000e+00, 9.9999994e-01, 1.0000001e+00, 9.9999976e-01, 9.5052075e-01,-2.7604163e+00,-1.0380208e+01,-2.7604165e+00, 9.5052069e-01, 9.9999982e-01, 1.0000002e+00, 1.0000000e+00, 9.9999982e-01, 1.0000000e+00, 9.9739587e-01, 8.0208331e-01, 4.0104166e-01, # InterpolationType.fifth BoundType.dst2 +0.,3.7252903e-09,1.1175871e-08,7.1886461e-09,7.1886461e-09,7.1886461e-09,7.1886461e-09,7.1886461e-09,7.1886461e-09,1.0913936e-08, # InterpolationType.fifth BoundType.dst2 + 0.8020833 , 0.9973958 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.95052075, -2.7604163 ,-10.380208 , -2.7604165 , 0.9505207 , 0.9999998 , 1.0000002 , 1. , 0.9999998 , 1. , 0.9973959 , 0.8020833 , 0.40104166, # InterpolationType.fifth BoundType.dst2 +0., # InterpolationType.fifth BoundType.dst2 +5.9604645e-08,-1.4901161e-08,-1.4901161e-08, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09,-1.1292286e-08, 7.8333330e-01, 9.9166673e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 9.9999982e-01, 1.0000004e+00, 8.4166676e-01,-3.1166668e+00,-9.4499998e+00,-3.1166666e+00, 8.4166652e-01, 1.0000004e+00, 1.0000001e+00, 1.0000001e+00, 1.0000000e+00, 9.9999994e-01, 9.9166667e-01, 7.8333330e-01, 4.5000002e-01, # InterpolationType.sixth BoundType.dst2 +5.9604645e-08,-1.4901161e-08,-1.4901161e-08, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09, 3.6088750e-09,-1.1292286e-08, # InterpolationType.sixth BoundType.dst2 +0.7833333 , 0.99166673, 1. , 1. , 1. , 0.9999998 , 1.0000004 , 0.84166676,-3.1166668 ,-9.45 ,-3.1166666 , 0.8416665 , 1.0000004 , 1.0000001 , 1.0000001 , 1. , 0.99999994, 0.9916667 , 0.7833333 , 0.45000002, # InterpolationType.sixth BoundType.dst2 +0., # InterpolationType.sixth BoundType.dst2 +0.,-7.4505806e-09,-6.9849193e-09, 2.3283064e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09,-5.0350764e-09, 7.7120221e-01, 9.8433155e-01, 9.9997836e-01, 9.9999994e-01, 1.0000000e+00, 1.0000001e+00, 9.9958777e-01, 7.0230043e-01,-3.3471570e+00,-8.7094622e+00,-3.3471570e+00, 7.0230043e-01, 9.9958777e-01, 1.0000000e+00, 1.0000000e+00, 1.0000001e+00, 9.9997824e-01, 9.8433161e-01, 7.7120221e-01, 4.8897570e-01, # InterpolationType.seventh BoundType.dst2 +0.,-7.4505806e-09,-6.9849193e-09, 2.3283064e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09, 1.9498430e-09,-5.0350764e-09, # InterpolationType.seventh BoundType.dst2 +0.7712022 , 0.98433155, 0.99997836, 0.99999994, 1. , 1.0000001 , 0.9995878 , 0.7023004 ,-3.347157 ,-8.709462 ,-3.347157 , 0.7023004 , 0.9995878 , 1. , 1. 
, 1.0000001 , 0.99997824, 0.9843316 , 0.7712022 , 0.4889757 , # InterpolationType.seventh BoundType.dst2 +0., # InterpolationType.seventh BoundType.dst2 +2.,2.,2.,2.,2.,2.,2.,2.,2.,2.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dft +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.nearest BoundType.dft +0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.dft +0., # InterpolationType.nearest BoundType.dft +2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., # InterpolationType.linear BoundType.dft +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.linear BoundType.dft +1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., # InterpolationType.linear BoundType.dft +0., # InterpolationType.linear BoundType.dft +2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., # InterpolationType.quadratic BoundType.dft +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.quadratic BoundType.dft +1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., 1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., # InterpolationType.quadratic BoundType.dft +0., # InterpolationType.quadratic BoundType.dft +2. , 2. , 1.9999999, 1.9999999, 1.9999999, 1.9999999, 1.9999999, 1.9999999, 1.9999999, 2. ,-0.25 , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-0.25 ,-6.5 ,-0.25 , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-0.25 ,-6.5 , # InterpolationType.cubic BoundType.dft +2. ,2. ,1.9999999,1.9999999,1.9999999,1.9999999,1.9999999,1.9999999,1.9999999,2. , # InterpolationType.cubic BoundType.dft +-0.25, 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-0.25,-6.5 ,-0.25, 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-0.25,-6.5 , # InterpolationType.cubic BoundType.dft +0., # InterpolationType.cubic BoundType.dft +2. , 2. , 2. , 2. , 2. , 2. , 2. , 2. , 2. , 2. ,-0.6666666, 1. , 1. , 1. , 1. , 1. , 0.9999999, 1. ,-0.666667 ,-5.666667 ,-0.6666666, 1. , 1. , 1. , 1. , 1. , 0.9999999, 1. ,-0.666667 ,-5.666667 , # InterpolationType.fourth BoundType.dft +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.fourth BoundType.dft +-0.6666666, 1. , 1. , 1. , 1. , 1. , 0.9999999, 1. ,-0.666667 ,-5.666667 ,-0.6666666, 1. , 1. , 1. , 1. , 1. , 0.9999999, 1. ,-0.666667 ,-5.666667 , # InterpolationType.fourth BoundType.dft +0., # InterpolationType.fourth BoundType.dft +2. , 2. , 2. , 2. , 2. , 2. , 2. , 2. , 2. , 2. ,-0.97916675, 0.9739583 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.97395825,-0.9791663 ,-4.989583 ,-0.97916675, 0.9739583 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.97395825,-0.9791663 ,-4.989583 , # InterpolationType.fifth BoundType.dft +2.,2.,2.,2.,2.,2.,2.,2.,2.,2., # InterpolationType.fifth BoundType.dft +-0.97916675, 0.9739583 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.97395825,-0.9791663 ,-4.989583 ,-0.97916675, 0.9739583 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.97395825,-0.9791663 ,-4.989583 , # InterpolationType.fifth BoundType.dft +0., # InterpolationType.fifth BoundType.dft +1.9999999 , 2. , 2. , 2. , 2. , 2. , 2. , 2. , 2. , 2. ,-1.1666667 , 0.9166667 , 1. , 1. , 1. , 0.9999998 , 1.0000004 , 0.91666675,-1.1666669 ,-4.4999995 ,-1.1666667 , 0.9166667 , 1. , 1. , 1. , 0.9999998 , 1.0000004 , 0.91666675,-1.1666669 ,-4.4999995 , # InterpolationType.sixth BoundType.dft +1.9999999,2. ,2. ,2. ,2. ,2. ,2. ,2. ,2. ,2. , # InterpolationType.sixth BoundType.dft +-1.1666667 , 0.9166667 , 1. , 1. , 1. 
, 0.9999998 , 1.0000004 , 0.91666675,-1.1666669 ,-4.4999995 ,-1.1666667 , 0.9166667 , 1. , 1. , 1. , 0.9999998 , 1.0000004 , 0.91666675,-1.1666669 ,-4.4999995 , # InterpolationType.sixth BoundType.dft +0., # InterpolationType.sixth BoundType.dft +2.0000002 , 2.0000002 , 2.0000002 , 2.0000002 , 2.0000002 , 2.0000002 , 2.0000002 , 2.0000002 , 2.0000002 , 2.0000002 ,-1.2879773 , 0.84331596, 0.999783 , 0.99999994, 1. , 1.0000001 , 0.9997831 , 0.8433161 ,-1.2879775 ,-4.110243 ,-1.2879773 , 0.84331596, 0.999783 , 0.99999994, 1. , 1.0000001 , 0.9997831 , 0.8433161 ,-1.2879775 ,-4.110243 , # InterpolationType.seventh BoundType.dft +2.0000002,2.0000002,2.0000002,2.0000002,2.0000002,2.0000002,2.0000002,2.0000002,2.0000002,2.0000002, # InterpolationType.seventh BoundType.dft +-1.2879773 , 0.84331596, 0.999783 , 0.99999994, 1. , 1.0000001 , 0.9997831 , 0.8433161 ,-1.2879775 ,-4.110243 ,-1.2879773 , 0.84331596, 0.999783 , 0.99999994, 1. , 1.0000001 , 0.9997831 , 0.8433161 ,-1.2879775 ,-4.110243 , # InterpolationType.seventh BoundType.dft +0., # InterpolationType.seventh BoundType.dft +0.,1.,1.,1.,1.,1.,1.,1.,1.,1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.zero +0.,1.,1.,1.,1.,1.,1.,1.,1.,1., # InterpolationType.nearest BoundType.zero +0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., # InterpolationType.nearest BoundType.zero +0., # InterpolationType.nearest BoundType.zero +0.5, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-9. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.linear BoundType.zero +0.5,1. ,1. ,1. ,1. ,1. ,1. ,1. ,1. ,1. , # InterpolationType.linear BoundType.zero +1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., # InterpolationType.linear BoundType.zero +0., # InterpolationType.linear BoundType.zero +0.5, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-9. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.quadratic BoundType.zero +0.5,1. ,1. ,1. ,1. ,1. ,1. ,1. ,1. ,1. , # InterpolationType.quadratic BoundType.zero +1., 1., 1., 1., 1., 1., 1., 1., 1.,-9., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., # InterpolationType.quadratic BoundType.zero +0., # InterpolationType.quadratic BoundType.zero +0.5 , 0.9791666 , 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.99999994, 0.875 , 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-0.25 ,-6.625 ,-1.125 , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.cubic BoundType.zero +0.5 ,0.9791666 ,0.99999994,0.99999994,0.99999994,0.99999994,0.99999994,0.99999994,0.99999994,0.99999994, # InterpolationType.cubic BoundType.zero +0.875, 1. , 1. , 1. , 1. , 1. , 1. , 1. ,-0.25 ,-6.625,-1.125, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.cubic BoundType.zero +0., # InterpolationType.cubic BoundType.zero +0.5 , 0.9583334, 1. , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.8333334, 1. , 1. , 1. , 1. , 1. , 0.9999999, 1. ,-0.666667 ,-5.8333335,-1.5 , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.fourth BoundType.zero +0.5 ,0.9583334,1. ,1. ,1. ,1. ,1. ,1. ,1. ,1. , # InterpolationType.fourth BoundType.zero +0.8333334, 1. , 1. , 1. , 1. , 1. , 0.9999999, 1. ,-0.666667 ,-5.8333335,-1.5 , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.fourth BoundType.zero +0., # InterpolationType.fourth BoundType.zero +0.5 , 0.9380208 , 0.9997396 , 1. , 1. , 1. , 1. , 1. 
, 1. , 1. , 0.7994791 , 0.9973958 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.97395825,-0.9817705 ,-5.190104 ,-1.7786459 ,-0.0234375 , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.fifth BoundType.zero +0.5 ,0.9380208,0.9997396,1. ,1. ,1. ,1. ,1. ,1. ,1. , # InterpolationType.fifth BoundType.zero +0.7994791 , 0.9973958 , 1. , 1. , 0.99999994, 1.0000001 , 0.99999976, 0.97395825,-0.9817705 ,-5.190104 ,-1.7786459 ,-0.0234375 , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.fifth BoundType.zero +0., # InterpolationType.fifth BoundType.zero +0.49999997, 0.91944444, 0.9986111 , 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.775 , 0.99166673, 1. , 1. , 1. , 0.9999998 , 1.0000004 , 0.91666675,-1.1750002 ,-4.725 ,-1.9416667 ,-0.075 , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.sixth BoundType.zero +0.49999997,0.91944444,0.9986111 ,1. ,1. ,1. ,1. ,1. ,1. ,1. , # InterpolationType.sixth BoundType.zero +0.775 , 0.99166673, 1. , 1. , 1. , 0.9999998 , 1.0000004 , 0.91666675,-1.1750002 ,-4.725 ,-1.9416667 ,-0.075 , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , # InterpolationType.sixth BoundType.zero +0., # InterpolationType.sixth BoundType.zero +5.0000000e-01, 9.0259641e-01, 9.9662077e-01, 9.9999845e-01, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 1.0000000e+00, 7.5551212e-01, 9.8430985e-01, 9.9997836e-01, 9.9999994e-01, 1.0000000e+00, 1.0000001e+00, 9.9978310e-01, 8.4329438e-01,-1.3036675e+00,-4.3547311e+00,-2.0434895e+00,-1.4099392e-01,-1.9531250e-04, 0., 0., 0., 0., 0., 0., 0., # InterpolationType.seventh BoundType.zero +0.5 ,0.9025964 ,0.9966208 ,0.99999845,1. ,1. ,1. ,1. ,1. ,1. , # InterpolationType.seventh BoundType.zero +7.5551212e-01, 9.8430985e-01, 9.9997836e-01, 9.9999994e-01, 1.0000000e+00, 1.0000001e+00, 9.9978310e-01, 8.4329438e-01,-1.3036675e+00,-4.3547311e+00,-2.0434895e+00,-1.4099392e-01,-1.9531250e-04, 0., 0., 0., 0., 0., 0., 0., # InterpolationType.seventh BoundType.zero +0., # InterpolationType.seventh BoundType.zero diff --git a/tests/testing_data/1D_BP_fwd.txt b/tests/testing_data/1D_BP_fwd.txt new file mode 100644 index 0000000000..a620d59dff --- /dev/null +++ b/tests/testing_data/1D_BP_fwd.txt @@ -0,0 +1,56 @@ +1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, # InterpolationType.nearest BoundType.replicate +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, # InterpolationType.linear BoundType.replicate +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, # InterpolationType.quadratic BoundType.replicate +0.5208, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.4792, 8.9792, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, # InterpolationType.cubic BoundType.replicate +0.5417, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.4583, 8.9583, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, # InterpolationType.fourth BoundType.replicate +0.5622, 1.5003, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4997, 8.4378, 8.9378, 8.9997, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, # InterpolationType.fifth BoundType.replicate +0.5819, 1.5014, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4986, 8.4181, 8.9181, 8.9986, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, # InterpolationType.sixth BoundType.replicate +0.6008, 1.5034, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4966, 8.3992, 8.8992, 8.9966, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, # InterpolationType.seventh BoundType.replicate +1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 
3.0, 2.0, 1.0, 0.0, 1.0, 2.0, # InterpolationType.nearest BoundType.dct1 +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 8.5, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 0.5, 0.5, 1.5, # InterpolationType.linear BoundType.dct1 +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 8.5, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 0.5, 0.5, 1.5, # InterpolationType.quadratic BoundType.dct1 +0.5417, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.4583, 8.4583, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 0.5417, 0.5417, 1.5, # InterpolationType.cubic BoundType.dct1 +0.5833, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.4167, 8.4167, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 0.5833, 0.5833, 1.5, # InterpolationType.fourth BoundType.dct1 +0.6245, 1.5005, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4995, 8.3755, 8.3755, 7.4995, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5005, 0.6245, 0.6245, 1.5005, # InterpolationType.fifth BoundType.dct1 +0.6639, 1.5028, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4972, 8.3361, 8.3361, 7.4972, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5028, 0.6639, 0.6639, 1.5028, # InterpolationType.sixth BoundType.dct1 +0.7016, 1.5068, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4932, 8.2984, 8.2984, 7.4932, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5068, 0.7016, 0.7016, 1.5068, # InterpolationType.seventh BoundType.dct1 +1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0, 0.0, # InterpolationType.nearest BoundType.dct2 +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.0, 8.5, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 0.5, 0.0, # InterpolationType.linear BoundType.dct2 +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.0, 8.5, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 0.5, 0.0, # InterpolationType.quadratic BoundType.dct2 +0.5208, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.4792, 8.9583, 8.4792, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 0.5208, 0.0417, # InterpolationType.cubic BoundType.dct2 +0.5417, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.4583, 8.9167, 8.4583, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 0.5417, 0.0833, # InterpolationType.fourth BoundType.dct2 +0.5625, 1.5003, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4997, 8.4375, 8.8755, 8.4375, 7.4997, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5003, 0.5625, 0.1245, # InterpolationType.fifth BoundType.dct2 +0.5833, 1.5014, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4986, 8.4167, 8.8361, 8.4167, 7.4986, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5014, 0.5833, 0.1639, # InterpolationType.sixth BoundType.dct2 +0.6042, 1.5034, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4966, 8.3958, 8.7984, 8.3958, 7.4966, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5034, 0.6042, 0.2016, # InterpolationType.seventh BoundType.dct2 +1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -2.0, -1.0, -0.0, # InterpolationType.nearest BoundType.dst1 +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 4.5, -4.5, -8.5, -7.5, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5, -0.5, # InterpolationType.linear BoundType.dst1 +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 4.5, -4.5, -8.5, -7.5, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5, -0.5, # InterpolationType.quadratic BoundType.dst1 +0.5208, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.2917, 4.2917, -4.2917, -8.2917, -7.5, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5, -0.5208, # InterpolationType.cubic BoundType.dst1 +0.5417, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.0833, 4.0833, -4.0833, -8.0833, -7.5, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5, -0.5417, # InterpolationType.fourth BoundType.dst1 +0.5622, 1.5003, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4974, 7.8776, 3.8802, -3.8802, -7.8776, -7.4974, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5003, -0.5622, # InterpolationType.fifth BoundType.dst1 +0.5819, 1.5014, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4861, 7.6806, 3.6944, -3.6944, -7.6806, -7.4861, -6.5, 
-5.5, -4.5, -3.5, -2.5, -1.5014, -0.5819, # InterpolationType.sixth BoundType.dst1 +0.6008, 1.5034, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4662, 7.4922, 3.5260, -3.5260, -7.4922, -7.4662, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5034, -0.6008, # InterpolationType.seventh BoundType.dst1 +1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -2.0, -1.0, -0.0, 0.0, # InterpolationType.nearest BoundType.dst2 +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 0.0, -8.5, -7.5, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5, -0.5, 0.0, # InterpolationType.linear BoundType.dst2 +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 0.0, -8.5, -7.5, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5, -0.5, 0.0, # InterpolationType.quadratic BoundType.dst2 +5.2083e-01, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.1042, -1.6391e-07, -8.1042, -7.5, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5, -5.2083e-01, 0.0, # InterpolationType.cubic BoundType.dst2 +5.4167e-01, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 7.7083, 1.4901e-07, -7.7083, -7.5, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5, -5.4167e-01, 0.0, # InterpolationType.fourth BoundType.dst2 +5.6198e-01, 1.5003, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4951, 7.3224, 1.2107e-07, -7.3224, -7.4951, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5003, -5.6198e-01, 5.2387e-10, # InterpolationType.fifth BoundType.dst2 +5.8056e-01, 1.5014, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4736, 6.9694, -1.0896e-07, -6.9694, -7.4736, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5014, -5.8056e-01, 2.3283e-10, # InterpolationType.sixth BoundType.dst2 +0.59740, 1.5034, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4358, 6.6493, 0.0, -6.6493, -7.4358, -6.5, -5.5, -4.5, -3.5, -2.5, -1.5034, -0.59740, 0.0, # InterpolationType.seventh BoundType.dst2 +1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, # InterpolationType.nearest BoundType.dft +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 4.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 4.5, # InterpolationType.linear BoundType.dft +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 4.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 4.5, # InterpolationType.quadratic BoundType.dft +0.7083, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.2917, 4.5, 0.7083, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.2917, 4.5, # InterpolationType.cubic BoundType.dft +0.9167, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.0833, 4.5, 0.9167, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.0833, 4.5, # InterpolationType.fourth BoundType.dft +1.1198, 1.5026, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4974, 7.8802, 4.5, 1.1198, 1.5026, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4974, 7.8802, 4.5, # InterpolationType.fifth BoundType.dft +1.3056, 1.5139, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4861, 7.6944, 4.5, 1.3056, 1.5139, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4861, 7.6944, 4.5, # InterpolationType.sixth BoundType.dft +1.4740, 1.5338, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4662, 7.5260, 4.5, 1.4740, 1.5338, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4662, 7.5260, 4.5, # InterpolationType.seventh BoundType.dft +1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, # InterpolationType.nearest BoundType.zero +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 4.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, # InterpolationType.linear BoundType.zero +0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 4.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, # InterpolationType.quadratic BoundType.zero +0.5208, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.2917, 4.4792, 0.1875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, # InterpolationType.cubic BoundType.zero +0.5417, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.0833, 4.4583, 0.3750, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, # InterpolationType.fourth BoundType.zero +5.6224e-01, 1.5003, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4974, 7.8799, 4.4378, 5.5755e-01, 2.3438e-03, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, # InterpolationType.fifth BoundType.zero +0.5819, 1.5014, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4861, 7.6931, 4.4181, 0.7236, 0.0125, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, # InterpolationType.sixth BoundType.zero +6.0078e-01, 1.5034, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4662, 7.5226, 4.3992, 8.7325e-01, 3.0411e-02, 1.3951e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, # InterpolationType.seventh BoundType.zero diff --git a/tests/testing_data/cpp_resample_answers.py b/tests/testing_data/cpp_resample_answers.py new file mode 100644 index 0000000000..51ac6ccda9 --- /dev/null +++ b/tests/testing_data/cpp_resample_answers.py @@ -0,0 +1,41 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import csv +import os +import warnings +from typing import List, Optional + + +def _read_testing_data_answers(fname: Optional[str] = None, delimiter=",") -> List: + answers: List = [] + if not fname: + return answers + # read answers from directory of the current file + pwd = os.path.dirname(os.path.abspath(__file__)) + filename = os.path.join(pwd, fname) + if not os.path.isfile(filename): + warnings.warn("test data {} not found.".format(filename)) + return answers + with open(filename) as f: + res_reader = csv.reader(f, delimiter=delimiter) + for r in res_reader: + res_row = [] + for item in r: + if item.strip().startswith("#"): + continue # allow for some simple comments in the file + res_row.append(float(item)) + answers.append(res_row) + return answers + + +Expected_1D_GP_fwd: List = _read_testing_data_answers(fname="1D_BP_fwd.txt") +Expected_1D_GP_bwd: List = _read_testing_data_answers(fname="1D_BP_bwd.txt") From 3a1c459c0df5d39fa24832ab1d31267d6a03c035 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 24 Feb 2021 18:11:46 +0000 Subject: [PATCH 002/457] move transforms out of compose file (#1623) move transforms out of compose file. this is backwards compatible. 
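For illustration only (not part of the patch itself): after this change both import
paths below keep working, which is exactly what the new
tests/test_compose.py::test_backwards_compatible_imports exercises:

    # old location: re-exported from monai.transforms.compose for backwards compatibility
    from monai.transforms.compose import MapTransform, Randomizable, Transform
    # new canonical location introduced by this change
    from monai.transforms.transform import MapTransform, Randomizable, Transform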
---
 monai/apps/deepgrow/transforms.py | 2 +-
 monai/transforms/__init__.py | 3 +-
 monai/transforms/compose.py | 192 +--------------------
 monai/transforms/croppad/array.py | 2 +-
 monai/transforms/croppad/dictionary.py | 2 +-
 monai/transforms/intensity/array.py | 2 +-
 monai/transforms/intensity/dictionary.py | 2 +-
 monai/transforms/io/array.py | 2 +-
 monai/transforms/io/dictionary.py | 2 +-
 monai/transforms/post/array.py | 2 +-
 monai/transforms/post/dictionary.py | 2 +-
 monai/transforms/spatial/array.py | 2 +-
 monai/transforms/spatial/dictionary.py | 2 +-
 monai/transforms/transform.py | 206 +++++++++++++++++++++++
 monai/transforms/utility/array.py | 2 +-
 monai/transforms/utility/dictionary.py | 2 +-
 tests/test_compose.py | 3 +
 17 files changed, 229 insertions(+), 201 deletions(-)
 create mode 100644 monai/transforms/transform.py

diff --git a/monai/apps/deepgrow/transforms.py b/monai/apps/deepgrow/transforms.py
index f178360031..80b0d1648d 100644
--- a/monai/apps/deepgrow/transforms.py
+++ b/monai/apps/deepgrow/transforms.py
@@ -17,7 +17,7 @@
 from monai.config import IndexSelection, KeysCollection
 from monai.networks.layers import GaussianFilter
 from monai.transforms import SpatialCrop
-from monai.transforms.compose import MapTransform, Randomizable, Transform
+from monai.transforms.transform import MapTransform, Randomizable, Transform
 from monai.transforms.utils import generate_spatial_bounding_box
 from monai.utils import min_version, optional_import
diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py
index 6f7c2a4f61..357e00c6dd 100644
--- a/monai/transforms/__init__.py
+++ b/monai/transforms/__init__.py
@@ -10,7 +10,7 @@
 # limitations under the License.

 from .adaptors import FunctionSignature, adaptor, apply_alias, to_kwargs
-from .compose import Compose, MapTransform, Randomizable, Transform
+from .compose import Compose
 from .croppad.array import (
     BorderPad,
     BoundingRect,
@@ -234,6 +234,7 @@
     ZoomD,
     ZoomDict,
 )
+from .transform import MapTransform, Randomizable, Transform
 from .utility.array import (
     AddChannel,
     AddExtremePointsChannel,
diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py
index 2d1fe4eccd..2d612ad2e3 100644
--- a/monai/transforms/compose.py
+++ b/monai/transforms/compose.py
@@ -13,135 +13,17 @@
 """

 import warnings
-from abc import ABC, abstractmethod
-from typing import Any, Callable, Hashable, Optional, Sequence, Tuple, Union
+from typing import Any, Callable, Optional, Sequence, Union

 import numpy as np

-from monai.config import KeysCollection
+# For backwards compatibility (so this still works: from monai.transforms.compose import MapTransform)
+from monai.transforms.transform import MapTransform  # noqa: F401
+from monai.transforms.transform import Randomizable, Transform
 from monai.transforms.utils import apply_transform
 from monai.utils import MAX_SEED, ensure_tuple, get_seed

-__all__ = ["Transform", "Randomizable", "Compose", "MapTransform"]
-
-
-class Transform(ABC):
-    """
-    An abstract class of a ``Transform``.
-    A transform is callable that processes ``data``.
-
-    It could be stateful and may modify ``data`` in place,
-    the implementation should be aware of:
-
-    #. thread safety when mutating its own states.
-       When used from a multi-process context, transform's instance variables are read-only.
-    #. ``data`` content unused by this transform may still be used in the
-       subsequent transforms in a composed transform.
-    #. storing too much information in ``data`` may not scale.
- - See Also - - :py:class:`monai.transforms.Compose` - """ - - @abstractmethod - def __call__(self, data: Any): - """ - ``data`` is an element which often comes from an iteration over an - iterable, such as :py:class:`torch.utils.data.Dataset`. This method should - return an updated version of ``data``. - To simplify the input validations, most of the transforms assume that - - - ``data`` is a Numpy ndarray, PyTorch Tensor or string - - the data shape can be: - - #. string data without shape, `LoadImage` transform expects file paths - #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``, - except that `AddChannel` expects (spatial_dim_1[, spatial_dim_2, ...]) and - `AsChannelFirst` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels) - #. most of the post-processing transforms expect - ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])`` - - - the channel dimension is not omitted even if number of channels is one - - This method can optionally take additional arguments to help execute transformation operation. - - Raises: - NotImplementedError: When the subclass does not override this method. - - """ - raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") - - -class Randomizable(ABC): - """ - An interface for handling random state locally, currently based on a class variable `R`, - which is an instance of `np.random.RandomState`. - This is mainly for randomized data augmentation transforms. For example:: - - class RandShiftIntensity(Randomizable): - def randomize(): - self._offset = self.R.uniform(low=0, high=100) - def __call__(self, img): - self.randomize() - return img + self._offset - - transform = RandShiftIntensity() - transform.set_random_state(seed=0) - - """ - - R: np.random.RandomState = np.random.RandomState() - - def set_random_state( - self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None - ) -> "Randomizable": - """ - Set the random state locally, to control the randomness, the derived - classes should use :py:attr:`self.R` instead of `np.random` to introduce random - factors. - - Args: - seed: set the random state with an integer seed. - state: set the random state with a `np.random.RandomState` object. - - Raises: - TypeError: When ``state`` is not an ``Optional[np.random.RandomState]``. - - Returns: - a Randomizable instance. - - """ - if seed is not None: - _seed = id(seed) if not isinstance(seed, (int, np.integer)) else seed - _seed = _seed % MAX_SEED - self.R = np.random.RandomState(_seed) - return self - - if state is not None: - if not isinstance(state, np.random.RandomState): - raise TypeError(f"state must be None or a np.random.RandomState but is {type(state).__name__}.") - self.R = state - return self - - self.R = np.random.RandomState() - return self - - @abstractmethod - def randomize(self, data: Any) -> None: - """ - Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors. - - all :py:attr:`self.R` calls happen here so that we have a better chance to - identify errors of sync the random state. - - This method can generate the random factors based on properties of the input data. - - Raises: - NotImplementedError: When the subclass does not override this method. 
- - """ - raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") +__all__ = ["Compose"] class Compose(Randomizable, Transform): @@ -255,67 +137,3 @@ def __call__(self, input_): for _transform in self.transforms: input_ = apply_transform(_transform, input_) return input_ - - -class MapTransform(Transform): - """ - A subclass of :py:class:`monai.transforms.Transform` with an assumption - that the ``data`` input of ``self.__call__`` is a MutableMapping such as ``dict``. - - The ``keys`` parameter will be used to get and set the actual data - item to transform. That is, the callable of this transform should - follow the pattern: - - .. code-block:: python - - def __call__(self, data): - for key in self.keys: - if key in data: - # update output data with some_transform_function(data[key]). - else: - # do nothing or some exceptions handling. - return data - - Raises: - ValueError: When ``keys`` is an empty iterable. - TypeError: When ``keys`` type is not in ``Union[Hashable, Iterable[Hashable]]``. - - """ - - def __init__(self, keys: KeysCollection) -> None: - self.keys: Tuple[Hashable, ...] = ensure_tuple(keys) - if not self.keys: - raise ValueError("keys must be non empty.") - for key in self.keys: - if not isinstance(key, Hashable): - raise TypeError(f"keys must be one of (Hashable, Iterable[Hashable]) but is {type(keys).__name__}.") - - @abstractmethod - def __call__(self, data): - """ - ``data`` often comes from an iteration over an iterable, - such as :py:class:`torch.utils.data.Dataset`. - - To simplify the input validations, this method assumes: - - - ``data`` is a Python dictionary - - ``data[key]`` is a Numpy ndarray, PyTorch Tensor or string, where ``key`` is an element - of ``self.keys``, the data shape can be: - - #. string data without shape, `LoadImaged` transform expects file paths - #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``, - except that `AddChanneld` expects (spatial_dim_1[, spatial_dim_2, ...]) and - `AsChannelFirstd` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels) - #. most of the post-processing transforms expect - ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])`` - - - the channel dimension is not omitted even if number of channels is one - - Raises: - NotImplementedError: When the subclass does not override this method. - - returns: - An updated dictionary version of ``data`` by applying the transform. 
- - """ - raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index b4444803a4..ef5e0019bd 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -20,7 +20,7 @@ from monai.config import IndexSelection from monai.data.utils import get_random_patch, get_valid_patch_size -from monai.transforms.compose import Randomizable, Transform +from monai.transforms.transform import Randomizable, Transform from monai.transforms.utils import ( generate_pos_neg_label_crop_centers, generate_spatial_bounding_box, diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 1faed25605..20ae6ac1ed 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -21,7 +21,6 @@ from monai.config import IndexSelection, KeysCollection from monai.data.utils import get_random_patch, get_valid_patch_size -from monai.transforms.compose import MapTransform, Randomizable from monai.transforms.croppad.array import ( BorderPad, BoundingRect, @@ -31,6 +30,7 @@ SpatialCrop, SpatialPad, ) +from monai.transforms.transform import MapTransform, Randomizable from monai.transforms.utils import ( generate_pos_neg_label_crop_centers, generate_spatial_bounding_box, diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 87091f6237..40bef064eb 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -22,7 +22,7 @@ from monai.config import DtypeLike from monai.networks.layers import GaussianFilter, HilbertTransform, SavitzkyGolayFilter -from monai.transforms.compose import Randomizable, Transform +from monai.transforms.transform import Randomizable, Transform from monai.transforms.utils import rescale_array from monai.utils import PT_BEFORE_1_7, InvalidPyTorchVersionError, dtype_torch_to_numpy, ensure_tuple_size diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 1c9b31c120..54a85a57b0 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -22,7 +22,6 @@ import torch from monai.config import DtypeLike, KeysCollection -from monai.transforms.compose import MapTransform, Randomizable from monai.transforms.intensity.array import ( AdjustContrast, GaussianSharpen, @@ -35,6 +34,7 @@ ShiftIntensity, ThresholdIntensity, ) +from monai.transforms.transform import MapTransform, Randomizable from monai.utils import dtype_torch_to_numpy, ensure_tuple_rep, ensure_tuple_size __all__ = [ diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 9c14f7a689..855621e432 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -22,7 +22,7 @@ from monai.data.image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader from monai.data.nifti_saver import NiftiSaver from monai.data.png_saver import PNGSaver -from monai.transforms.compose import Transform +from monai.transforms.transform import Transform from monai.utils import GridSampleMode, GridSamplePadMode from monai.utils import ImageMetaKey as Key from monai.utils import InterpolateMode, ensure_tuple, optional_import diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index d3220aa682..55707f750e 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -21,8 +21,8 @@ from 
monai.config import DtypeLike, KeysCollection from monai.data.image_reader import ImageReader -from monai.transforms.compose import MapTransform from monai.transforms.io.array import LoadImage, SaveImage +from monai.transforms.transform import MapTransform from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode __all__ = [ diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py index 0c60b0cc89..8b4f71093b 100644 --- a/monai/transforms/post/array.py +++ b/monai/transforms/post/array.py @@ -21,7 +21,7 @@ import torch.nn.functional as F from monai.networks import one_hot -from monai.transforms.compose import Transform +from monai.transforms.transform import Transform from monai.transforms.utils import get_largest_connected_component_mask from monai.utils import ensure_tuple diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py index 60cda11a91..aff4ae3572 100644 --- a/monai/transforms/post/dictionary.py +++ b/monai/transforms/post/dictionary.py @@ -21,7 +21,6 @@ import torch from monai.config import KeysCollection -from monai.transforms.compose import MapTransform from monai.transforms.post.array import ( Activations, AsDiscrete, @@ -30,6 +29,7 @@ MeanEnsemble, VoteEnsemble, ) +from monai.transforms.transform import MapTransform from monai.utils import ensure_tuple_rep __all__ = [ diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index df10480188..0610982847 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -22,8 +22,8 @@ from monai.config import USE_COMPILED, DtypeLike from monai.data.utils import compute_shape_offset, to_affine_nd, zoom_affine from monai.networks.layers import AffineTransform, GaussianFilter, grid_pull -from monai.transforms.compose import Randomizable, Transform from monai.transforms.croppad.array import CenterSpatialCrop +from monai.transforms.transform import Randomizable, Transform from monai.transforms.utils import ( create_control_grid, create_grid, diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index e612a25ef8..8b546e5e97 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -22,7 +22,6 @@ from monai.config import DtypeLike, KeysCollection from monai.networks.layers.simplelayers import GaussianFilter -from monai.transforms.compose import MapTransform, Randomizable from monai.transforms.croppad.array import CenterSpatialCrop from monai.transforms.spatial.array import ( Flip, @@ -36,6 +35,7 @@ Spacing, Zoom, ) +from monai.transforms.transform import MapTransform, Randomizable from monai.transforms.utils import create_grid from monai.utils import ( GridSampleMode, diff --git a/monai/transforms/transform.py b/monai/transforms/transform.py new file mode 100644 index 0000000000..e5841cbe97 --- /dev/null +++ b/monai/transforms/transform.py @@ -0,0 +1,206 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +A collection of generic interfaces for MONAI transforms. +""" + +from abc import ABC, abstractmethod +from typing import Any, Hashable, Optional, Tuple + +import numpy as np + +from monai.config import KeysCollection +from monai.utils import MAX_SEED, ensure_tuple + +__all__ = ["Randomizable", "Transform", "MapTransform"] + + +class Randomizable(ABC): + """ + An interface for handling random state locally, currently based on a class variable `R`, + which is an instance of `np.random.RandomState`. + This is mainly for randomized data augmentation transforms. For example:: + + class RandShiftIntensity(Randomizable): + def randomize(): + self._offset = self.R.uniform(low=0, high=100) + def __call__(self, img): + self.randomize() + return img + self._offset + + transform = RandShiftIntensity() + transform.set_random_state(seed=0) + + """ + + R: np.random.RandomState = np.random.RandomState() + + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "Randomizable": + """ + Set the random state locally, to control the randomness, the derived + classes should use :py:attr:`self.R` instead of `np.random` to introduce random + factors. + + Args: + seed: set the random state with an integer seed. + state: set the random state with a `np.random.RandomState` object. + + Raises: + TypeError: When ``state`` is not an ``Optional[np.random.RandomState]``. + + Returns: + a Randomizable instance. + + """ + if seed is not None: + _seed = id(seed) if not isinstance(seed, (int, np.integer)) else seed + _seed = _seed % MAX_SEED + self.R = np.random.RandomState(_seed) + return self + + if state is not None: + if not isinstance(state, np.random.RandomState): + raise TypeError(f"state must be None or a np.random.RandomState but is {type(state).__name__}.") + self.R = state + return self + + self.R = np.random.RandomState() + return self + + @abstractmethod + def randomize(self, data: Any) -> None: + """ + Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors. + + all :py:attr:`self.R` calls happen here so that we have a better chance to + identify errors of sync the random state. + + This method can generate the random factors based on properties of the input data. + + Raises: + NotImplementedError: When the subclass does not override this method. + + """ + raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") + + +class Transform(ABC): + """ + An abstract class of a ``Transform``. + A transform is callable that processes ``data``. + + It could be stateful and may modify ``data`` in place, + the implementation should be aware of: + + #. thread safety when mutating its own states. + When used from a multi-process context, transform's instance variables are read-only. + #. ``data`` content unused by this transform may still be used in the + subsequent transforms in a composed transform. + #. storing too much information in ``data`` may not scale. + + See Also + + :py:class:`monai.transforms.Compose` + """ + + @abstractmethod + def __call__(self, data: Any): + """ + ``data`` is an element which often comes from an iteration over an + iterable, such as :py:class:`torch.utils.data.Dataset`. This method should + return an updated version of ``data``. + To simplify the input validations, most of the transforms assume that + + - ``data`` is a Numpy ndarray, PyTorch Tensor or string + - the data shape can be: + + #. 
string data without shape, `LoadImage` transform expects file paths + #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``, + except that `AddChannel` expects (spatial_dim_1[, spatial_dim_2, ...]) and + `AsChannelFirst` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels) + #. most of the post-processing transforms expect + ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])`` + + - the channel dimension is not omitted even if number of channels is one + + This method can optionally take additional arguments to help execute transformation operation. + + Raises: + NotImplementedError: When the subclass does not override this method. + + """ + raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") + + +class MapTransform(Transform): + """ + A subclass of :py:class:`monai.transforms.Transform` with an assumption + that the ``data`` input of ``self.__call__`` is a MutableMapping such as ``dict``. + + The ``keys`` parameter will be used to get and set the actual data + item to transform. That is, the callable of this transform should + follow the pattern: + + .. code-block:: python + + def __call__(self, data): + for key in self.keys: + if key in data: + # update output data with some_transform_function(data[key]). + else: + # do nothing or some exceptions handling. + return data + + Raises: + ValueError: When ``keys`` is an empty iterable. + TypeError: When ``keys`` type is not in ``Union[Hashable, Iterable[Hashable]]``. + + """ + + def __init__(self, keys: KeysCollection) -> None: + self.keys: Tuple[Hashable, ...] = ensure_tuple(keys) + if not self.keys: + raise ValueError("keys must be non empty.") + for key in self.keys: + if not isinstance(key, Hashable): + raise TypeError(f"keys must be one of (Hashable, Iterable[Hashable]) but is {type(keys).__name__}.") + + @abstractmethod + def __call__(self, data): + """ + ``data`` often comes from an iteration over an iterable, + such as :py:class:`torch.utils.data.Dataset`. + + To simplify the input validations, this method assumes: + + - ``data`` is a Python dictionary + - ``data[key]`` is a Numpy ndarray, PyTorch Tensor or string, where ``key`` is an element + of ``self.keys``, the data shape can be: + + #. string data without shape, `LoadImaged` transform expects file paths + #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``, + except that `AddChanneld` expects (spatial_dim_1[, spatial_dim_2, ...]) and + `AsChannelFirstd` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels) + #. most of the post-processing transforms expect + ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])`` + + - the channel dimension is not omitted even if number of channels is one + + Raises: + NotImplementedError: When the subclass does not override this method. + + returns: + An updated dictionary version of ``data`` by applying the transform. 
+ + """ + raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index c0ae40de59..8b161a9223 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -21,7 +21,7 @@ import torch from monai.config import DtypeLike, NdarrayTensor -from monai.transforms.compose import Randomizable, Transform +from monai.transforms.transform import Randomizable, Transform from monai.transforms.utils import extreme_points_to_image, get_extreme_points, map_binary_to_indices from monai.utils import ensure_tuple, min_version, optional_import diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index f374b82d76..c4bd7d4cba 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -23,7 +23,7 @@ import torch from monai.config import DtypeLike, KeysCollection, NdarrayTensor -from monai.transforms.compose import MapTransform, Randomizable +from monai.transforms.transform import MapTransform, Randomizable from monai.transforms.utility.array import ( AddChannel, AsChannelFirst, diff --git a/tests/test_compose.py b/tests/test_compose.py index c049044a97..3a0a6ea5bb 100644 --- a/tests/test_compose.py +++ b/tests/test_compose.py @@ -167,6 +167,9 @@ def test_flatten_and_len(self): # test len self.assertEqual(len(t1), 8) + def test_backwards_compatible_imports(self): + from monai.transforms.compose import MapTransform, Randomizable, Transform # noqa: F401 + if __name__ == "__main__": unittest.main() From c98fe05e8afaee4477f705bff149af31eb4d66a4 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 24 Feb 2021 19:46:35 +0000 Subject: [PATCH 003/457] pad_list_data_collate (#1626) * pad_collation Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * increase number of test cases to ensure required testing errors Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * determinism in setUp Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * pad collate for list of lists Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * code format Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * allow padding options Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/data/__init__.py | 1 + monai/data/utils.py | 80 ++++++++++++++++++++++++++++++- tests/test_pad_collation.py | 95 +++++++++++++++++++++++++++++++++++++ 3 files changed, 175 insertions(+), 1 deletion(-) create mode 100644 tests/test_pad_collation.py diff --git a/monai/data/__init__.py b/monai/data/__init__.py index e0db1e17ae..99990d7f53 100644 --- a/monai/data/__init__.py +++ b/monai/data/__init__.py @@ -46,6 +46,7 @@ iter_patch_slices, json_hashing, list_data_collate, + pad_list_data_collate, partition_dataset, partition_dataset_classes, pickle_hashing, diff --git a/monai/data/utils.py b/monai/data/utils.py index acc6d2e97a..c42e1abefa 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -36,6 +36,7 @@ first, optional_import, ) +from monai.utils.enums import Method nib, _ = optional_import("nibabel") @@ -63,6 +64,7 @@ "json_hashing", "pickle_hashing", "sorted_dict", + "pad_list_data_collate", ] @@ -240,7 +242,83 @@ def list_data_collate(batch: Sequence): """ elem = batch[0] data = [i for k in batch for i in k] if isinstance(elem, list) else batch - 
return default_collate(data)
+    try:
+        return default_collate(data)
+    except RuntimeError as re:
+        re_str = str(re)
+        if "stack expects each tensor to be equal size" in re_str:
+            re_str += (
+                "\nMONAI hint: if your transforms intentionally create images of different shapes, creating your "
+                + "`DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem (check its "
+                + "documentation)."
+            )
+        raise RuntimeError(re_str)
+
+
+def pad_list_data_collate(
+    batch: Sequence,
+    method: Union[Method, str] = Method.SYMMETRIC,
+    mode: Union[NumpyPadMode, str] = NumpyPadMode.CONSTANT,
+):
+    """
+    Same as MONAI's ``list_data_collate``, except any tensors are centrally padded to match the shape of the biggest
+    tensor in each dimension.
+
+    Note:
+        Use this collate function if any of the applied transforms can generate data of differing shapes.
+
+    Args:
+        batch: batch of data to pad-collate
+        method: padding method (see :py:class:`monai.transforms.SpatialPad`)
+        mode: padding mode (see :py:class:`monai.transforms.SpatialPad`)
+    """
+    list_of_dicts = isinstance(batch[0], dict)
+    for key_or_idx in batch[0].keys() if list_of_dicts else range(len(batch[0])):
+        max_shapes = []
+        for elem in batch:
+            if not isinstance(elem[key_or_idx], (torch.Tensor, np.ndarray)):
+                break
+            max_shapes.append(elem[key_or_idx].shape[1:])
+        # len > 0 if objects were arrays
+        if len(max_shapes) == 0:
+            continue
+        max_shape = np.array(max_shapes).max(axis=0)
+        # If all same size, skip
+        if np.all(np.array(max_shapes).min(axis=0) == max_shape):
+            continue
+        # Do we need to convert output to Tensor?
+        output_to_tensor = isinstance(batch[0][key_or_idx], torch.Tensor)
+
+        # Use `SpatialPadd` or `SpatialPad` to match sizes
+        # Default params are central padding, padding with 0's
+        # If input is dictionary, use the dictionary version so that the transformation is recorded
+        padder: Union[SpatialPadd, SpatialPad]
+        if list_of_dicts:
+            from monai.transforms.croppad.dictionary import SpatialPadd  # needs to be here to avoid circular import
+
+            padder = SpatialPadd(key_or_idx, max_shape, method, mode)  # type: ignore
+
+        else:
+            from monai.transforms.croppad.array import SpatialPad  # needs to be here to avoid circular import
+
+            padder = SpatialPad(max_shape, method, mode)  # type: ignore
+
+        for idx in range(len(batch)):
+            padded = padder(batch[idx])[key_or_idx] if list_of_dicts else padder(batch[idx][key_or_idx])
+            # since tuple is immutable we'll have to recreate
+            if isinstance(batch[idx], tuple):
+                batch[idx] = list(batch[idx])  # type: ignore
+                batch[idx][key_or_idx] = padded
+                batch[idx] = tuple(batch[idx])  # type: ignore
+            # else, replace (reuse the already computed `padded` rather than running the padder again)
+            else:
+                batch[idx][key_or_idx] = padded
+
+            if output_to_tensor:
+                batch[idx][key_or_idx] = torch.Tensor(batch[idx][key_or_idx])
+
+    # After padding, use default list collator
+    return list_data_collate(batch)


 def worker_init_fn(worker_id: int) -> None:
diff --git a/tests/test_pad_collation.py b/tests/test_pad_collation.py
new file mode 100644
index 0000000000..156d2649e0
--- /dev/null
+++ b/tests/test_pad_collation.py
@@ -0,0 +1,95 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import unittest +from typing import List, Tuple + +import numpy as np +import torch +from parameterized import parameterized + +from monai.data import CacheDataset, DataLoader +from monai.data.utils import pad_list_data_collate +from monai.transforms import ( + RandRotate, + RandRotate90, + RandRotate90d, + RandRotated, + RandSpatialCrop, + RandSpatialCropd, + RandZoom, + RandZoomd, +) +from monai.utils import set_determinism + +TESTS: List[Tuple] = [] + + +TESTS.append((dict, RandSpatialCropd("image", roi_size=[8, 7], random_size=True))) +TESTS.append((dict, RandRotated("image", prob=1, range_x=np.pi, keep_size=False))) +TESTS.append((dict, RandZoomd("image", prob=1, min_zoom=1.1, max_zoom=2.0, keep_size=False))) +TESTS.append((dict, RandRotate90d("image", prob=1, max_k=2))) + +TESTS.append((list, RandSpatialCrop(roi_size=[8, 7], random_size=True))) +TESTS.append((list, RandRotate(prob=1, range_x=np.pi, keep_size=False))) +TESTS.append((list, RandZoom(prob=1, min_zoom=1.1, max_zoom=2.0, keep_size=False))) +TESTS.append((list, RandRotate90(prob=1, max_k=2))) + + +class _Dataset(torch.utils.data.Dataset): + def __init__(self, images, labels, transforms): + self.images = images + self.labels = labels + self.transforms = transforms + + def __len__(self): + return len(self.images) + + def __getitem__(self, index): + return self.transforms(self.images[index]), self.labels[index] + + +class TestPadCollation(unittest.TestCase): + def setUp(self) -> None: + set_determinism(seed=0) + # image is non square to throw rotation errors + im = np.arange(0, 10 * 9).reshape(1, 10, 9) + num_elements = 20 + self.dict_data = [{"image": im} for _ in range(num_elements)] + self.list_data = [im for _ in range(num_elements)] + self.list_labels = [random.randint(0, 1) for _ in range(num_elements)] + + def tearDown(self) -> None: + set_determinism(None) + + @parameterized.expand(TESTS) + def test_pad_collation(self, t_type, transform): + + if t_type == dict: + dataset = CacheDataset(self.dict_data, transform, progress=False) + else: + dataset = _Dataset(self.list_data, self.list_labels, transform) + + # Default collation should raise an error + loader_fail = DataLoader(dataset, batch_size=10) + with self.assertRaises(RuntimeError): + for _ in loader_fail: + pass + + # Padded collation shouldn't + loader = DataLoader(dataset, batch_size=2, collate_fn=pad_list_data_collate) + for _ in loader: + pass + + +if __name__ == "__main__": + unittest.main() From 69d9fcc311d405bb1eeccca42b4de03d48ccb3a5 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Thu, 25 Feb 2021 09:26:13 +0000 Subject: [PATCH 004/457] decollate batch (#1624) decollate batch --- monai/data/__init__.py | 1 + monai/data/utils.py | 69 +++++++++++++++++++++- monai/transforms/__init__.py | 3 + monai/transforms/post/dictionary.py | 23 ++++++++ tests/test_decollate.py | 88 +++++++++++++++++++++++++++++ 5 files changed, 183 insertions(+), 1 deletion(-) create mode 100644 tests/test_decollate.py diff --git a/monai/data/__init__.py b/monai/data/__init__.py index 99990d7f53..3dd0a980ef 100644 --- 
a/monai/data/__init__.py +++ b/monai/data/__init__.py @@ -38,6 +38,7 @@ compute_shape_offset, correct_nifti_header_if_necessary, create_file_basename, + decollate_batch, dense_patch_slices, get_random_patch, get_valid_patch_size, diff --git a/monai/data/utils.py b/monai/data/utils.py index c42e1abefa..7717ddf3aa 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -18,7 +18,7 @@ from collections import defaultdict from itertools import product, starmap from pathlib import PurePath -from typing import Dict, Generator, Iterable, List, Optional, Sequence, Tuple, Union +from typing import Any, Dict, Generator, Iterable, List, Optional, Sequence, Tuple, Union import numpy as np import torch @@ -37,6 +37,7 @@ optional_import, ) from monai.utils.enums import Method +from monai.utils.misc import issequenceiterable nib, _ = optional_import("nibabel") @@ -64,6 +65,7 @@ "json_hashing", "pickle_hashing", "sorted_dict", + "decollate_batch", "pad_list_data_collate", ] @@ -255,6 +257,71 @@ def list_data_collate(batch: Sequence): raise RuntimeError(re_str) +def decollate_batch(data: dict, batch_size: Optional[int] = None) -> List[dict]: + """De-collate a batch of data (for example, as produced by a `DataLoader`). + + Returns a list of dictionaries. Each dictionary will only contain the data for a given batch. + + Images originally stored as (B,C,H,W,[D]) will be returned as (C,H,W,[D]). Other information, + such as metadata, may have been stored in a list (or a list inside nested dictionaries). In + this case we return the element of the list corresponding to the batch idx. + + Return types aren't guaranteed to be the same as the original, since numpy arrays will have been + converted to torch.Tensor, and tuples/lists may have been converted to lists of tensors + + For example: + + .. code-block:: python + + batch_data = { + "image": torch.rand((2,1,10,10)), + "image_meta_dict": {"scl_slope": torch.Tensor([0.0, 0.0])} + } + out = decollate_batch(batch_data) + print(len(out)) + >>> 2 + + print(out[0]) + >>> {'image': tensor([[[4.3549e-01...43e-01]]]), 'image_meta_dict': {'scl_slope': 0.0}} + + Args: + data: data to be de-collated. + batch_size: number of batches in data. If `None` is passed, try to figure out batch size. 
+ """ + if not isinstance(data, dict): + raise RuntimeError("Only currently implemented for dictionary data (might be trivial to adapt).") + if batch_size is None: + for v in data.values(): + if isinstance(v, torch.Tensor): + batch_size = v.shape[0] + break + if batch_size is None: + raise RuntimeError("Couldn't determine batch size, please specify as argument.") + + def torch_to_single(d: torch.Tensor): + """If input is a torch.Tensor with only 1 element, return just the element.""" + return d if d.numel() > 1 else d.item() + + def decollate(data: Any, idx: int): + """Recursively de-collate.""" + if isinstance(data, dict): + return {k: decollate(v, idx) for k, v in data.items()} + if isinstance(data, torch.Tensor): + out = data[idx] + return torch_to_single(out) + elif isinstance(data, list): + if len(data) == 0: + return data + if isinstance(data[0], torch.Tensor): + return [torch_to_single(d[idx]) for d in data] + if issequenceiterable(data[0]): + return [decollate(d, idx) for d in data] + return data[idx] + raise TypeError(f"Not sure how to de-collate type: {type(data)}") + + return [{key: decollate(data[key], idx) for key in data.keys()} for idx in range(batch_size)] + + def pad_list_data_collate( batch: Sequence, method: Union[Method, str] = Method.SYMMETRIC, diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 357e00c6dd..5578b93077 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -155,6 +155,9 @@ AsDiscreted, AsDiscreteD, AsDiscreteDict, + Decollated, + DecollateD, + DecollateDict, Ensembled, KeepLargestConnectedComponentd, KeepLargestConnectedComponentD, diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py index aff4ae3572..85abdac0ac 100644 --- a/monai/transforms/post/dictionary.py +++ b/monai/transforms/post/dictionary.py @@ -20,6 +20,7 @@ import numpy as np import torch +import monai.data from monai.config import KeysCollection from monai.transforms.post.array import ( Activations, @@ -52,6 +53,9 @@ "MeanEnsembleDict", "VoteEnsembleD", "VoteEnsembleDict", + "DecollateD", + "DecollateDict", + "Decollated", ] @@ -306,9 +310,28 @@ def __init__( super().__init__(keys, ensemble, output_key) +class Decollated(MapTransform): + """ + Decollate a batch of data. + + Note that unlike most MapTransforms, this will decollate all data, so keys are not needed. + + Args: + batch_size: if not supplied, we try to determine it based on array lengths. Will raise an error if + it fails to determine it automatically. + """ + + def __init__(self, batch_size: Optional[int] = None) -> None: + self.batch_size = batch_size + + def __call__(self, data: dict) -> List[dict]: + return monai.data.decollate_batch(data, self.batch_size) + + ActivationsD = ActivationsDict = Activationsd AsDiscreteD = AsDiscreteDict = AsDiscreted KeepLargestConnectedComponentD = KeepLargestConnectedComponentDict = KeepLargestConnectedComponentd LabelToContourD = LabelToContourDict = LabelToContourd MeanEnsembleD = MeanEnsembleDict = MeanEnsembled VoteEnsembleD = VoteEnsembleDict = VoteEnsembled +DecollateD = DecollateDict = Decollated diff --git a/tests/test_decollate.py b/tests/test_decollate.py new file mode 100644 index 0000000000..5c6f04b48e --- /dev/null +++ b/tests/test_decollate.py @@ -0,0 +1,88 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.data import CacheDataset, DataLoader, create_test_image_2d +from monai.data.utils import decollate_batch +from monai.transforms import AddChanneld, Compose, LoadImaged, RandFlipd, SpatialPadd, ToTensord +from monai.transforms.post.dictionary import Decollated +from monai.utils import optional_import, set_determinism +from tests.utils import make_nifti_image + +_, has_nib = optional_import("nibabel") + +IM_2D = create_test_image_2d(100, 101)[0] +DATA_2D = {"image": make_nifti_image(IM_2D) if has_nib else IM_2D} + +TESTS = [] +TESTS.append( + ( + "2D", + [DATA_2D for _ in range(6)], + ) +) + + +class TestDeCollate(unittest.TestCase): + def setUp(self) -> None: + set_determinism(seed=0) + + def tearDown(self) -> None: + set_determinism(None) + + def check_match(self, in1, in2): + if isinstance(in1, dict): + self.assertTrue(isinstance(in2, dict)) + self.check_match(list(in1.keys()), list(in2.keys())) + self.check_match(list(in1.values()), list(in2.values())) + elif any(isinstance(in1, i) for i in [list, tuple]): + for l1, l2 in zip(in1, in2): + self.check_match(l1, l2) + elif any(isinstance(in1, i) for i in [str, int]): + self.assertEqual(in1, in2) + elif any(isinstance(in1, i) for i in [torch.Tensor, np.ndarray]): + np.testing.assert_array_equal(in1, in2) + else: + raise RuntimeError(f"Not sure how to compare types. type(in1): {type(in1)}, type(in2): {type(in2)}") + + @parameterized.expand(TESTS) + def test_decollation(self, _, data, batch_size=2, num_workers=2): + transforms = Compose( + [ + AddChanneld("image"), + SpatialPadd("image", 150), + RandFlipd("image", prob=1.0, spatial_axis=1), + ToTensord("image"), + ] + ) + # If nibabel present, read from disk + if has_nib: + transforms = Compose([LoadImaged("image"), transforms]) + + dataset = CacheDataset(data, transforms, progress=False) + loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) + + for b, batch_data in enumerate(loader): + decollated_1 = decollate_batch(batch_data) + decollated_2 = Decollated()(batch_data) + + for decollated in [decollated_1, decollated_2]: + for i, d in enumerate(decollated): + self.check_match(dataset[b * batch_size + i], d) + + +if __name__ == "__main__": + unittest.main() From b2f1cf4cdfa712546d3db2a31f13d3e9b330f9cf Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Thu, 25 Feb 2021 11:45:40 +0000 Subject: [PATCH 005/457] enhance random range parameters (#1635) enhance random range parameters If element `i` is iterable, then `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 and nothing for the remaining dimensions. 
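To make the new behaviour concrete, here is a minimal standalone sketch of the
sampling rule. It mirrors the `_get_rand_param` helper added in this patch; the
`sample_ranges` name and signature are illustrative only, and a (min, max) pair
is drawn with `uniform(min, max)`, as in the implementation:

    import numpy as np

    def sample_ranges(param_range, rng=np.random, add_scalar=0.0):
        # each element is either a (min, max) pair -> drawn from uniform[min, max),
        # or a single half-width x -> drawn from uniform[-x, x); None entries are skipped
        out = []
        for f in param_range:
            if f is None:
                continue
            if hasattr(f, "__iter__"):
                lo, hi = f  # exactly two elements per dim, as validated in _get_rand_param
                out.append(rng.uniform(lo, hi) + add_scalar)
            else:
                out.append(rng.uniform(-f, f) + add_scalar)
        return out

    sample_ranges(((0, 3), 1))                 # dim0 from [0, 3), dim1 from [-1, 1)
    sample_ranges((0.1, 0.1), add_scalar=1.0)  # scale factors, each from [0.9, 1.1)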
Backwards compatibility is ensured because the old behaviour used to do `uniform[-rotate_range[i], rotate_range[i])`, and this PR just adds the possibility to do `uniform[-rotate_range[i][0], rotate_range[i][1])`. --- monai/transforms/spatial/array.py | 158 ++++++++++++------------- monai/transforms/spatial/dictionary.py | 97 +++++++-------- 2 files changed, 121 insertions(+), 134 deletions(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 0610982847..d6dbe56f01 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -42,6 +42,7 @@ ensure_tuple_rep, ensure_tuple_size, fall_back_tuple, + issequenceiterable, optional_import, ) @@ -69,6 +70,8 @@ "Rand3DElastic", ] +RandRange = Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] + class Spacing(Transform): """ @@ -965,30 +968,25 @@ class RandAffineGrid(Randomizable, Transform): def __init__( self, - rotate_range: Optional[Union[Sequence[float], float]] = None, - shear_range: Optional[Union[Sequence[float], float]] = None, - translate_range: Optional[Union[Sequence[float], float]] = None, - scale_range: Optional[Union[Sequence[float], float]] = None, + rotate_range: RandRange = None, + shear_range: RandRange = None, + translate_range: RandRange = None, + scale_range: RandRange = None, as_tensor_output: bool = True, device: Optional[torch.device] = None, ) -> None: """ Args: - rotate_range: angle range in radians. rotate_range[0] with be used to generate the 1st rotation - parameter from `uniform[-rotate_range[0], rotate_range[0])`. Similarly, `rotate_range[1]` and - `rotate_range[2]` are used in 3D affine for the range of 2nd and 3rd axes. - shear_range: shear_range[0] with be used to generate the 1st shearing parameter from - `uniform[-shear_range[0], shear_range[0])`. Similarly, `shear_range[1]` to - `shear_range[N]` controls the range of the uniform distribution used to generate the 2nd to - N-th parameter. - translate_range : translate_range[0] with be used to generate the 1st shift parameter from - `uniform[-translate_range[0], translate_range[0])`. Similarly, `translate_range[1]` - to `translate_range[N]` controls the range of the uniform distribution used to generate - the 2nd to N-th parameter. - scale_range: scaling_range[0] with be used to generate the 1st scaling factor from - `uniform[-scale_range[0], scale_range[0]) + 1.0`. Similarly, `scale_range[1]` to - `scale_range[N]` controls the range of the uniform distribution used to generate the 2nd to - N-th parameter. + rotate_range: angle range in radians. If element `i` is iterable, then + `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter + for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can + be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range + `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 + and nothing for the remaining dimensions. + shear_range: shear_range with format matching `rotate_range`. + translate_range: translate_range with format matching `rotate_range`. + scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. + This allows 0 to correspond to no change (i.e., a scaling of 1). as_tensor_output: whether to output tensor instead of numpy array. defaults to True. device: device to store the output grid data. 
@@ -1012,15 +1010,22 @@ def __init__( self.as_tensor_output = as_tensor_output self.device = device + def _get_rand_param(self, param_range, add_scalar: float = 0.0): + out_param = [] + for f in param_range: + if issequenceiterable(f): + if len(f) != 2: + raise ValueError("If giving range as [min,max], should only have two elements per dim.") + out_param.append(self.R.uniform(f[0], f[1]) + add_scalar) + elif f is not None: + out_param.append(self.R.uniform(-f, f) + add_scalar) + return out_param + def randomize(self, data: Optional[Any] = None) -> None: - if self.rotate_range: - self.rotate_params = [self.R.uniform(-f, f) for f in self.rotate_range if f is not None] - if self.shear_range: - self.shear_params = [self.R.uniform(-f, f) for f in self.shear_range if f is not None] - if self.translate_range: - self.translate_params = [self.R.uniform(-f, f) for f in self.translate_range if f is not None] - if self.scale_range: - self.scale_params = [self.R.uniform(-f, f) + 1.0 for f in self.scale_range if f is not None] + self.rotate_params = self._get_rand_param(self.rotate_range) + self.shear_params = self._get_rand_param(self.shear_range) + self.translate_params = self._get_rand_param(self.translate_range) + self.scale_params = self._get_rand_param(self.scale_range, 1.0) def __call__( self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[Union[np.ndarray, torch.Tensor]] = None @@ -1282,11 +1287,11 @@ class RandAffine(Randomizable, Transform): def __init__( self, prob: float = 0.1, - rotate_range: Optional[Union[Sequence[float], float]] = None, - shear_range: Optional[Union[Sequence[float], float]] = None, - translate_range: Optional[Union[Sequence[float], float]] = None, - scale_range: Optional[Union[Sequence[float], float]] = None, - spatial_size: Optional[Union[Sequence[float], float]] = None, + rotate_range: RandRange = None, + shear_range: RandRange = None, + translate_range: RandRange = None, + scale_range: RandRange = None, + spatial_size: Optional[Union[Sequence[int], int]] = None, mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR, padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION, as_tensor_output: bool = True, @@ -1296,21 +1301,16 @@ def __init__( Args: prob: probability of returning a randomized affine grid. defaults to 0.1, with 10% chance returns a randomized grid. - rotate_range: angle range in radians. rotate_range[0] with be used to generate the 1st rotation - parameter from `uniform[-rotate_range[0], rotate_range[0])`. Similarly, `rotate_range[1]` and - `rotate_range[2]` are used in 3D affine for the range of 2nd and 3rd axes. - shear_range: shear_range[0] with be used to generate the 1st shearing parameter from - `uniform[-shear_range[0], shear_range[0])`. Similarly, `shear_range[1]` to - `shear_range[N]` controls the range of the uniform distribution used to generate the 2nd to - N-th parameter. - translate_range : translate_range[0] with be used to generate the 1st shift parameter from - `uniform[-translate_range[0], translate_range[0])`. Similarly, `translate_range[1]` - to `translate_range[N]` controls the range of the uniform distribution used to generate - the 2nd to N-th parameter. - scale_range: scaling_range[0] with be used to generate the 1st scaling factor from - `uniform[-scale_range[0], scale_range[0]) + 1.0`. Similarly, `scale_range[1]` to - `scale_range[N]` controls the range of the uniform distribution used to generate the 2nd to - N-th parameter. + rotate_range: angle range in radians. 
If element `i` is iterable, then + `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter + for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can + be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range + `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 + and nothing for the remaining dimensions. + shear_range: shear_range with format matching `rotate_range`. + translate_range: translate_range with format matching `rotate_range`. + scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. + This allows 0 to correspond to no change (i.e., a scaling of 1). spatial_size: output image spatial size. if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1, the transform will use the spatial size of `img`. @@ -1404,11 +1404,11 @@ def __init__( spacing: Union[Tuple[float, float], float], magnitude_range: Tuple[float, float], prob: float = 0.1, - rotate_range: Optional[Union[Sequence[float], float]] = None, - shear_range: Optional[Union[Sequence[float], float]] = None, - translate_range: Optional[Union[Sequence[float], float]] = None, - scale_range: Optional[Union[Sequence[float], float]] = None, - spatial_size: Optional[Union[Sequence[int], int]] = None, + rotate_range: RandRange = None, + shear_range: RandRange = None, + translate_range: RandRange = None, + scale_range: RandRange = None, + spatial_size: Optional[Union[Tuple[int, int], int]] = None, mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR, padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION, as_tensor_output: bool = False, @@ -1421,17 +1421,16 @@ def __init__( prob: probability of returning a randomized elastic transform. defaults to 0.1, with 10% chance returns a randomized elastic transform, otherwise returns a ``spatial_size`` centered area extracted from the input image. - rotate_range: angle range in radians. rotate_range[0] with be used to generate the 1st rotation - parameter from `uniform[-rotate_range[0], rotate_range[0])`. - shear_range: shear_range[0] with be used to generate the 1st shearing parameter from - `uniform[-shear_range[0], shear_range[0])`. Similarly, `shear_range[1]` controls - the range of the uniform distribution used to generate the 2nd parameter. - translate_range : translate_range[0] with be used to generate the 1st shift parameter from - `uniform[-translate_range[0], translate_range[0])`. Similarly, `translate_range[1]` controls - the range of the uniform distribution used to generate the 2nd parameter. - scale_range: scaling_range[0] with be used to generate the 1st scaling factor from - `uniform[-scale_range[0], scale_range[0]) + 1.0`. Similarly, `scale_range[1]` controls - the range of the uniform distribution used to generate the 2nd parameter. + rotate_range: angle range in radians. If element `i` is iterable, then + `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter + for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can + be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range + `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 + and nothing for the remaining dimensions. + shear_range: shear_range with format matching `rotate_range`. 
+ translate_range: translate_range with format matching `rotate_range`. + scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. + This allows 0 to correspond to no change (i.e., a scaling of 1). spatial_size: specifying output image spatial size [h, w]. if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1, the transform will use the spatial size of `img`. @@ -1532,11 +1531,11 @@ def __init__( sigma_range: Tuple[float, float], magnitude_range: Tuple[float, float], prob: float = 0.1, - rotate_range: Optional[Union[Sequence[float], float]] = None, - shear_range: Optional[Union[Sequence[float], float]] = None, - translate_range: Optional[Union[Sequence[float], float]] = None, - scale_range: Optional[Union[Sequence[float], float]] = None, - spatial_size: Optional[Union[Sequence[int], int]] = None, + rotate_range: RandRange = None, + shear_range: RandRange = None, + translate_range: RandRange = None, + scale_range: RandRange = None, + spatial_size: Optional[Union[Tuple[int, int, int], int]] = None, mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR, padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION, as_tensor_output: bool = False, @@ -1551,19 +1550,16 @@ def __init__( prob: probability of returning a randomized elastic transform. defaults to 0.1, with 10% chance returns a randomized elastic transform, otherwise returns a ``spatial_size`` centered area extracted from the input image. - rotate_range: angle range in radians. rotate_range[0] with be used to generate the 1st rotation - parameter from `uniform[-rotate_range[0], rotate_range[0])`. Similarly, `rotate_range[1]` and - `rotate_range[2]` are used in 3D affine for the range of 2nd and 3rd axes. - shear_range: shear_range[0] with be used to generate the 1st shearing parameter from - `uniform[-shear_range[0], shear_range[0])`. Similarly, `shear_range[1]` and `shear_range[2]` - controls the range of the uniform distribution used to generate the 2nd and 3rd parameters. - translate_range : translate_range[0] with be used to generate the 1st shift parameter from - `uniform[-translate_range[0], translate_range[0])`. Similarly, `translate_range[1]` and - `translate_range[2]` controls the range of the uniform distribution used to generate - the 2nd and 3rd parameters. - scale_range: scaling_range[0] with be used to generate the 1st scaling factor from - `uniform[-scale_range[0], scale_range[0]) + 1.0`. Similarly, `scale_range[1]` and `scale_range[2]` - controls the range of the uniform distribution used to generate the 2nd and 3rd parameters. + rotate_range: angle range in radians. If element `i` is iterable, then + `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter + for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can + be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range + `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 + and nothing for the remaining dimensions. + shear_range: shear_range with format matching `rotate_range`. + translate_range: translate_range with format matching `rotate_range`. + scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. + This allows 0 to correspond to no change (i.e., a scaling of 1). spatial_size: specifying output image spatial size [h, w, d]. 
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1, the transform will use the spatial size of `img`. diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 8b546e5e97..2c66cd5f50 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -374,10 +374,10 @@ def __init__( keys: KeysCollection, spatial_size: Optional[Union[Sequence[int], int]] = None, prob: float = 0.1, - rotate_range: Optional[Union[Sequence[float], float]] = None, - shear_range: Optional[Union[Sequence[float], float]] = None, - translate_range: Optional[Union[Sequence[float], float]] = None, - scale_range: Optional[Union[Sequence[float], float]] = None, + rotate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, + shear_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, + translate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, + scale_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, mode: GridSampleModeSequence = GridSampleMode.BILINEAR, padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION, as_tensor_output: bool = True, @@ -394,21 +394,16 @@ def __init__( to `(32, 64)` if the second spatial dimension size of img is `64`. prob: probability of returning a randomized affine grid. defaults to 0.1, with 10% chance returns a randomized grid. - rotate_range: angle range in radians. rotate_range[0] with be used to generate the 1st rotation - parameter from `uniform[-rotate_range[0], rotate_range[0])`. Similarly, `rotate_range[1]` and - `rotate_range[2]` are used in 3D affine for the range of 2nd and 3rd axes. - shear_range: shear_range[0] with be used to generate the 1st shearing parameter from - `uniform[-shear_range[0], shear_range[0])`. Similarly, `shear_range[1]` to - `shear_range[N]` controls the range of the uniform distribution used to generate the 2nd to - N-th parameter. - translate_range : translate_range[0] with be used to generate the 1st shift parameter from - `uniform[-translate_range[0], translate_range[0])`. Similarly, `translate_range[1]` - to `translate_range[N]` controls the range of the uniform distribution used to generate - the 2nd to N-th parameter. - scale_range: scaling_range[0] with be used to generate the 1st scaling factor from - `uniform[-scale_range[0], scale_range[0]) + 1.0`. Similarly, `scale_range[1]` to - `scale_range[N]` controls the range of the uniform distribution used to generate the 2nd to - N-th parameter. + rotate_range: angle range in radians. If element `i` is iterable, then + `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter + for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can + be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range + `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 + and nothing for the remaining dimensions. + shear_range: shear_range with format matching `rotate_range`. + translate_range: translate_range with format matching `rotate_range`. + scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. + This allows 0 to correspond to no change (i.e., a scaling of 1). mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. 
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample @@ -476,12 +471,12 @@ def __init__( keys: KeysCollection, spacing: Union[Tuple[float, float], float], magnitude_range: Tuple[float, float], - spatial_size: Optional[Union[Sequence[int], int]] = None, + spatial_size: Optional[Union[Tuple[int, int], int]] = None, prob: float = 0.1, - rotate_range: Optional[Union[Sequence[float], float]] = None, - shear_range: Optional[Union[Sequence[float], float]] = None, - translate_range: Optional[Union[Sequence[float], float]] = None, - scale_range: Optional[Union[Sequence[float], float]] = None, + rotate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, + shear_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, + translate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, + scale_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, mode: GridSampleModeSequence = GridSampleMode.BILINEAR, padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION, as_tensor_output: bool = False, @@ -502,17 +497,16 @@ def __init__( prob: probability of returning a randomized affine grid. defaults to 0.1, with 10% chance returns a randomized grid, otherwise returns a ``spatial_size`` centered area extracted from the input image. - rotate_range: angle range in radians. rotate_range[0] with be used to generate the 1st rotation - parameter from `uniform[-rotate_range[0], rotate_range[0])`. - shear_range: shear_range[0] with be used to generate the 1st shearing parameter from - `uniform[-shear_range[0], shear_range[0])`. Similarly, `shear_range[1]` controls - the range of the uniform distribution used to generate the 2nd parameter. - translate_range : translate_range[0] with be used to generate the 1st shift parameter from - `uniform[-translate_range[0], translate_range[0])`. Similarly, `translate_range[1]` controls - the range of the uniform distribution used to generate the 2nd parameter. - scale_range: scaling_range[0] with be used to generate the 1st scaling factor from - `uniform[-scale_range[0], scale_range[0]) + 1.0`. Similarly, `scale_range[1]` controls - the range of the uniform distribution used to generate the 2nd parameter. + rotate_range: angle range in radians. If element `i` is iterable, then + `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter + for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can + be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range + `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 + and nothing for the remaining dimensions. + shear_range: shear_range with format matching `rotate_range`. + translate_range: translate_range with format matching `rotate_range`. + scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. + This allows 0 to correspond to no change (i.e., a scaling of 1). mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. 
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample @@ -594,12 +588,12 @@ def __init__( keys: KeysCollection, sigma_range: Tuple[float, float], magnitude_range: Tuple[float, float], - spatial_size: Optional[Union[Sequence[int], int]] = None, + spatial_size: Optional[Union[Tuple[int, int, int], int]] = None, prob: float = 0.1, - rotate_range: Optional[Union[Sequence[float], float]] = None, - shear_range: Optional[Union[Sequence[float], float]] = None, - translate_range: Optional[Union[Sequence[float], float]] = None, - scale_range: Optional[Union[Sequence[float], float]] = None, + rotate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, + shear_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, + translate_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, + scale_range: Optional[Union[Sequence[Union[Tuple[float, float], float]], float]] = None, mode: GridSampleModeSequence = GridSampleMode.BILINEAR, padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION, as_tensor_output: bool = False, @@ -621,19 +615,16 @@ def __init__( prob: probability of returning a randomized affine grid. defaults to 0.1, with 10% chance returns a randomized grid, otherwise returns a ``spatial_size`` centered area extracted from the input image. - rotate_range: angle range in radians. rotate_range[0] with be used to generate the 1st rotation - parameter from `uniform[-rotate_range[0], rotate_range[0])`. Similarly, `rotate_range[1]` and - `rotate_range[2]` are used in 3D affine for the range of 2nd and 3rd axes. - shear_range: shear_range[0] with be used to generate the 1st shearing parameter from - `uniform[-shear_range[0], shear_range[0])`. Similarly, `shear_range[1]` and `shear_range[2]` - controls the range of the uniform distribution used to generate the 2nd and 3rd parameters. - translate_range : translate_range[0] with be used to generate the 1st shift parameter from - `uniform[-translate_range[0], translate_range[0])`. Similarly, `translate_range[1]` and - `translate_range[2]` controls the range of the uniform distribution used to generate - the 2nd and 3rd parameters. - scale_range: scaling_range[0] with be used to generate the 1st scaling factor from - `uniform[-scale_range[0], scale_range[0]) + 1.0`. Similarly, `scale_range[1]` and `scale_range[2]` - controls the range of the uniform distribution used to generate the 2nd and 3rd parameters. + rotate_range: angle range in radians. If element `i` is iterable, then + `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter + for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can + be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range + `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0 + and nothing for the remaining dimensions. + shear_range: shear_range with format matching `rotate_range`. + translate_range: translate_range with format matching `rotate_range`. + scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result. + This allows 0 to correspond to no change (i.e., a scaling of 1). mode: {``"bilinear"``, ``"nearest"``} Interpolation mode to calculate output values. Defaults to ``"bilinear"``. 
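The per-dimension range convention documented in the hunks above can be made concrete with a small standalone sketch. It is not part of the patch, and `sample_params` is an illustrative helper rather than a MONAI function:

    import numpy as np

    def sample_params(ranges, rand_state=None):
        # resolve each element as the docstrings describe: an iterable (lo, hi)
        # maps to uniform[-lo, hi), a scalar x maps to uniform[-x, x)
        rand_state = rand_state if rand_state is not None else np.random.RandomState()
        params = []
        for r in ranges:
            if hasattr(r, "__iter__"):
                params.append(rand_state.uniform(-r[0], r[1]))
            else:
                params.append(rand_state.uniform(-r, r))
        return params

    # dim0 rotation drawn from [0, 3), dim1 from [-1, 1)
    print(sample_params(((0, 3), 1)))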
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample From 3c45f183ae5b80d052496a12751d794d1ba6b760 Mon Sep 17 00:00:00 2001 From: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Date: Thu, 25 Feb 2021 17:23:36 +0000 Subject: [PATCH 006/457] Loss script fixes (#1645) * Fixing issues preventing loss functions from being compatible with Torchscript Signed-off-by: Eric Kerfoot * Updates Signed-off-by: Eric Kerfoot * Updates Signed-off-by: Eric Kerfoot * [MONAI] python code formatting Signed-off-by: monai-bot * Updates Signed-off-by: Eric Kerfoot * Adding conditional skip to Torchscript tests Signed-off-by: Eric Kerfoot Co-authored-by: monai-bot --- monai/losses/dice.py | 22 +++--- monai/losses/focal_loss.py | 4 +- monai/losses/tversky.py | 4 +- monai/networks/layers/simplelayers.py | 76 +++++++++++-------- monai/networks/utils.py | 9 +-- tests/test_dice_ce_loss.py | 7 ++ tests/test_dice_loss.py | 7 ++ tests/test_focal_loss.py | 7 ++ tests/test_generalized_dice_loss.py | 7 ++ .../test_generalized_wasserstein_dice_loss.py | 13 ++++ ...local_normalized_cross_correlation_loss.py | 6 ++ tests/test_multi_scale.py | 7 ++ tests/test_tversky_loss.py | 7 ++ 13 files changed, 126 insertions(+), 50 deletions(-) diff --git a/monai/losses/dice.py b/monai/losses/dice.py index c284660cc6..24bd038b68 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -10,7 +10,7 @@ # limitations under the License. import warnings -from typing import Callable, Optional, Union +from typing import Callable, List, Optional, Union import numpy as np import torch @@ -139,7 +139,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})") # reducing only spatial dimensions (not batch nor channels) - reduce_axis = list(range(2, len(input.shape))) + reduce_axis: List[int] = torch.arange(2, len(input.shape)).tolist() if self.batch: # reducing spatial dimensions and batch reduce_axis = [0] + reduce_axis @@ -268,23 +268,27 @@ def __init__( raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.") if int(sigmoid) + int(softmax) + int(other_act is not None) > 1: raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].") + self.include_background = include_background self.to_onehot_y = to_onehot_y self.sigmoid = sigmoid self.softmax = softmax self.other_act = other_act - w_type = Weight(w_type) - self.w_func: Callable = torch.ones_like - if w_type == Weight.SIMPLE: - self.w_func = torch.reciprocal - elif w_type == Weight.SQUARE: - self.w_func = lambda x: torch.reciprocal(x * x) + self.w_type = Weight(w_type) self.smooth_nr = float(smooth_nr) self.smooth_dr = float(smooth_dr) self.batch = batch + def w_func(self, grnd): + if self.w_type == Weight.SIMPLE: + return torch.reciprocal(grnd) + elif self.w_type == Weight.SQUARE: + return torch.reciprocal(grnd * grnd) + else: + return torch.ones_like(grnd) + def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: """ Args: @@ -325,7 +329,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})") # reducing only spatial dimensions (not batch nor channels) - reduce_axis = list(range(2, len(input.shape))) + reduce_axis: List[int] = torch.arange(2, len(input.shape)).tolist() if self.batch: reduce_axis = [0] + 
reduce_axis intersection = torch.sum(target * input, reduce_axis) diff --git a/monai/losses/focal_loss.py b/monai/losses/focal_loss.py index da7c63e571..920661f76f 100644 --- a/monai/losses/focal_loss.py +++ b/monai/losses/focal_loss.py @@ -80,8 +80,8 @@ def forward(self, logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor: i = logits t = target - if i.ndimension() != t.ndimension(): - raise ValueError(f"logits and target ndim must match, got logits={i.ndimension()} target={t.ndimension()}.") + if i.ndim != t.ndim: + raise ValueError(f"logits and target ndim must match, got logits={i.ndim} target={t.ndim}.") if t.shape[1] != 1 and t.shape[1] != i.shape[1]: raise ValueError( diff --git a/monai/losses/tversky.py b/monai/losses/tversky.py index b1c45a74a2..1d75b9e8cc 100644 --- a/monai/losses/tversky.py +++ b/monai/losses/tversky.py @@ -10,7 +10,7 @@ # limitations under the License. import warnings -from typing import Callable, Optional, Union +from typing import Callable, List, Optional, Union import torch from torch.nn.modules.loss import _Loss @@ -139,7 +139,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: g1 = 1 - g0 # reducing only spatial dimensions (not batch nor channels) - reduce_axis = list(range(2, len(input.shape))) + reduce_axis: List[int] = torch.arange(2, len(input.shape)).tolist() if self.batch: # reducing spatial dimensions and batch reduce_axis = [0] + reduce_axis diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index f560526db8..b2af4fcbcd 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -10,14 +10,14 @@ # limitations under the License. import math -from typing import Sequence, Union, cast +from typing import List, Sequence, Union import torch import torch.nn.functional as F from torch import nn from torch.autograd import Function -from monai.networks.layers.convutils import gaussian_1d, same_padding +from monai.networks.layers.convutils import gaussian_1d from monai.networks.layers.factories import Conv from monai.utils import ( PT_BEFORE_1_7, @@ -164,9 +164,45 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return x.reshape(shape) -def separable_filtering( - x: torch.Tensor, kernels: Union[Sequence[torch.Tensor], torch.Tensor], mode: str = "zeros" +def _separable_filtering_conv( + input_: torch.Tensor, + kernels: List[torch.Tensor], + pad_mode: str, + d: int, + spatial_dims: int, + paddings: List[int], + num_channels: int, ) -> torch.Tensor: + + if d < 0: + return input_ + + s = [1] * len(input_.shape) + s[d + 2] = -1 + _kernel = kernels[d].reshape(s) + + # if filter kernel is unity, don't convolve + if _kernel.numel() == 1 and _kernel[0] == 1: + return _separable_filtering_conv(input_, kernels, pad_mode, d - 1, spatial_dims, paddings, num_channels) + + _kernel = _kernel.repeat([num_channels, 1] + [1] * spatial_dims) + _padding = [0] * spatial_dims + _padding[d] = paddings[d] + conv_type = [F.conv1d, F.conv2d, F.conv3d][spatial_dims - 1] + + # translate padding for input to torch.nn.functional.pad + _reversed_padding_repeated_twice: List[List[int]] = [[p, p] for p in reversed(_padding)] + _sum_reversed_padding_repeated_twice: List[int] = sum(_reversed_padding_repeated_twice, []) + padded_input = F.pad(input_, _sum_reversed_padding_repeated_twice, mode=pad_mode) + + return conv_type( + input=_separable_filtering_conv(padded_input, kernels, pad_mode, d - 1, spatial_dims, paddings, num_channels), + weight=_kernel, + groups=num_channels, 
+ ) + + +def separable_filtering(x: torch.Tensor, kernels: List[torch.Tensor], mode: str = "zeros") -> torch.Tensor: """ Apply 1-D convolutions along each spatial dimension of `x`. @@ -186,36 +222,12 @@ def separable_filtering( raise TypeError(f"x must be a torch.Tensor but is {type(x).__name__}.") spatial_dims = len(x.shape) - 2 - _kernels = [ - torch.as_tensor(s, dtype=torch.float, device=s.device if isinstance(s, torch.Tensor) else None) - for s in ensure_tuple_rep(kernels, spatial_dims) - ] - _paddings = [cast(int, (same_padding(k.shape[0]))) for k in _kernels] + _kernels = [s.float() for s in kernels] + _paddings = [(k.shape[0] - 1) // 2 for k in _kernels] n_chs = x.shape[1] + pad_mode = "constant" if mode == "zeros" else mode - def _conv(input_: torch.Tensor, d: int) -> torch.Tensor: - if d < 0: - return input_ - s = [1] * len(input_.shape) - s[d + 2] = -1 - _kernel = kernels[d].reshape(s) - # if filter kernel is unity, don't convolve - if _kernel.numel() == 1 and _kernel[0] == 1: - return _conv(input_, d - 1) - _kernel = _kernel.repeat([n_chs, 1] + [1] * spatial_dims) - _padding = [0] * spatial_dims - _padding[d] = _paddings[d] - conv_type = [F.conv1d, F.conv2d, F.conv3d][spatial_dims - 1] - # translate padding for input to torch.nn.functional.pad - _reversed_padding_repeated_twice = [p for p in reversed(_padding) for _ in range(2)] - pad_mode = "constant" if mode == "zeros" else mode - return conv_type( - input=_conv(F.pad(input_, _reversed_padding_repeated_twice, mode=pad_mode), d - 1), - weight=_kernel, - groups=n_chs, - ) - - return _conv(x, spatial_dims - 1) + return _separable_filtering_conv(x, kernels, pad_mode, spatial_dims - 1, spatial_dims, _paddings, n_chs) class SavitzkyGolayFilter(nn.Module): diff --git a/monai/networks/utils.py b/monai/networks/utils.py index 847bfc97c2..48efe3934e 100644 --- a/monai/networks/utils.py +++ b/monai/networks/utils.py @@ -19,8 +19,6 @@ import torch import torch.nn as nn -from monai.utils import ensure_tuple_size - __all__ = [ "one_hot", "slice_channels", @@ -50,13 +48,14 @@ def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype = torch.f # if `dim` is bigger, add singleton dim at the end if labels.ndim < dim + 1: - shape = ensure_tuple_size(labels.shape, dim + 1, 1) - labels = labels.reshape(*shape) + shape = list(labels.shape) + [1] * (dim + 1 - len(labels.shape)) + labels = torch.reshape(labels, shape) sh = list(labels.shape) if sh[dim] != 1: - raise AssertionError("labels should have a channel with length equals to one.") + raise AssertionError("labels should have a channel with length equal to one.") + sh[dim] = num_classes o = torch.zeros(size=sh, dtype=dtype, device=labels.device) diff --git a/tests/test_dice_ce_loss.py b/tests/test_dice_ce_loss.py index 443d9a9baf..8627c6d130 100644 --- a/tests/test_dice_ce_loss.py +++ b/tests/test_dice_ce_loss.py @@ -16,6 +16,7 @@ from parameterized import parameterized from monai.losses import DiceCELoss +from tests.utils import SkipIfBeforePyTorchVersion, test_script_save TEST_CASES = [ [ # shape: (2, 2, 3), (2, 1, 3) @@ -64,6 +65,12 @@ def test_ill_shape(self): with self.assertRaisesRegex(ValueError, ""): loss(torch.ones((1, 2, 3)), torch.ones((1, 1, 2, 3))) + @SkipIfBeforePyTorchVersion((1, 7, 0)) + def test_script(self): + loss = DiceCELoss() + test_input = torch.ones(2, 1, 8, 8) + test_script_save(loss, test_input, test_input) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_dice_loss.py b/tests/test_dice_loss.py index aa4a7cbc34..ef0a51eb15 100644 --- 
a/tests/test_dice_loss.py +++ b/tests/test_dice_loss.py @@ -16,6 +16,7 @@ from parameterized import parameterized from monai.losses import DiceLoss +from tests.utils import SkipIfBeforePyTorchVersion, test_script_save TEST_CASES = [ [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) @@ -195,6 +196,12 @@ def test_input_warnings(self): loss = DiceLoss(to_onehot_y=True) loss.forward(chn_input, chn_target) + @SkipIfBeforePyTorchVersion((1, 7, 0)) + def test_script(self): + loss = DiceLoss() + test_input = torch.ones(2, 1, 8, 8) + test_script_save(loss, test_input, test_input) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_focal_loss.py b/tests/test_focal_loss.py index d06e2b4c36..2d1df602c7 100644 --- a/tests/test_focal_loss.py +++ b/tests/test_focal_loss.py @@ -16,6 +16,7 @@ import torch.nn.functional as F from monai.losses import FocalLoss +from tests.utils import SkipIfBeforePyTorchVersion, test_script_save class TestFocalLoss(unittest.TestCase): @@ -164,6 +165,12 @@ def test_ill_shape(self): with self.assertRaisesRegex(NotImplementedError, ""): FocalLoss()(chn_input, chn_target) + @SkipIfBeforePyTorchVersion((1, 7, 0)) + def test_script(self): + loss = FocalLoss() + test_input = torch.ones(2, 2, 8, 8) + test_script_save(loss, test_input, test_input) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_generalized_dice_loss.py b/tests/test_generalized_dice_loss.py index e88253ccba..06446204fb 100644 --- a/tests/test_generalized_dice_loss.py +++ b/tests/test_generalized_dice_loss.py @@ -16,6 +16,7 @@ from parameterized import parameterized from monai.losses import GeneralizedDiceLoss +from tests.utils import SkipIfBeforePyTorchVersion, test_script_save TEST_CASES = [ [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) @@ -178,6 +179,12 @@ def test_input_warnings(self): loss = GeneralizedDiceLoss(to_onehot_y=True) loss.forward(chn_input, chn_target) + @SkipIfBeforePyTorchVersion((1, 7, 0)) + def test_script(self): + loss = GeneralizedDiceLoss() + test_input = torch.ones(2, 1, 8, 8) + test_script_save(loss, test_input, test_input) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_generalized_wasserstein_dice_loss.py b/tests/test_generalized_wasserstein_dice_loss.py index 6865b53027..295a4a6d70 100644 --- a/tests/test_generalized_wasserstein_dice_loss.py +++ b/tests/test_generalized_wasserstein_dice_loss.py @@ -18,6 +18,7 @@ import torch.optim as optim from monai.losses import GeneralizedWassersteinDiceLoss +from tests.utils import SkipIfBeforePyTorchVersion, test_script_save class TestGeneralizedWassersteinDiceLoss(unittest.TestCase): @@ -215,6 +216,18 @@ def forward(self, x): # check that the predicted segmentation has improved self.assertGreater(diff_start, diff_end) + @SkipIfBeforePyTorchVersion((1, 7, 0)) + def test_script(self): + target = torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]) + + # add another dimension corresponding to the batch (batch size = 1 here) + target = target.unsqueeze(0) + pred_very_good = 1000 * F.one_hot(target, num_classes=2).permute(0, 3, 1, 2).float() + + loss = GeneralizedWassersteinDiceLoss(dist_matrix=np.array([[0.0, 1.0], [1.0, 0.0]]), weighting_mode="default") + + test_script_save(loss, pred_very_good, target) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_local_normalized_cross_correlation_loss.py b/tests/test_local_normalized_cross_correlation_loss.py index cf8566a559..8e9482596f 100644 --- a/tests/test_local_normalized_cross_correlation_loss.py +++ 
b/tests/test_local_normalized_cross_correlation_loss.py @@ -110,5 +110,11 @@ def test_ill_opts(self): LocalNormalizedCrossCorrelationLoss(in_channels=3, reduction=None)(pred, target) +# def test_script(self): +# input_param, input_data, _ = TEST_CASES[0] +# loss = LocalNormalizedCrossCorrelationLoss(**input_param) +# test_script_save(loss, input_data["pred"], input_data["target"]) + + if __name__ == "__main__": unittest.main() diff --git a/tests/test_multi_scale.py b/tests/test_multi_scale.py index 722ae7cfce..9ce1734e28 100644 --- a/tests/test_multi_scale.py +++ b/tests/test_multi_scale.py @@ -16,6 +16,7 @@ from monai.losses import DiceLoss from monai.losses.multi_scale import MultiScaleLoss +from tests.utils import SkipIfBeforePyTorchVersion, test_script_save dice_loss = DiceLoss(include_background=True, sigmoid=True, smooth_nr=1e-5, smooth_dr=1e-5) @@ -55,6 +56,12 @@ def test_ill_opts(self): with self.assertRaisesRegex(ValueError, ""): MultiScaleLoss(loss=dice_loss, scales=[-1], reduction="none")(torch.ones((1, 1, 3)), torch.ones((1, 1, 3))) + @SkipIfBeforePyTorchVersion((1, 7, 0)) + def test_script(self): + input_param, input_data, expected_val = TEST_CASES[0] + loss = MultiScaleLoss(**input_param) + test_script_save(loss, input_data["y_pred"], input_data["y_true"]) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_tversky_loss.py b/tests/test_tversky_loss.py index a1befa062d..0bc2ca2e70 100644 --- a/tests/test_tversky_loss.py +++ b/tests/test_tversky_loss.py @@ -16,6 +16,7 @@ from parameterized import parameterized from monai.losses import TverskyLoss +from tests.utils import SkipIfBeforePyTorchVersion, test_script_save TEST_CASES = [ [ # shape: (1, 1, 2, 2), (1, 1, 2, 2) @@ -183,6 +184,12 @@ def test_input_warnings(self): loss = TverskyLoss(to_onehot_y=True) loss.forward(chn_input, chn_target) + @SkipIfBeforePyTorchVersion((1, 7, 0)) + def test_script(self): + loss = TverskyLoss() + test_input = torch.ones(2, 1, 8, 8) + test_script_save(loss, test_input, test_input) + if __name__ == "__main__": unittest.main() From 9ae3005abd2ff73c8f9b04871994bae11b600d2f Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Fri, 26 Feb 2021 02:53:16 +0800 Subject: [PATCH 007/457] 1633 fix string length > 1024 issue in all_gather (#1634) * [DLMED] fix length > 1024 issue in string list all gather Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma * [DLMED] add more test Signed-off-by: Nic Ma Co-authored-by: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> --- monai/handlers/utils.py | 28 +++++++++++++++++------- tests/test_handler_metrics_saver_dist.py | 15 +++++++++---- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/monai/handlers/utils.py b/monai/handlers/utils.py index 3e36af0652..a0717169aa 100644 --- a/monai/handlers/utils.py +++ b/monai/handlers/utils.py @@ -85,27 +85,39 @@ def evenly_divisible_all_gather(data: torch.Tensor) -> torch.Tensor: return torch.cat([data[i * max_len : i * max_len + l, ...] for i, l in enumerate(all_lens)], dim=0) -def string_list_all_gather(strings: List[str], delimiter: str = "\t") -> List[str]: +def string_list_all_gather(strings: List[str]) -> List[str]: """ Utility function for distributed data parallel to all gather a list of strings. 
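The strategy adopted in this patch (gather each rank's list length, pad every list with empty strings up to the longest, then gather item by item and drop the padding) can be simulated on a single process. `pad_and_merge` below is illustrative only; the inner loop stands in for one `idist.all_gather` call per item position:

    from typing import List

    def pad_and_merge(per_rank: List[List[str]]) -> List[str]:
        max_len = max(len(r) for r in per_rank)
        # every rank pads to the same length so each gather round is well defined
        padded = [r + [""] * (max_len - len(r)) for r in per_rank]
        result: List[List[str]] = [[] for _ in per_rank]
        for i in range(max_len):  # simulates one all_gather per item position
            gathered = [padded[rank][i] for rank in range(len(per_rank))]
            for rank, g in enumerate(gathered):
                if len(g) > 0:  # drop the "" padding items
                    result[rank].append(g)
        return [item for rank_items in result for item in rank_items]

    print(pad_and_merge([["a"], ["b", "c"]]))  # ['a', 'b', 'c']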
+ Note that if the item in `strings` is longer than 1024 chars, it will be truncated to 1024: + https://github.com/pytorch/ignite/blob/master/ignite/distributed/comp_models/base.py#L92 Args: strings: a list of strings to all gather. - delimiter: use the delimiter to join the string list to be a long string, - then all gather across ranks and split to a list. default to "\t". """ - if idist.get_world_size() <= 1: + world_size = idist.get_world_size() + if world_size <= 1: return strings - _joined = delimiter.join(strings) + result: List[List[str]] = [[] for _ in range(world_size)] + # get length of strings + length = len(strings) + all_lens = idist.all_gather(length) + max_len = max(all_lens).item() + # pad the item to make sure the same length + if length < max_len: + strings = strings + ["" for _ in range(max_len - length)] + if get_torch_version_tuple() > (1, 6, 0): - # all gather across all ranks - _joined = delimiter.join(idist.all_gather(_joined)) + for s in strings: + gathered = idist.all_gather(s) + for i, g in enumerate(gathered): + if len(g) > 0: + result[i].append(g) else: raise RuntimeError("string all_gather can not be supported in PyTorch < 1.7.0.") - return _joined.split(delimiter) + return [i for k in result for i in k] def write_metrics_reports( diff --git a/tests/test_handler_metrics_saver_dist.py b/tests/test_handler_metrics_saver_dist.py index 1b17d0adb4..dfdaa16526 100644 --- a/tests/test_handler_metrics_saver_dist.py +++ b/tests/test_handler_metrics_saver_dist.py @@ -12,6 +12,7 @@ import csv import os +import random import tempfile import unittest @@ -44,8 +45,13 @@ def _val_func(engine, batch): engine = Engine(_val_func) + # test the case that all_gather with string length > 1024 chars + filename_postfix = "abcdefghigklmnopqrstuvwxyz" + for _ in range(1100): + filename_postfix += filename_postfix[random.randint(0, 26)] + if dist.get_rank() == 0: - data = [{"image_meta_dict": {"filename_or_obj": ["filepath1"]}}] + data = [{"image_meta_dict": {"filename_or_obj": [f"1{filename_postfix}"]}}] @engine.on(Events.EPOCH_COMPLETED) def _save_metrics0(engine): @@ -58,8 +64,8 @@ def _save_metrics0(engine): if dist.get_rank() == 1: # different ranks have different data length data = [ - {"image_meta_dict": {"filename_or_obj": ["filepath2"]}}, - {"image_meta_dict": {"filename_or_obj": ["filepath3"]}}, + {"image_meta_dict": {"filename_or_obj": [f"2{filename_postfix}"]}}, + {"image_meta_dict": {"filename_or_obj": [f"3{filename_postfix}"]}}, ] @engine.on(Events.EPOCH_COMPLETED) @@ -86,7 +92,8 @@ def _save_metrics1(engine): f_csv = csv.reader(f) for i, row in enumerate(f_csv): if i > 0: - self.assertEqual(row, [f"filepath{i}\t{float(i)}\t{float(i + 1)}\t{i + 0.5}"]) + expected = [f"{i}{filename_postfix[0: 1023]}\t{float(i)}\t{float(i + 1)}\t{i + 0.5}"] + self.assertEqual(row, expected) self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_summary.csv"))) # check the metric_summary.csv and content with open(os.path.join(tempdir, "metric3_summary.csv")) as f: From f83e30019e97aa92d21b42af9df4af2049d3e125 Mon Sep 17 00:00:00 2001 From: leotam Date: Thu, 25 Feb 2021 12:46:07 -0800 Subject: [PATCH 008/457] 1567 add RemoveRepeatedChannel (#1569) * DeleteChannel array, dictionary, tests Signed-off-by: Leo Tam * formatting Signed-off-by: Leo Tam * DeleteChannel array, dictionary, tests Signed-off-by: Leo Tam * Register in transform init Signed-off-by: Leo Tam * clean comment Signed-off-by: Leo Tam * Dict init Signed-off-by: Leo Tam * Register D and Dict Signed-off-by: Leo Tam * Style 
fix

Signed-off-by: Leo Tam

* Safety cast

Signed-off-by: Leo Tam

* Changing names from DeleteChannel to RemoveRepeatedChannel

Signed-off-by: Leo Tam

* Lint fixes

Signed-off-by: Leo Tam

* Test import fix, autofix again

Signed-off-by: Leo Tam
---
 monai/transforms/__init__.py           |  4 +++
 monai/transforms/utility/array.py      | 27 ++++++++++++++++++++
 monai/transforms/utility/dictionary.py | 27 ++++++++++++++++++++
 tests/test_remove_repeated_channel.py  | 30 +++++++++++++++++++++++
 tests/test_remove_repeated_channeld.py | 34 ++++++++++++++++++++++++++
 5 files changed, 122 insertions(+)
 create mode 100644 tests/test_remove_repeated_channel.py
 create mode 100644 tests/test_remove_repeated_channeld.py

diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py
index 5578b93077..3499afcf95 100644
--- a/monai/transforms/__init__.py
+++ b/monai/transforms/__init__.py
@@ -250,6 +250,7 @@
     Identity,
     LabelToMask,
     Lambda,
+    RemoveRepeatedChannel,
     RepeatChannel,
     SimulateDelay,
     SplitChannel,
@@ -305,6 +306,9 @@
     RandLambdad,
     RandLambdaD,
     RandLambdaDict,
+    RemoveRepeatedChanneld,
+    RemoveRepeatedChannelD,
+    RemoveRepeatedChannelDict,
     RepeatChanneld,
     RepeatChannelD,
     RepeatChannelDict,
diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py
index 8b161a9223..fb9ae3c089 100644
--- a/monai/transforms/utility/array.py
+++ b/monai/transforms/utility/array.py
@@ -31,6 +31,7 @@
     "AsChannelLast",
     "AddChannel",
     "RepeatChannel",
+    "RemoveRepeatedChannel",
     "SplitChannel",
     "CastToType",
     "ToTensor",
@@ -161,6 +162,32 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
         return np.repeat(img, self.repeats, 0)
 
 
+class RemoveRepeatedChannel(Transform):
+    """
+    Remove repeated channel data to undo RepeatChannel.
+    The `repeats` count specifies how many repeated copies are collapsed into one, for example:
+    ``RemoveRepeatedChannel(repeats=2)([[1, 2], [1, 2], [3, 4], [3, 4]])`` generates: ``[[1, 2], [3, 4]]``
+
+    Args:
+        repeats: the repetition count for each element; one channel is kept out of every `repeats` channels.
+    """
+
+    def __init__(self, repeats: int) -> None:
+        if repeats <= 0:
+            raise AssertionError("repeats count must be greater than 0.")
+
+        self.repeats = repeats
+
+    def __call__(self, img: np.ndarray) -> np.ndarray:
+        """
+        Apply the transform to `img`, assuming `img` is a "channel-first" array.
+        """
+        if np.shape(img)[0] < 2:
+            raise AssertionError("Image must have more than one channel")
+
+        return np.array(img[:: self.repeats, :])
+
+
 class SplitChannel(Transform):
     """
     Split Numpy array or PyTorch Tensor data according to the channel dim.
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index c4bd7d4cba..83426734eb 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -35,6 +35,7 @@
     Identity,
     LabelToMask,
     Lambda,
+    RemoveRepeatedChannel,
     RepeatChannel,
     SimulateDelay,
     SplitChannel,
@@ -52,6 +53,7 @@
     "AsChannelLastd",
     "AddChanneld",
     "RepeatChanneld",
+    "RemoveRepeatedChanneld",
     "SplitChanneld",
     "CastToTyped",
     "ToTensord",
@@ -82,6 +84,8 @@
     "RandLambdaDict",
     "RepeatChannelD",
     "RepeatChannelDict",
+    "RemoveRepeatedChannelD",
+    "RemoveRepeatedChannelDict",
     "SplitChannelD",
     "SplitChannelDict",
     "CastToTypeD",
@@ -226,6 +230,28 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda
         return d
 
 
+class RemoveRepeatedChanneld(MapTransform):
+    """
+    Dictionary-based wrapper of :py:class:`monai.transforms.RemoveRepeatedChannel`.
+ """ + + def __init__(self, keys: KeysCollection, repeats: int) -> None: + """ + Args: + keys: keys of the corresponding items to be transformed. + See also: :py:class:`monai.transforms.compose.MapTransform` + repeats: the number of repetitions for each element. + """ + super().__init__(keys) + self.repeater = RemoveRepeatedChannel(repeats) + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = dict(data) + for key in self.keys: + d[key] = self.repeater(d[key]) + return d + + class SplitChanneld(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.SplitChannel`. @@ -836,6 +862,7 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torc AsChannelFirstD = AsChannelFirstDict = AsChannelFirstd AsChannelLastD = AsChannelLastDict = AsChannelLastd AddChannelD = AddChannelDict = AddChanneld +RemoveRepeatedChannelD = RemoveRepeatedChannelDict = RemoveRepeatedChanneld RepeatChannelD = RepeatChannelDict = RepeatChanneld SplitChannelD = SplitChannelDict = SplitChanneld CastToTypeD = CastToTypeDict = CastToTyped diff --git a/tests/test_remove_repeated_channel.py b/tests/test_remove_repeated_channel.py new file mode 100644 index 0000000000..070e0e2b8d --- /dev/null +++ b/tests/test_remove_repeated_channel.py @@ -0,0 +1,30 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import RemoveRepeatedChannel + +TEST_CASE_1 = [{"repeats": 2}, np.array([[1, 2], [1, 2], [3, 4], [3, 4]]), (2, 2)] + + +class TestRemoveRepeatedChannel(unittest.TestCase): + @parameterized.expand([TEST_CASE_1]) + def test_shape(self, input_param, input_data, expected_shape): + result = RemoveRepeatedChannel(**input_param)(input_data) + self.assertEqual(result.shape, expected_shape) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_remove_repeated_channeld.py b/tests/test_remove_repeated_channeld.py new file mode 100644 index 0000000000..46c68bbdc2 --- /dev/null +++ b/tests/test_remove_repeated_channeld.py @@ -0,0 +1,34 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
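Outside the test files, the two new transforms can be exercised interactively. A quick sketch, assuming the patched MONAI is importable; the stride slicing `img[::repeats]` keeps the first channel of every group of `repeats`:

    import numpy as np

    from monai.transforms import RemoveRepeatedChannel, RemoveRepeatedChanneld

    x = np.array([[1, 2], [1, 2], [3, 4], [3, 4]])  # 4 channels, each repeated twice
    print(RemoveRepeatedChannel(repeats=2)(x))  # prints [[1 2] [3 4]]
    print(RemoveRepeatedChanneld(keys="img", repeats=2)({"img": x})["img"].shape)  # (2, 2)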
+ +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import RemoveRepeatedChanneld + +TEST_CASE_1 = [ + {"keys": ["img"], "repeats": 2}, + {"img": np.array([[1, 2], [1, 2], [3, 4], [3, 4]]), "seg": np.array([[1, 2], [1, 2], [3, 4], [3, 4]])}, + (2, 2), +] + + +class TestRemoveRepeatedChanneld(unittest.TestCase): + @parameterized.expand([TEST_CASE_1]) + def test_shape(self, input_param, input_data, expected_shape): + result = RemoveRepeatedChanneld(**input_param)(input_data) + self.assertEqual(result["img"].shape, expected_shape) + + +if __name__ == "__main__": + unittest.main() From c68d25e7cb964a54db49835c697e042231dfc3c2 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Fri, 26 Feb 2021 06:04:53 +0800 Subject: [PATCH 009/457] [DLMED] add progress arg (#1647) Signed-off-by: Nic Ma Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> --- monai/data/dataset.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/monai/data/dataset.py b/monai/data/dataset.py index b93f03151f..db1ecc2b1f 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -591,6 +591,7 @@ def __init__( cache_rate: float = 1.0, num_init_workers: Optional[int] = None, num_replace_workers: int = 0, + progress: bool = True, ) -> None: """ Args: @@ -605,8 +606,10 @@ def __init__( If num_init_workers is None then the number returned by os.cpu_count() is used. num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch. if 0, run in main thread, no separate thread will open. + progress: whether to display a progress bar. + """ - super().__init__(data, transform, cache_num, cache_rate, num_init_workers) + super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress) if self._cache is None: self._cache = self._fill_cache() if self.cache_num >= len(data): From 5875ae6f80f56ee18ac14c51530b38081c1fe2e3 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 25 Feb 2021 23:58:30 +0000 Subject: [PATCH 010/457] 1650-update distcall (#1652) * update distcall Signed-off-by: Wenqi Li * update final closing Signed-off-by: Wenqi Li Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> --- monai/handlers/utils.py | 1 - tests/test_distcall.py | 29 +++++ tests/test_handler_metrics_saver_dist.py | 154 +++++++++++------------ tests/utils.py | 17 ++- 4 files changed, 119 insertions(+), 82 deletions(-) create mode 100644 tests/test_distcall.py diff --git a/monai/handlers/utils.py b/monai/handlers/utils.py index a0717169aa..9ed13d292c 100644 --- a/monai/handlers/utils.py +++ b/monai/handlers/utils.py @@ -163,7 +163,6 @@ def write_metrics_reports( with open(os.path.join(save_dir, "metrics.csv"), "w") as f: for k, v in metrics.items(): f.write(f"{k}{deli}{str(v)}\n") - if metric_details is not None and len(metric_details) > 0: for k, v in metric_details.items(): if isinstance(v, torch.Tensor): diff --git a/tests/test_distcall.py b/tests/test_distcall.py new file mode 100644 index 0000000000..1830a85654 --- /dev/null +++ b/tests/test_distcall.py @@ -0,0 +1,29 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from tests.utils import DistCall, DistTestCase + + +class DistributedCallTest(DistTestCase): + def test_constructor(self): + with self.assertRaises(ValueError): + DistCall(nnodes=1, nproc_per_node=0) + with self.assertRaises(ValueError): + DistCall(nnodes=0, nproc_per_node=0) + with self.assertRaises(ValueError): + DistCall(nnodes=0, nproc_per_node=1) + _ = DistCall(nnodes=1, nproc_per_node=1) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_handler_metrics_saver_dist.py b/tests/test_handler_metrics_saver_dist.py index dfdaa16526..0868ec5ff3 100644 --- a/tests/test_handler_metrics_saver_dist.py +++ b/tests/test_handler_metrics_saver_dist.py @@ -12,7 +12,6 @@ import csv import os -import random import tempfile import unittest @@ -28,85 +27,82 @@ class DistributedMetricsSaver(DistTestCase): @DistCall(nnodes=1, nproc_per_node=2) def test_content(self): - self._run() - - def _run(self): with tempfile.TemporaryDirectory() as tempdir: - metrics_saver = MetricsSaver( - save_dir=tempdir, - metrics=["metric1", "metric2"], - metric_details=["metric3", "metric4"], - batch_transform=lambda x: x["image_meta_dict"], - summary_ops="*", - ) - - def _val_func(engine, batch): - pass - - engine = Engine(_val_func) - - # test the case that all_gather with string length > 1024 chars - filename_postfix = "abcdefghigklmnopqrstuvwxyz" - for _ in range(1100): - filename_postfix += filename_postfix[random.randint(0, 26)] - - if dist.get_rank() == 0: - data = [{"image_meta_dict": {"filename_or_obj": [f"1{filename_postfix}"]}}] - - @engine.on(Events.EPOCH_COMPLETED) - def _save_metrics0(engine): - engine.state.metrics = {"metric1": 1, "metric2": 2} - engine.state.metric_details = { - "metric3": torch.tensor([[1, 2]]), - "metric4": torch.tensor([[5, 6]]), - } - - if dist.get_rank() == 1: - # different ranks have different data length - data = [ - {"image_meta_dict": {"filename_or_obj": [f"2{filename_postfix}"]}}, - {"image_meta_dict": {"filename_or_obj": [f"3{filename_postfix}"]}}, - ] - - @engine.on(Events.EPOCH_COMPLETED) - def _save_metrics1(engine): - engine.state.metrics = {"metric1": 1, "metric2": 2} - engine.state.metric_details = { - "metric3": torch.tensor([[2, 3], [3, 4]]), - "metric4": torch.tensor([[6, 7], [7, 8]]), - } - - metrics_saver.attach(engine) - engine.run(data, max_epochs=1) - - if dist.get_rank() == 0: - # check the metrics.csv and content - self.assertTrue(os.path.exists(os.path.join(tempdir, "metrics.csv"))) - with open(os.path.join(tempdir, "metrics.csv")) as f: - f_csv = csv.reader(f) - for i, row in enumerate(f_csv): - self.assertEqual(row, [f"metric{i + 1}\t{i + 1}"]) - self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_raw.csv"))) - # check the metric_raw.csv and content - with open(os.path.join(tempdir, "metric3_raw.csv")) as f: - f_csv = csv.reader(f) - for i, row in enumerate(f_csv): - if i > 0: - expected = [f"{i}{filename_postfix[0: 1023]}\t{float(i)}\t{float(i + 1)}\t{i + 0.5}"] - self.assertEqual(row, expected) - self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_summary.csv"))) - # check the metric_summary.csv 
and content - with open(os.path.join(tempdir, "metric3_summary.csv")) as f: - f_csv = csv.reader(f) - for i, row in enumerate(f_csv): - if i == 1: - self.assertEqual(row, ["class0\t1.0000\t1.0000\t1.0000\t1.0000\t1.0000\t0.0000"]) - elif i == 2: - self.assertEqual(row, ["class1\t2.0000\t2.0000\t2.0000\t2.0000\t2.0000\t0.0000"]) - elif i == 3: - self.assertEqual(row, ["mean\t1.5000\t1.5000\t1.5000\t1.5000\t1.5000\t0.0000"]) - self.assertTrue(os.path.exists(os.path.join(tempdir, "metric4_raw.csv"))) - self.assertTrue(os.path.exists(os.path.join(tempdir, "metric4_summary.csv"))) + self._run(tempdir) + + def _run(self, tempdir): + fnames = ["aaa" * 300, "bbb" * 301, "ccc" * 302] + + metrics_saver = MetricsSaver( + save_dir=tempdir, + metrics=["metric1", "metric2"], + metric_details=["metric3", "metric4"], + batch_transform=lambda x: x["image_meta_dict"], + summary_ops="*", + ) + + def _val_func(engine, batch): + pass + + engine = Engine(_val_func) + + if dist.get_rank() == 0: + data = [{"image_meta_dict": {"filename_or_obj": [fnames[0]]}}] + + @engine.on(Events.EPOCH_COMPLETED) + def _save_metrics0(engine): + engine.state.metrics = {"metric1": 1, "metric2": 2} + engine.state.metric_details = { + "metric3": torch.tensor([[1, 2]]), + "metric4": torch.tensor([[5, 6]]), + } + + if dist.get_rank() == 1: + # different ranks have different data length + data = [ + {"image_meta_dict": {"filename_or_obj": [fnames[1]]}}, + {"image_meta_dict": {"filename_or_obj": [fnames[2]]}}, + ] + + @engine.on(Events.EPOCH_COMPLETED) + def _save_metrics1(engine): + engine.state.metrics = {"metric1": 1, "metric2": 2} + engine.state.metric_details = { + "metric3": torch.tensor([[2, 3], [3, 4]]), + "metric4": torch.tensor([[6, 7], [7, 8]]), + } + + metrics_saver.attach(engine) + engine.run(data, max_epochs=1) + + if dist.get_rank() == 0: + # check the metrics.csv and content + self.assertTrue(os.path.exists(os.path.join(tempdir, "metrics.csv"))) + with open(os.path.join(tempdir, "metrics.csv")) as f: + f_csv = csv.reader(f) + for i, row in enumerate(f_csv): + self.assertEqual(row, [f"metric{i + 1}\t{i + 1}"]) + self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_raw.csv"))) + # check the metric_raw.csv and content + with open(os.path.join(tempdir, "metric3_raw.csv")) as f: + f_csv = csv.reader(f) + for i, row in enumerate(f_csv): + if i > 0: + expected = [f"{fnames[i-1]}\t{float(i)}\t{float(i + 1)}\t{i + 0.5}"] + self.assertEqual(row, expected) + self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_summary.csv"))) + # check the metric_summary.csv and content + with open(os.path.join(tempdir, "metric3_summary.csv")) as f: + f_csv = csv.reader(f) + for i, row in enumerate(f_csv): + if i == 1: + self.assertEqual(row, ["class0\t1.0000\t1.0000\t1.0000\t1.0000\t1.0000\t0.0000"]) + elif i == 2: + self.assertEqual(row, ["class1\t2.0000\t2.0000\t2.0000\t2.0000\t2.0000\t0.0000"]) + elif i == 3: + self.assertEqual(row, ["mean\t1.5000\t1.5000\t1.5000\t1.5000\t1.5000\t0.0000"]) + self.assertTrue(os.path.exists(os.path.join(tempdir, "metric4_raw.csv"))) + self.assertTrue(os.path.exists(os.path.join(tempdir, "metric4_summary.csv"))) if __name__ == "__main__": diff --git a/tests/utils.py b/tests/utils.py index 4597a18fbd..8b367158b2 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -237,7 +237,11 @@ def __init__( """ self.nnodes = int(nnodes) self.nproc_per_node = int(nproc_per_node) - self.node_rank = int(os.environ.get("NODE_RANK", "0")) if node_rank is None else node_rank + if self.nnodes < 1 or 
self.nproc_per_node < 1: + raise ValueError( + f"number of nodes and processes per node must be >= 1, got {self.nnodes} and {self.nproc_per_node}" + ) + self.node_rank = int(os.environ.get("NODE_RANK", "0")) if node_rank is None else int(node_rank) self.master_addr = master_addr self.master_port = np.random.randint(10000, 20000) if master_port is None else master_port @@ -286,11 +290,20 @@ def run_process(self, func, local_rank, args, kwargs, results): finally: os.environ.clear() os.environ.update(_env) - dist.destroy_process_group() + try: + dist.destroy_process_group() + except RuntimeError as e: + warnings.warn(f"While closing process group: {e}.") def __call__(self, obj): if not torch.distributed.is_available(): return unittest.skipIf(True, "Skipping distributed tests because not torch.distributed.is_available()")(obj) + if torch.cuda.is_available() and torch.cuda.device_count() < self.nproc_per_node: + return unittest.skipIf( + True, + f"Skipping distributed tests because it requires {self.nnodes} devices " + f"but got {torch.cuda.device_count()}", + )(obj) _cache_original_func(obj) From 56c54025541a6f73f3f70df70da2022dd5da4c9c Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Thu, 25 Feb 2021 20:04:23 -0500 Subject: [PATCH 011/457] Fix ToPIL (#1648) * Add ToPIL transformation Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add ToPILd, ToPILD, ToPILDict Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Remove has_pil Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Include ToPIL, ToPILd, ToPILD, and ToPILDict Also include ToNumpyD, ToNumpyDict, TorchVisionD, and TorchVisionDict Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix a typing issue Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix PIL optional import Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add unittests for ToPIL and ToPILD Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix formatting Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix formatting Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add PILImage.Image as the input for ToNumpy and ToTensor Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix type checking issue for PIL.Image.Image Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Change PILImage_fromarray to lower case Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/transforms/__init__.py | 8 ++++ monai/transforms/utility/array.py | 31 ++++++++++-- monai/transforms/utility/dictionary.py | 46 +++++++++++++++--- tests/test_to_pil.py | 64 +++++++++++++++++++++++++ tests/test_to_pild.py | 65 ++++++++++++++++++++++++++ 5 files changed, 205 insertions(+), 9 deletions(-) create mode 100644 tests/test_to_pil.py create mode 100644 tests/test_to_pild.py diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 3499afcf95..f5c7c826e9 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -256,6 +256,7 @@ SplitChannel, SqueezeDim, ToNumpy, + ToPIL, TorchVision, ToTensor, Transpose, @@ -323,7 +324,14 @@ SqueezeDimD, SqueezeDimDict, ToNumpyd, + ToNumpyD, + ToNumpyDict, + ToPILd, + ToPILD, + ToPILDict, TorchVisiond, + TorchVisionD, + TorchVisionDict, ToTensord, ToTensorD, ToTensorDict, diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index fb9ae3c089..0ee88e1a6c 100644 --- 
a/monai/transforms/utility/array.py
+++ b/monai/transforms/utility/array.py
@@ -15,7 +15,7 @@
 
 import logging
 import time
-from typing import Callable, List, Optional, Sequence, Tuple, Union
+from typing import TYPE_CHECKING, Callable, List, Optional, Sequence, Tuple, Union
 
 import numpy as np
 import torch
@@ -25,6 +25,15 @@
 from monai.transforms.utils import extreme_points_to_image, get_extreme_points, map_binary_to_indices
 from monai.utils import ensure_tuple, min_version, optional_import
 
+if TYPE_CHECKING:
+    from PIL.Image import Image as PILImageImage
+    from PIL.Image import fromarray as pil_image_fromarray
+
+    has_pil = True
+else:
+    PILImageImage, has_pil = optional_import("PIL.Image", name="Image")
+    pil_image_fromarray, _ = optional_import("PIL.Image", name="fromarray")
+
 __all__ = [
     "Identity",
     "AsChannelFirst",
@@ -265,7 +274,7 @@ class ToTensor(Transform):
     Converts the input image to a tensor without applying any other transformations.
     """
 
-    def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> torch.Tensor:
+    def __call__(self, img: Union[np.ndarray, torch.Tensor, PILImageImage]) -> torch.Tensor:
         """
         Apply the transform to `img` and make it contiguous.
         """
@@ -279,7 +288,7 @@ class ToNumpy(Transform):
     Converts the input data to numpy array, can support list or tuple of numbers and PyTorch Tensor.
     """
 
-    def __call__(self, img: Union[List, Tuple, np.ndarray, torch.Tensor]) -> np.ndarray:
+    def __call__(self, img: Union[List, Tuple, np.ndarray, torch.Tensor, PILImageImage]) -> np.ndarray:
         """
         Apply the transform to `img` and make it contiguous.
         """
@@ -288,6 +297,22 @@ def __call__(self, img: Union[List, Tuple, np.ndarray, torch.Tensor]) -> np.ndar
         return np.ascontiguousarray(img)
 
 
+class ToPIL(Transform):
+    """
+    Converts the input image (in the form of NumPy array or PyTorch Tensor) to a PIL image.
+    """
+
+    def __call__(self, img: Union[np.ndarray, torch.Tensor, PILImageImage]) -> PILImageImage:
+        """
+        Apply the transform to `img`, converting it to a PIL image.
+        """
+        if isinstance(img, PILImageImage):
+            return img
+        if isinstance(img, torch.Tensor):
+            img = img.detach().cpu().numpy()
+        return pil_image_fromarray(img)
+
+
 class Transpose(Transform):
     """
     Transposes the input image based on the given `indices` dimension ordering.
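A short round trip through the new converter, as a usage sketch rather than part of the patch; it requires the optional `pillow` dependency:

    import torch

    from monai.transforms import ToNumpy, ToPIL

    img = torch.rand(32, 32)  # tensors are detached and moved to CPU before conversion
    pil_img = ToPIL()(img)  # a PIL input would pass through unchanged
    back = ToNumpy()(pil_img)
    print(type(pil_img).__name__, back.shape)  # Image (32, 32)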
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index 83426734eb..f9612c2408 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -17,7 +17,7 @@
 
 import copy
 import logging
-from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
+from typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
 
 import numpy as np
 import torch
@@ -41,11 +41,19 @@
     SplitChannel,
     SqueezeDim,
     ToNumpy,
+    ToPIL,
     TorchVision,
     ToTensor,
 )
 from monai.transforms.utils import extreme_points_to_image, get_extreme_points
-from monai.utils import ensure_tuple, ensure_tuple_rep
+from monai.utils import ensure_tuple, ensure_tuple_rep, optional_import
+
+if TYPE_CHECKING:
+    from PIL.Image import Image as PILImageImage
+
+    has_pil = True
+else:
+    PILImageImage, has_pil = optional_import("PIL.Image", name="Image")
 
 __all__ = [
     "Identityd",
@@ -58,6 +66,7 @@
     "CastToTyped",
     "ToTensord",
     "ToNumpyd",
+    "ToPILd",
     "DeleteItemsd",
     "SelectItemsd",
     "SqueezeDimd",
@@ -348,8 +357,8 @@ def __init__(self, keys: KeysCollection) -> None:
         self.converter = ToTensor()
 
     def __call__(
-        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
-    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
+        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]
+    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]:
         d = dict(data)
         for key in self.keys:
             d[key] = self.converter(d[key])
@@ -371,8 +380,31 @@ def __init__(self, keys: KeysCollection) -> None:
         self.converter = ToNumpy()
 
     def __call__(
-        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
-    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
+        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]
+    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]:
         d = dict(data)
         for key in self.keys:
             d[key] = self.converter(d[key])
         return d
+
+
+class ToPILd(MapTransform):
+    """
+    Dictionary-based wrapper of :py:class:`monai.transforms.ToPIL`.
+    """
+
+    def __init__(self, keys: KeysCollection) -> None:
+        """
+        Args:
+            keys: keys of the corresponding items to be transformed.
+                See also: :py:class:`monai.transforms.compose.MapTransform`
+        """
+        super().__init__(keys)
+        self.converter = ToPIL()
+
+    def __call__(
+        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]
+    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]:
+        d = dict(data)
+        for key in self.keys:
+            d[key] = self.converter(d[key])
+        return d
+
+
@@ -867,6 +899,8 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torc
 SplitChannelD = SplitChannelDict = SplitChanneld
 CastToTypeD = CastToTypeDict = CastToTyped
 ToTensorD = ToTensorDict = ToTensord
+ToNumpyD = ToNumpyDict = ToNumpyd
+ToPILD = ToPILDict = ToPILd
 DeleteItemsD = DeleteItemsDict = DeleteItemsd
 SqueezeDimD = SqueezeDimDict = SqueezeDimd
 DataStatsD = DataStatsDict = DataStatsd
diff --git a/tests/test_to_pil.py b/tests/test_to_pil.py
new file mode 100644
index 0000000000..ec63750ce4
--- /dev/null
+++ b/tests/test_to_pil.py
@@ -0,0 +1,64 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from typing import TYPE_CHECKING +from unittest import skipUnless + +import numpy as np +import torch +from parameterized import parameterized + +from monai.transforms import ToPIL +from monai.utils import optional_import + +if TYPE_CHECKING: + from PIL.Image import Image as PILImageImage + from PIL.Image import fromarray as pil_image_fromarray + + has_pil = True +else: + pil_image_fromarray, has_pil = optional_import("PIL.Image", name="fromarray") + PILImageImage, _ = optional_import("PIL.Image", name="Image") + +TEST_CASE_ARRAY_1 = [np.array([[1.0, 2.0], [3.0, 4.0]])] +TEST_CASE_TENSOR_1 = [torch.tensor([[1.0, 2.0], [3.0, 4.0]])] + + +class TestToPIL(unittest.TestCase): + @parameterized.expand([TEST_CASE_ARRAY_1]) + @skipUnless(has_pil, "Requires `pillow` package.") + def test_numpy_input(self, test_data): + self.assertTrue(isinstance(test_data, np.ndarray)) + result = ToPIL()(test_data) + self.assertTrue(isinstance(result, PILImageImage)) + np.testing.assert_allclose(np.array(result), test_data) + + @parameterized.expand([TEST_CASE_TENSOR_1]) + @skipUnless(has_pil, "Requires `pillow` package.") + def test_tensor_input(self, test_data): + self.assertTrue(isinstance(test_data, torch.Tensor)) + result = ToPIL()(test_data) + self.assertTrue(isinstance(result, PILImageImage)) + np.testing.assert_allclose(np.array(result), test_data.numpy()) + + @parameterized.expand([TEST_CASE_ARRAY_1]) + @skipUnless(has_pil, "Requires `pillow` package.") + def test_pil_input(self, test_data): + test_data_pil = pil_image_fromarray(test_data) + self.assertTrue(isinstance(test_data_pil, PILImageImage)) + result = ToPIL()(test_data_pil) + self.assertTrue(isinstance(result, PILImageImage)) + np.testing.assert_allclose(np.array(result), test_data) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_to_pild.py b/tests/test_to_pild.py new file mode 100644 index 0000000000..43778022ee --- /dev/null +++ b/tests/test_to_pild.py @@ -0,0 +1,65 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +from typing import TYPE_CHECKING +from unittest import skipUnless + +import numpy as np +import torch +from parameterized import parameterized + +from monai.transforms import ToPILd +from monai.utils import optional_import + +if TYPE_CHECKING: + from PIL.Image import Image as PILImageImage + from PIL.Image import fromarray as pil_image_fromarray + + has_pil = True +else: + pil_image_fromarray, has_pil = optional_import("PIL.Image", name="fromarray") + PILImageImage, _ = optional_import("PIL.Image", name="Image") + +TEST_CASE_ARRAY_1 = [{"keys": "image"}, {"image": np.array([[1.0, 2.0], [3.0, 4.0]])}] +TEST_CASE__TENSOR_1 = [{"keys": "image"}, {"image": torch.tensor([[1.0, 2.0], [3.0, 4.0]])}] + + +class TestToPIL(unittest.TestCase): + @parameterized.expand([TEST_CASE_ARRAY_1]) + @skipUnless(has_pil, "Requires `pillow` package.") + def test_numpy_input(self, input_param, test_data): + self.assertTrue(isinstance(test_data[input_param["keys"]], np.ndarray)) + result = ToPILd(**input_param)(test_data)[input_param["keys"]] + self.assertTrue(isinstance(result, PILImageImage)) + np.testing.assert_allclose(np.array(result), test_data[input_param["keys"]]) + + @parameterized.expand([TEST_CASE__TENSOR_1]) + @skipUnless(has_pil, "Requires `pillow` package.") + def test_tensor_input(self, input_param, test_data): + self.assertTrue(isinstance(test_data[input_param["keys"]], torch.Tensor)) + result = ToPILd(**input_param)(test_data)[input_param["keys"]] + self.assertTrue(isinstance(result, PILImageImage)) + np.testing.assert_allclose(np.array(result), test_data[input_param["keys"]].numpy()) + + @parameterized.expand([TEST_CASE_ARRAY_1]) + @skipUnless(has_pil, "Requires `pillow` package.") + def test_pil_input(self, input_param, test_data): + input_array = test_data[input_param["keys"]] + test_data[input_param["keys"]] = pil_image_fromarray(input_array) + self.assertTrue(isinstance(test_data[input_param["keys"]], PILImageImage)) + result = ToPILd(**input_param)(test_data)[input_param["keys"]] + self.assertTrue(isinstance(result, PILImageImage)) + np.testing.assert_allclose(np.array(result), test_data[input_param["keys"]]) + + +if __name__ == "__main__": + unittest.main() From 1df63523934ce42704f2fd305d17c7045f3efd7f Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Fri, 26 Feb 2021 13:07:48 +0000 Subject: [PATCH 012/457] Randomizable constructor (#1639) add RandomizableTransform --- docs/source/transforms.rst | 5 ++ monai/apps/deepgrow/transforms.py | 6 +- monai/data/dataset.py | 13 ++-- monai/data/image_dataset.py | 5 +- monai/transforms/__init__.py | 2 +- monai/transforms/compose.py | 9 ++- monai/transforms/croppad/array.py | 10 +-- monai/transforms/croppad/dictionary.py | 22 ++++--- monai/transforms/intensity/array.py | 55 +++++++--------- monai/transforms/intensity/dictionary.py | 67 +++++++++---------- monai/transforms/spatial/array.py | 67 ++++++++----------- monai/transforms/spatial/dictionary.py | 83 ++++++++++++------------ monai/transforms/transform.py | 50 +++++++++----- monai/transforms/utility/array.py | 4 +- monai/transforms/utility/dictionary.py | 10 +-- tests/test_compose.py | 14 ++-- tests/test_image_dataset.py | 4 +- tests/test_rand_lambdad.py | 4 +- tests/test_randomizable.py | 2 +- 19 files changed, 217 insertions(+), 215 deletions(-) diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 5b550f7885..a144c8c138 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -27,6 +27,11 @@ 
Generic Interfaces .. autoclass:: Randomizable :members: +`RandomizableTransform` +^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomizableTransform + :members: + `Compose` ^^^^^^^^^ .. autoclass:: Compose diff --git a/monai/apps/deepgrow/transforms.py b/monai/apps/deepgrow/transforms.py index 80b0d1648d..3f4031fade 100644 --- a/monai/apps/deepgrow/transforms.py +++ b/monai/apps/deepgrow/transforms.py @@ -17,7 +17,7 @@ from monai.config import IndexSelection, KeysCollection from monai.networks.layers import GaussianFilter from monai.transforms import SpatialCrop -from monai.transforms.transform import MapTransform, Randomizable, Transform +from monai.transforms.transform import MapTransform, RandomizableTransform, Transform from monai.transforms.utils import generate_spatial_bounding_box from monai.utils import min_version, optional_import @@ -62,7 +62,7 @@ def __call__(self, data): return d -class AddInitialSeedPointd(Randomizable, Transform): +class AddInitialSeedPointd(RandomizableTransform): """ Add random guidance as initial seed point for a given label. @@ -279,7 +279,7 @@ def __call__(self, data): return d -class AddRandomGuidanced(Randomizable, Transform): +class AddRandomGuidanced(RandomizableTransform): """ Add random guidance based on discrepancies that were found between label and prediction. diff --git a/monai/data/dataset.py b/monai/data/dataset.py index db1ecc2b1f..bb5a98ba1e 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -26,6 +26,7 @@ from monai.data.utils import pickle_hashing from monai.transforms import Compose, Randomizable, Transform, apply_transform +from monai.transforms.transform import RandomizableTransform from monai.utils import MAX_SEED, get_seed, min_version, optional_import if TYPE_CHECKING: @@ -161,7 +162,7 @@ def _pre_transform(self, item_transformed): raise ValueError("transform must be an instance of monai.transforms.Compose.") for _transform in self.transform.transforms: # execute all the deterministic transforms - if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform): + if isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform): break item_transformed = apply_transform(_transform, item_transformed) return item_transformed @@ -183,7 +184,7 @@ def _post_transform(self, item_transformed): for _transform in self.transform.transforms: if ( start_post_randomize_run - or isinstance(_transform, Randomizable) + or isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform) ): start_post_randomize_run = True @@ -522,7 +523,7 @@ def _load_cache_item(self, idx: int): raise ValueError("transform must be an instance of monai.transforms.Compose.") for _transform in self.transform.transforms: # execute all the deterministic transforms - if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform): + if isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform): break item = apply_transform(_transform, item) return item @@ -539,7 +540,7 @@ def __getitem__(self, index): if not isinstance(self.transform, Compose): raise ValueError("transform must be an instance of monai.transforms.Compose.") for _transform in self.transform.transforms: - if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform): + if start_run or isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform): start_run = True data = apply_transform(_transform, data) return data @@ -924,9 +925,9 @@ def 
__getitem__(self, index: int): # set transforms of each zip component for dataset in self.dataset.data: transform = getattr(dataset, "transform", None) - if isinstance(transform, Randomizable): + if isinstance(transform, RandomizableTransform): transform.set_random_state(seed=self._seed) transform = getattr(self.dataset, "transform", None) - if isinstance(transform, Randomizable): + if isinstance(transform, RandomizableTransform): transform.set_random_state(seed=self._seed) return self.dataset[index] diff --git a/monai/data/image_dataset.py b/monai/data/image_dataset.py index 1568e082ee..1074105508 100644 --- a/monai/data/image_dataset.py +++ b/monai/data/image_dataset.py @@ -17,6 +17,7 @@ from monai.config import DtypeLike from monai.data.image_reader import ImageReader from monai.transforms import LoadImage, Randomizable, apply_transform +from monai.transforms.transform import RandomizableTransform from monai.utils import MAX_SEED, get_seed @@ -106,14 +107,14 @@ def __getitem__(self, index: int): label = self.labels[index] if self.transform is not None: - if isinstance(self.transform, Randomizable): + if isinstance(self.transform, RandomizableTransform): self.transform.set_random_state(seed=self._seed) img = apply_transform(self.transform, img) data = [img] if self.seg_transform is not None: - if isinstance(self.seg_transform, Randomizable): + if isinstance(self.seg_transform, RandomizableTransform): self.seg_transform.set_random_state(seed=self._seed) seg = apply_transform(self.seg_transform, seg) diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index f5c7c826e9..8b30d76bec 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -237,7 +237,7 @@ ZoomD, ZoomDict, ) -from .transform import MapTransform, Randomizable, Transform +from .transform import MapTransform, Randomizable, RandomizableTransform, Transform from .utility.array import ( AddChannel, AddExtremePointsChannel, diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py index 2d612ad2e3..a9f66b12a0 100644 --- a/monai/transforms/compose.py +++ b/monai/transforms/compose.py @@ -18,15 +18,14 @@ import numpy as np # For backwards compatiblity (so this still works: from monai.transforms.compose import MapTransform) -from monai.transforms.transform import MapTransform # noqa: F401 -from monai.transforms.transform import Randomizable, Transform +from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform, Transform # noqa: F401 from monai.transforms.utils import apply_transform from monai.utils import MAX_SEED, ensure_tuple, get_seed __all__ = ["Compose"] -class Compose(Randomizable, Transform): +class Compose(RandomizableTransform): """ ``Compose`` provides the ability to chain a series of calls together in a sequence. 
Each transform in the sequence must take a single argument and @@ -96,14 +95,14 @@ def __init__(self, transforms: Optional[Union[Sequence[Callable], Callable]] = N def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None) -> "Compose": super().set_random_state(seed=seed, state=state) for _transform in self.transforms: - if not isinstance(_transform, Randomizable): + if not isinstance(_transform, RandomizableTransform): continue _transform.set_random_state(seed=self.R.randint(MAX_SEED, dtype="uint32")) return self def randomize(self, data: Optional[Any] = None) -> None: for _transform in self.transforms: - if not isinstance(_transform, Randomizable): + if not isinstance(_transform, RandomizableTransform): continue try: _transform.randomize(data) diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index ef5e0019bd..a3d36ad903 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -20,7 +20,7 @@ from monai.config import IndexSelection from monai.data.utils import get_random_patch, get_valid_patch_size -from monai.transforms.transform import Randomizable, Transform +from monai.transforms.transform import Randomizable, RandomizableTransform, Transform from monai.transforms.utils import ( generate_pos_neg_label_crop_centers, generate_spatial_bounding_box, @@ -276,7 +276,7 @@ def __call__(self, img: np.ndarray): return cropper(img) -class RandSpatialCrop(Randomizable, Transform): +class RandSpatialCrop(RandomizableTransform): """ Crop image with random size or specific size ROI. It can crop at a random position as center or at the image center. And allows to set the minimum size to limit the randomly generated ROI. @@ -321,7 +321,7 @@ def __call__(self, img: np.ndarray): return cropper(img) -class RandSpatialCropSamples(Randomizable, Transform): +class RandSpatialCropSamples(RandomizableTransform): """ Crop image with random size or specific size ROI to generate a list of N samples. It can crop at a random position as center or at the image center. And allows to set @@ -429,7 +429,7 @@ def __call__(self, img: np.ndarray): return cropped -class RandWeightedCrop(Randomizable, Transform): +class RandWeightedCrop(RandomizableTransform): """ Samples a list of `num_samples` image patches according to the provided `weight_map`. @@ -481,7 +481,7 @@ def __call__(self, img: np.ndarray, weight_map: Optional[np.ndarray] = None) -> return results -class RandCropByPosNegLabel(Randomizable, Transform): +class RandCropByPosNegLabel(RandomizableTransform): """ Crop random fixed sized regions with the center being a foreground or background voxel based on the Pos Neg Ratio. diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 20ae6ac1ed..9739c6322f 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -30,7 +30,7 @@ SpatialCrop, SpatialPad, ) -from monai.transforms.transform import MapTransform, Randomizable +from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform from monai.transforms.utils import ( generate_pos_neg_label_crop_centers, generate_spatial_bounding_box, @@ -258,7 +258,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandSpatialCropd(Randomizable, MapTransform): +class RandSpatialCropd(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandSpatialCrop`. 
Crop image with random size or specific size ROI. It can crop at a random position as @@ -283,7 +283,8 @@ def __init__( random_center: bool = True, random_size: bool = True, ) -> None: - super().__init__(keys) + RandomizableTransform.__init__(self) + MapTransform.__init__(self, keys) self.roi_size = roi_size self.random_center = random_center self.random_size = random_size @@ -312,7 +313,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandSpatialCropSamplesd(Randomizable, MapTransform): +class RandSpatialCropSamplesd(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandSpatialCropSamples`. Crop image with random size or specific size ROI to generate a list of N samples. @@ -344,7 +345,8 @@ def __init__( random_center: bool = True, random_size: bool = True, ) -> None: - super().__init__(keys) + RandomizableTransform.__init__(self) + MapTransform.__init__(self, keys) if num_samples < 1: raise ValueError(f"num_samples must be positive, got {num_samples}.") self.num_samples = num_samples @@ -420,7 +422,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandWeightedCropd(Randomizable, MapTransform): +class RandWeightedCropd(RandomizableTransform, MapTransform): """ Samples a list of `num_samples` image patches according to the provided `weight_map`. @@ -446,7 +448,8 @@ def __init__( num_samples: int = 1, center_coord_key: Optional[str] = None, ): - super().__init__(keys) + RandomizableTransform.__init__(self) + MapTransform.__init__(self, keys) self.spatial_size = ensure_tuple(spatial_size) self.w_key = w_key self.num_samples = int(num_samples) @@ -484,7 +487,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n return results -class RandCropByPosNegLabeld(Randomizable, MapTransform): +class RandCropByPosNegLabeld(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandCropByPosNegLabel`. Crop random fixed sized regions with the center being a foreground or background voxel @@ -534,7 +537,8 @@ def __init__( fg_indices_key: Optional[str] = None, bg_indices_key: Optional[str] = None, ) -> None: - super().__init__(keys) + RandomizableTransform.__init__(self) + MapTransform.__init__(self, keys) self.label_key = label_key self.spatial_size: Union[Tuple[int, ...], Sequence[int], int] = spatial_size if pos < 0 or neg < 0: diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 40bef064eb..1bddc0137d 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -22,7 +22,7 @@ from monai.config import DtypeLike from monai.networks.layers import GaussianFilter, HilbertTransform, SavitzkyGolayFilter -from monai.transforms.transform import Randomizable, Transform +from monai.transforms.transform import RandomizableTransform, Transform from monai.transforms.utils import rescale_array from monai.utils import PT_BEFORE_1_7, InvalidPyTorchVersionError, dtype_torch_to_numpy, ensure_tuple_size @@ -49,7 +49,7 @@ ] -class RandGaussianNoise(Randomizable, Transform): +class RandGaussianNoise(RandomizableTransform): """ Add Gaussian noise to image. 
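As the following hunks show, the do/skip decision for `RandGaussianNoise` now comes from the base class and the noise field from the subclass, both drawn from `self.R`, so seeding gives fully reproducible augmentation; a small sketch:

    import numpy as np
    from monai.transforms import RandGaussianNoise

    t = RandGaussianNoise(prob=1.0, mean=0.0, std=0.1)
    t.set_random_state(seed=0)
    out1 = t(np.zeros((1, 4, 4), dtype=np.float32))
    t.set_random_state(seed=0)
    out2 = t(np.zeros((1, 4, 4), dtype=np.float32))
    np.testing.assert_allclose(out1, out2)  # same seed -> same coin flip and same noise field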
@@ -60,14 +60,13 @@ class RandGaussianNoise(Randomizable, Transform): """ def __init__(self, prob: float = 0.1, mean: Union[Sequence[float], float] = 0.0, std: float = 0.1) -> None: - self.prob = prob + RandomizableTransform.__init__(self, prob) self.mean = mean self.std = std - self._do_transform = False self._noise = None def randomize(self, im_shape: Sequence[int]) -> None: - self._do_transform = self.R.random() < self.prob + super().randomize(None) self._noise = self.R.normal(self.mean, self.R.uniform(0, self.std), size=im_shape) def __call__(self, img: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]: @@ -101,7 +100,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray: return np.asarray((img + self.offset), dtype=img.dtype) -class RandShiftIntensity(Randomizable, Transform): +class RandShiftIntensity(RandomizableTransform): """ Randomly shift intensity with randomly picked offset. """ @@ -113,6 +112,7 @@ def __init__(self, offsets: Union[Tuple[float, float], float], prob: float = 0.1 if single number, offset value is picked from (-offsets, offsets). prob: probability of shift. """ + RandomizableTransform.__init__(self, prob) if isinstance(offsets, (int, float)): self.offsets = (min(-offsets, offsets), max(-offsets, offsets)) else: @@ -120,12 +120,9 @@ def __init__(self, offsets: Union[Tuple[float, float], float], prob: float = 0.1 raise AssertionError("offsets should be a number or pair of numbers.") self.offsets = (min(offsets), max(offsets)) - self.prob = prob - self._do_transform = False - def randomize(self, data: Optional[Any] = None) -> None: self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) - self._do_transform = self.R.random() < self.prob + super().randomize(None) def __call__(self, img: np.ndarray) -> np.ndarray: """ @@ -172,7 +169,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray: raise ValueError("Incompatible values: minv=None or maxv=None and factor=None.") -class RandScaleIntensity(Randomizable, Transform): +class RandScaleIntensity(RandomizableTransform): """ Randomly scale the intensity of input image by ``v = v * (1 + factor)`` where the `factor` is randomly picked from (-factors[0], factors[0]). @@ -186,6 +183,7 @@ def __init__(self, factors: Union[Tuple[float, float], float], prob: float = 0.1 prob: probability of scale. 
""" + RandomizableTransform.__init__(self, prob) if isinstance(factors, (int, float)): self.factors = (min(-factors, factors), max(-factors, factors)) else: @@ -193,12 +191,9 @@ def __init__(self, factors: Union[Tuple[float, float], float], prob: float = 0.1 raise AssertionError("factors should be a number or pair of numbers.") self.factors = (min(factors), max(factors)) - self.prob = prob - self._do_transform = False - def randomize(self, data: Optional[Any] = None) -> None: self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) - self._do_transform = self.R.random() < self.prob + super().randomize(None) def __call__(self, img: np.ndarray) -> np.ndarray: """ @@ -243,7 +238,7 @@ def __init__( self.dtype = dtype def _normalize(self, img: np.ndarray, sub=None, div=None) -> np.ndarray: - slices = (img != 0) if self.nonzero else np.ones(img.shape, dtype=np.bool_) + slices = (img != 0) if self.nonzero else np.ones(img.shape, dtype=bool) if not np.any(slices): return img @@ -370,7 +365,7 @@ def __call__(self, img: np.ndarray): return np.power(((img - img_min) / float(img_range + epsilon)), self.gamma) * img_range + img_min -class RandAdjustContrast(Randomizable, Transform): +class RandAdjustContrast(RandomizableTransform): """ Randomly changes image intensity by gamma. Each pixel/voxel intensity is updated as:: @@ -383,7 +378,7 @@ class RandAdjustContrast(Randomizable, Transform): """ def __init__(self, prob: float = 0.1, gamma: Union[Sequence[float], float] = (0.5, 4.5)) -> None: - self.prob = prob + RandomizableTransform.__init__(self, prob) if isinstance(gamma, (int, float)): if gamma <= 0.5: @@ -396,11 +391,10 @@ def __init__(self, prob: float = 0.1, gamma: Union[Sequence[float], float] = (0. raise AssertionError("gamma should be a number or pair of numbers.") self.gamma = (min(gamma), max(gamma)) - self._do_transform = False self.gamma_value = None def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1]) def __call__(self, img: np.ndarray) -> np.ndarray: @@ -657,7 +651,7 @@ def __call__(self, img: np.ndarray): return gaussian_filter(input_data).squeeze(0).detach().numpy() -class RandGaussianSmooth(Randomizable, Transform): +class RandGaussianSmooth(RandomizableTransform): """ Apply Gaussian smooth to the input data based on randomly selected `sigma` parameters. @@ -679,15 +673,14 @@ def __init__( prob: float = 0.1, approx: str = "erf", ) -> None: + RandomizableTransform.__init__(self, prob) self.sigma_x = sigma_x self.sigma_y = sigma_y self.sigma_z = sigma_z - self.prob = prob self.approx = approx - self._do_transform = False def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1]) self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1]) self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1]) @@ -748,7 +741,7 @@ def __call__(self, img: np.ndarray): return (blurred_f + self.alpha * (blurred_f - filter_blurred_f)).squeeze(0).detach().numpy() -class RandGaussianSharpen(Randomizable, Transform): +class RandGaussianSharpen(RandomizableTransform): """ Sharpen images using the Gaussian Blur filter based on randomly selected `sigma1`, `sigma2` and `alpha`. The algorithm is :py:class:`monai.transforms.GaussianSharpen`. 
@@ -782,6 +775,7 @@ def __init__( approx: str = "erf", prob: float = 0.1, ) -> None: + RandomizableTransform.__init__(self, prob) self.sigma1_x = sigma1_x self.sigma1_y = sigma1_y self.sigma1_z = sigma1_z @@ -790,11 +784,9 @@ def __init__( self.sigma2_z = sigma2_z self.alpha = alpha self.approx = approx - self.prob = prob - self._do_transform = False def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1]) self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1]) self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1]) @@ -815,7 +807,7 @@ def __call__(self, img: np.ndarray): return GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(img) -class RandHistogramShift(Randomizable, Transform): +class RandHistogramShift(RandomizableTransform): """ Apply random nonlinear transform to the image's intensity histogram. @@ -827,6 +819,7 @@ class RandHistogramShift(Randomizable, Transform): """ def __init__(self, num_control_points: Union[Tuple[int, int], int] = 10, prob: float = 0.1) -> None: + RandomizableTransform.__init__(self, prob) if isinstance(num_control_points, int): if num_control_points <= 2: @@ -838,11 +831,9 @@ def __init__(self, num_control_points: Union[Tuple[int, int], int] = 10, prob: f if min(num_control_points) <= 2: raise AssertionError("num_control_points should be greater than or equal to 3") self.num_control_points = (min(num_control_points), max(num_control_points)) - self.prob = prob - self._do_transform = False def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random() < self.prob + super().randomize(None) num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1) self.reference_control_points = np.linspace(0, 1, num_control_point) self.floating_control_points = np.copy(self.reference_control_points) diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 54a85a57b0..7d0d66d2ba 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -34,7 +34,7 @@ ShiftIntensity, ThresholdIntensity, ) -from monai.transforms.transform import MapTransform, Randomizable +from monai.transforms.transform import MapTransform, RandomizableTransform from monai.utils import dtype_torch_to_numpy, ensure_tuple_rep, ensure_tuple_size __all__ = [ @@ -92,7 +92,7 @@ ] -class RandGaussianNoised(Randomizable, MapTransform): +class RandGaussianNoised(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandGaussianNoise`. Add Gaussian noise to image. This transform assumes all the expected fields have same shape. 
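The dictionary-based transforms below switch from a single `super().__init__(keys)` to calling each base initializer explicitly, since `RandomizableTransform` and `MapTransform` take different constructor arguments. The pattern, in a hypothetical minimal transform (name and noise logic are illustrative only):

    from typing import Dict, Hashable, Mapping

    import numpy as np

    from monai.config import KeysCollection
    from monai.transforms.transform import MapTransform, RandomizableTransform

    class RandDemoNoised(RandomizableTransform, MapTransform):
        """Hypothetical example of the explicit dual-initializer pattern."""

        def __init__(self, keys: KeysCollection, prob: float = 0.1) -> None:
            MapTransform.__init__(self, keys)            # sets self.keys
            RandomizableTransform.__init__(self, prob)   # sets self.prob and self._do_transform

        def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
            self.randomize(None)  # base class draws self._do_transform
            d = dict(data)
            if not self._do_transform:
                return d
            for key in self.keys:
                d[key] = d[key] + self.R.normal(size=d[key].shape)
            return d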
@@ -108,15 +108,14 @@ class RandGaussianNoised(Randomizable, MapTransform): def __init__( self, keys: KeysCollection, prob: float = 0.1, mean: Union[Sequence[float], float] = 0.0, std: float = 0.1 ) -> None: - super().__init__(keys) - self.prob = prob + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) self.mean = ensure_tuple_rep(mean, len(self.keys)) self.std = std - self._do_transform = False self._noise: List[np.ndarray] = [] def randomize(self, im_shape: Sequence[int]) -> None: - self._do_transform = self.R.random() < self.prob + super().randomize(None) self._noise.clear() for m in self.mean: self._noise.append(self.R.normal(m, self.R.uniform(0, self.std), size=im_shape)) @@ -158,7 +157,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandShiftIntensityd(Randomizable, MapTransform): +class RandShiftIntensityd(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandShiftIntensity`. """ @@ -173,7 +172,8 @@ def __init__(self, keys: KeysCollection, offsets: Union[Tuple[float, float], flo prob: probability of rotating. (Default 0.1, with 10% probability it returns a rotated array.) """ - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) if isinstance(offsets, (int, float)): self.offsets = (min(-offsets, offsets), max(-offsets, offsets)) @@ -182,12 +182,9 @@ def __init__(self, keys: KeysCollection, offsets: Union[Tuple[float, float], flo raise AssertionError("offsets should be a number or pair of numbers.") self.offsets = (min(offsets), max(offsets)) - self.prob = prob - self._do_transform = False - def randomize(self, data: Optional[Any] = None) -> None: self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) - self._do_transform = self.R.random() < self.prob + super().randomize(None) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) @@ -229,7 +226,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandScaleIntensityd(Randomizable, MapTransform): +class RandScaleIntensityd(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandScaleIntensity`. """ @@ -245,7 +242,8 @@ def __init__(self, keys: KeysCollection, factors: Union[Tuple[float, float], flo (Default 0.1, with 10% probability it returns a rotated array.) 
""" - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) if isinstance(factors, (int, float)): self.factors = (min(-factors, factors), max(-factors, factors)) @@ -254,12 +252,9 @@ def __init__(self, keys: KeysCollection, factors: Union[Tuple[float, float], flo raise AssertionError("factors should be a number or pair of numbers.") self.factors = (min(factors), max(factors)) - self.prob = prob - self._do_transform = False - def randomize(self, data: Optional[Any] = None) -> None: self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) - self._do_transform = self.R.random() < self.prob + super().randomize(None) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) @@ -382,7 +377,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandAdjustContrastd(Randomizable, MapTransform): +class RandAdjustContrastd(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandAdjustContrast`. Randomly changes image intensity by gamma. Each pixel/voxel intensity is updated as: @@ -400,8 +395,8 @@ class RandAdjustContrastd(Randomizable, MapTransform): def __init__( self, keys: KeysCollection, prob: float = 0.1, gamma: Union[Tuple[float, float], float] = (0.5, 4.5) ) -> None: - super().__init__(keys) - self.prob: float = prob + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) if isinstance(gamma, (int, float)): if gamma <= 0.5: @@ -414,11 +409,10 @@ def __init__( raise AssertionError("gamma should be a number or pair of numbers.") self.gamma = (min(gamma), max(gamma)) - self._do_transform = False self.gamma_value: Optional[float] = None def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1]) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: @@ -529,7 +523,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandGaussianSmoothd(Randomizable, MapTransform): +class RandGaussianSmoothd(RandomizableTransform, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.GaussianSmooth`. @@ -554,16 +548,15 @@ def __init__( approx: str = "erf", prob: float = 0.1, ) -> None: - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) self.sigma_x = sigma_x self.sigma_y = sigma_y self.sigma_z = sigma_z self.approx = approx - self.prob = prob - self._do_transform = False def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1]) self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1]) self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1]) @@ -616,7 +609,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandGaussianSharpend(Randomizable, MapTransform): +class RandGaussianSharpend(RandomizableTransform, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.GaussianSharpen`. 
@@ -652,7 +645,8 @@ def __init__( approx: str = "erf", prob: float = 0.1, ): - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) self.sigma1_x = sigma1_x self.sigma1_y = sigma1_y self.sigma1_z = sigma1_z @@ -661,11 +655,9 @@ def __init__( self.sigma2_z = sigma2_z self.alpha = alpha self.approx = approx - self.prob = prob - self._do_transform = False def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1]) self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1]) self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1]) @@ -689,7 +681,7 @@ def __call__(self, data): return d -class RandHistogramShiftd(Randomizable, MapTransform): +class RandHistogramShiftd(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandHistogramShift`. Apply random nonlinear transform the the image's intensity histogram. @@ -706,7 +698,8 @@ class RandHistogramShiftd(Randomizable, MapTransform): def __init__( self, keys: KeysCollection, num_control_points: Union[Tuple[int, int], int] = 10, prob: float = 0.1 ) -> None: - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) if isinstance(num_control_points, int): if num_control_points <= 2: raise AssertionError("num_control_points should be greater than or equal to 3") @@ -717,11 +710,9 @@ def __init__( if min(num_control_points) <= 2: raise AssertionError("num_control_points should be greater than or equal to 3") self.num_control_points = (min(num_control_points), max(num_control_points)) - self.prob = prob - self._do_transform = False def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random() < self.prob + super().randomize(None) num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1) self.reference_control_points = np.linspace(0, 1, num_control_point) self.floating_control_points = np.copy(self.reference_control_points) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index d6dbe56f01..3559d0eb3c 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -23,7 +23,7 @@ from monai.data.utils import compute_shape_offset, to_affine_nd, zoom_affine from monai.networks.layers import AffineTransform, GaussianFilter, grid_pull from monai.transforms.croppad.array import CenterSpatialCrop -from monai.transforms.transform import Randomizable, Transform +from monai.transforms.transform import RandomizableTransform, Transform from monai.transforms.utils import ( create_control_grid, create_grid, @@ -604,7 +604,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray: return result.astype(img.dtype) -class RandRotate90(Randomizable, Transform): +class RandRotate90(RandomizableTransform): """ With probability `prob`, input arrays are rotated by 90 degrees in the plane specified by `spatial_axes`. @@ -619,16 +619,15 @@ def __init__(self, prob: float = 0.1, max_k: int = 3, spatial_axes: Tuple[int, i spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes. Default: (0, 1), this is the first two axis in spatial dimensions. 
""" - self.prob = min(max(prob, 0.0), 1.0) + RandomizableTransform.__init__(self, min(max(prob, 0.0), 1.0)) self.max_k = max_k self.spatial_axes = spatial_axes - self._do_transform = False self._rand_k = 0 def randomize(self, data: Optional[Any] = None) -> None: self._rand_k = self.R.randint(self.max_k) + 1 - self._do_transform = self.R.random() < self.prob + super().randomize(None) def __call__(self, img: np.ndarray) -> np.ndarray: """ @@ -642,7 +641,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray: return rotator(img) -class RandRotate(Randomizable, Transform): +class RandRotate(RandomizableTransform): """ Randomly rotate the input arrays. @@ -682,6 +681,7 @@ def __init__( align_corners: bool = False, dtype: DtypeLike = np.float64, ) -> None: + RandomizableTransform.__init__(self, prob) self.range_x = ensure_tuple(range_x) if len(self.range_x) == 1: self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]])) @@ -692,20 +692,18 @@ def __init__( if len(self.range_z) == 1: self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]])) - self.prob = prob self.keep_size = keep_size self.mode: GridSampleMode = GridSampleMode(mode) self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode) self.align_corners = align_corners self.dtype = dtype - self._do_transform = False self.x = 0.0 self.y = 0.0 self.z = 0.0 def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1]) self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1]) self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1]) @@ -747,7 +745,7 @@ def __call__( return rotator(img) -class RandFlip(Randomizable, Transform): +class RandFlip(RandomizableTransform): """ Randomly flips the image along axes. Preserves shape. See numpy.flip for additional details. @@ -759,25 +757,21 @@ class RandFlip(Randomizable, Transform): """ def __init__(self, prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None: - self.prob = prob + RandomizableTransform.__init__(self, min(max(prob, 0.0), 1.0)) self.flipper = Flip(spatial_axis=spatial_axis) - self._do_transform = False - - def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob def __call__(self, img: np.ndarray) -> np.ndarray: """ Args: img: channel first array, must have shape: (num_channels, H[, W, ..., ]), """ - self.randomize() + self.randomize(None) if not self._do_transform: return img return self.flipper(img) -class RandZoom(Randomizable, Transform): +class RandZoom(RandomizableTransform): """ Randomly zooms input arrays with given probability within given zoom range. 
@@ -816,21 +810,20 @@ def __init__( align_corners: Optional[bool] = None, keep_size: bool = True, ) -> None: + RandomizableTransform.__init__(self, prob) self.min_zoom = ensure_tuple(min_zoom) self.max_zoom = ensure_tuple(max_zoom) if len(self.min_zoom) != len(self.max_zoom): raise AssertionError("min_zoom and max_zoom must have same length.") - self.prob = prob self.mode: InterpolateMode = InterpolateMode(mode) self.padding_mode: NumpyPadMode = NumpyPadMode(padding_mode) self.align_corners = align_corners self.keep_size = keep_size - self._do_transform = False self._zoom: Sequence[float] = [1.0] def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)] def __call__( @@ -961,7 +954,7 @@ def __call__( return np.asarray(grid.cpu().numpy()) -class RandAffineGrid(Randomizable, Transform): +class RandAffineGrid(RandomizableTransform): """ Generate randomised affine grid. """ @@ -1050,7 +1043,7 @@ def __call__( return affine_grid(spatial_size, grid) -class RandDeformGrid(Randomizable, Transform): +class RandDeformGrid(RandomizableTransform): """ Generate random deformation grid. """ @@ -1279,7 +1272,7 @@ def __call__( ) -class RandAffine(Randomizable, Transform): +class RandAffine(RandomizableTransform): """ Random affine transform. """ @@ -1331,6 +1324,7 @@ def __init__( - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. """ + RandomizableTransform.__init__(self, prob) self.rand_affine_grid = RandAffineGrid( rotate_range=rotate_range, @@ -1346,9 +1340,6 @@ def __init__( self.mode: GridSampleMode = GridSampleMode(mode) self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode) - self.do_transform = False - self.prob = prob - def set_random_state( self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None ) -> "RandAffine": @@ -1357,7 +1348,7 @@ def set_random_state( return self def randomize(self, data: Optional[Any] = None) -> None: - self.do_transform = self.R.rand() < self.prob + super().randomize(None) self.rand_affine_grid.randomize() def __call__( @@ -1385,7 +1376,7 @@ def __call__( self.randomize() sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:]) - if self.do_transform: + if self._do_transform: grid = self.rand_affine_grid(spatial_size=sp_size) else: grid = create_grid(spatial_size=sp_size) @@ -1394,7 +1385,7 @@ def __call__( ) -class Rand2DElastic(Randomizable, Transform): +class Rand2DElastic(RandomizableTransform): """ Random elastic deformation and affine in 2D """ @@ -1451,6 +1442,7 @@ def __init__( - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. 
""" + RandomizableTransform.__init__(self, prob) self.deform_grid = RandDeformGrid( spacing=spacing, magnitude_range=magnitude_range, as_tensor_output=True, device=device ) @@ -1467,8 +1459,6 @@ def __init__( self.spatial_size = spatial_size self.mode: GridSampleMode = GridSampleMode(mode) self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode) - self.prob = prob - self.do_transform = False def set_random_state( self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None @@ -1479,7 +1469,7 @@ def set_random_state( return self def randomize(self, spatial_size: Sequence[int]) -> None: - self.do_transform = self.R.rand() < self.prob + super().randomize(None) self.deform_grid.randomize(spatial_size) self.rand_affine_grid.randomize() @@ -1505,7 +1495,7 @@ def __call__( """ sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:]) self.randomize(spatial_size=sp_size) - if self.do_transform: + if self._do_transform: grid = self.deform_grid(spatial_size=sp_size) grid = self.rand_affine_grid(grid=grid) grid = torch.nn.functional.interpolate( # type: ignore @@ -1521,7 +1511,7 @@ def __call__( return self.resampler(img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode) -class Rand3DElastic(Randomizable, Transform): +class Rand3DElastic(RandomizableTransform): """ Random elastic deformation and affine in 3D """ @@ -1580,6 +1570,7 @@ def __init__( - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. """ + RandomizableTransform.__init__(self, prob) self.rand_affine_grid = RandAffineGrid(rotate_range, shear_range, translate_range, scale_range, True, device) self.resampler = Resample(as_tensor_output=as_tensor_output, device=device) @@ -1590,8 +1581,6 @@ def __init__( self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode) self.device = device - self.prob = prob - self.do_transform = False self.rand_offset = None self.magnitude = 1.0 self.sigma = 1.0 @@ -1604,8 +1593,8 @@ def set_random_state( return self def randomize(self, grid_size: Sequence[int]) -> None: - self.do_transform = self.R.rand() < self.prob - if self.do_transform: + super().randomize(None) + if self._do_transform: self.rand_offset = self.R.uniform(-1.0, 1.0, [3] + list(grid_size)).astype(np.float32) self.magnitude = self.R.uniform(self.magnitude_range[0], self.magnitude_range[1]) self.sigma = self.R.uniform(self.sigma_range[0], self.sigma_range[1]) @@ -1634,7 +1623,7 @@ def __call__( sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:]) self.randomize(grid_size=sp_size) grid = create_grid(spatial_size=sp_size) - if self.do_transform: + if self._do_transform: if self.rand_offset is None: raise AssertionError grid = torch.as_tensor(np.ascontiguousarray(grid), device=self.device) diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 2c66cd5f50..6693d75bcd 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -35,7 +35,7 @@ Spacing, Zoom, ) -from monai.transforms.transform import MapTransform, Randomizable +from monai.transforms.transform import MapTransform, RandomizableTransform from monai.transforms.utils import create_grid from monai.utils import ( GridSampleMode, @@ -274,7 +274,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandRotate90d(Randomizable, MapTransform): +class 
RandRotate90d(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandRotate90`. With probability `prob`, input arrays are rotated by 90 degrees @@ -299,28 +299,26 @@ def __init__( spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes. Default: (0, 1), this is the first two axis in spatial dimensions. """ - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) - self.prob = min(max(prob, 0.0), 1.0) self.max_k = max_k self.spatial_axes = spatial_axes - self._do_transform = False self._rand_k = 0 def randomize(self, data: Optional[Any] = None) -> None: self._rand_k = self.R.randint(self.max_k) + 1 - self._do_transform = self.R.random() < self.prob + super().randomize(None) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Mapping[Hashable, np.ndarray]: self.randomize() - if not self._do_transform: - return data + d = dict(data) rotator = Rotate90(self._rand_k, self.spatial_axes) - d = dict(data) for key in self.keys: - d[key] = rotator(d[key]) + if self._do_transform: + d[key] = rotator(d[key]) return d @@ -364,7 +362,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandAffined(Randomizable, MapTransform): +class RandAffined(RandomizableTransform, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.RandAffine`. """ @@ -420,9 +418,10 @@ def __init__( - :py:class:`monai.transforms.compose.MapTransform` - :py:class:`RandAffineGrid` for the random affine parameters configurations. """ - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) self.rand_affine = RandAffine( - prob=prob, + prob=1.0, # because probability handled in this class rotate_range=rotate_range, shear_range=shear_range, translate_range=translate_range, @@ -442,6 +441,7 @@ def set_random_state( return self def randomize(self, data: Optional[Any] = None) -> None: + super().randomize(None) self.rand_affine.randomize() def __call__( @@ -451,7 +451,7 @@ def __call__( self.randomize() sp_size = fall_back_tuple(self.rand_affine.spatial_size, data[self.keys[0]].shape[1:]) - if self.rand_affine.do_transform: + if self._do_transform: grid = self.rand_affine.rand_affine_grid(spatial_size=sp_size) else: grid = create_grid(spatial_size=sp_size) @@ -461,7 +461,7 @@ def __call__( return d -class Rand2DElasticd(Randomizable, MapTransform): +class Rand2DElasticd(RandomizableTransform, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Rand2DElastic`. """ @@ -523,11 +523,12 @@ def __init__( - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. 
""" - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) self.rand_2d_elastic = Rand2DElastic( spacing=spacing, magnitude_range=magnitude_range, - prob=prob, + prob=1.0, # because probability controlled by this class rotate_range=rotate_range, shear_range=shear_range, translate_range=translate_range, @@ -547,6 +548,7 @@ def set_random_state( return self def randomize(self, spatial_size: Sequence[int]) -> None: + super().randomize(None) self.rand_2d_elastic.randomize(spatial_size) def __call__( @@ -557,7 +559,7 @@ def __call__( sp_size = fall_back_tuple(self.rand_2d_elastic.spatial_size, data[self.keys[0]].shape[1:]) self.randomize(spatial_size=sp_size) - if self.rand_2d_elastic.do_transform: + if self._do_transform: grid = self.rand_2d_elastic.deform_grid(spatial_size=sp_size) grid = self.rand_2d_elastic.rand_affine_grid(grid=grid) grid = torch.nn.functional.interpolate( # type: ignore @@ -578,7 +580,7 @@ def __call__( return d -class Rand3DElasticd(Randomizable, MapTransform): +class Rand3DElasticd(RandomizableTransform, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Rand3DElastic`. """ @@ -641,11 +643,12 @@ def __init__( - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. """ - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) self.rand_3d_elastic = Rand3DElastic( sigma_range=sigma_range, magnitude_range=magnitude_range, - prob=prob, + prob=1.0, # because probability controlled by this class rotate_range=rotate_range, shear_range=shear_range, translate_range=translate_range, @@ -665,6 +668,7 @@ def set_random_state( return self def randomize(self, grid_size: Sequence[int]) -> None: + super().randomize(None) self.rand_3d_elastic.randomize(grid_size) def __call__( @@ -675,7 +679,7 @@ def __call__( self.randomize(grid_size=sp_size) grid = create_grid(spatial_size=sp_size) - if self.rand_3d_elastic.do_transform: + if self._do_transform: device = self.rand_3d_elastic.device grid = torch.tensor(grid).to(device) gaussian = GaussianFilter(spatial_dims=3, sigma=self.rand_3d_elastic.sigma, truncated=3.0).to(device) @@ -713,7 +717,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandFlipd(Randomizable, MapTransform): +class RandFlipd(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandFlip`. 
@@ -732,23 +736,18 @@ def __init__( prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None, ) -> None: - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) self.spatial_axis = spatial_axis - self.prob = prob - self._do_transform = False self.flipper = Flip(spatial_axis=spatial_axis) - def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob - def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: - self.randomize() + self.randomize(None) d = dict(data) - if not self._do_transform: - return d for key in self.keys: - d[key] = self.flipper(d[key]) + if self._do_transform: + d[key] = self.flipper(d[key]) return d @@ -810,7 +809,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandRotated(Randomizable, MapTransform): +class RandRotated(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandRotate` Randomly rotates the input arrays. @@ -857,7 +856,8 @@ def __init__( align_corners: Union[Sequence[bool], bool] = False, dtype: Union[Sequence[DtypeLike], DtypeLike] = np.float64, ) -> None: - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) self.range_x = ensure_tuple(range_x) if len(self.range_x) == 1: self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]])) @@ -868,20 +868,18 @@ def __init__( if len(self.range_z) == 1: self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]])) - self.prob = prob self.keep_size = keep_size self.mode = ensure_tuple_rep(mode, len(self.keys)) self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys)) self.align_corners = ensure_tuple_rep(align_corners, len(self.keys)) self.dtype = ensure_tuple_rep(dtype, len(self.keys)) - self._do_transform = False self.x = 0.0 self.y = 0.0 self.z = 0.0 def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1]) self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1]) self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1]) @@ -957,7 +955,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class RandZoomd(Randomizable, MapTransform): +class RandZoomd(RandomizableTransform, MapTransform): """ Dict-based version :py:class:`monai.transforms.RandZoom`. 
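Because `RandFlipd` draws a single `_do_transform` per call and then applies the same flipper to every key, paired inputs stay aligned; e.g.:

    import numpy as np
    from monai.transforms import RandFlipd

    t = RandFlipd(keys=("image", "label"), prob=0.5, spatial_axis=0)
    t.set_random_state(seed=7)
    sample = {
        "image": np.random.rand(1, 4, 4).astype(np.float32),
        "label": np.zeros((1, 4, 4), dtype=np.float32),
    }
    out = t(sample)  # "image" and "label" are flipped together, or neither is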
@@ -1000,23 +998,22 @@ def __init__( align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None, keep_size: bool = True, ) -> None: - super().__init__(keys) + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) self.min_zoom = ensure_tuple(min_zoom) self.max_zoom = ensure_tuple(max_zoom) if len(self.min_zoom) != len(self.max_zoom): raise AssertionError("min_zoom and max_zoom must have same length.") - self.prob = prob self.mode = ensure_tuple_rep(mode, len(self.keys)) self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys)) self.align_corners = ensure_tuple_rep(align_corners, len(self.keys)) self.keep_size = keep_size - self._do_transform = False self._zoom: Sequence[float] = [1.0] def randomize(self, data: Optional[Any] = None) -> None: - self._do_transform = self.R.random_sample() < self.prob + super().randomize(None) self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)] def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: diff --git a/monai/transforms/transform.py b/monai/transforms/transform.py index e5841cbe97..9c9729d250 100644 --- a/monai/transforms/transform.py +++ b/monai/transforms/transform.py @@ -20,25 +20,13 @@ from monai.config import KeysCollection from monai.utils import MAX_SEED, ensure_tuple -__all__ = ["Randomizable", "Transform", "MapTransform"] +__all__ = ["Randomizable", "RandomizableTransform", "Transform", "MapTransform"] class Randomizable(ABC): """ An interface for handling random state locally, currently based on a class variable `R`, which is an instance of `np.random.RandomState`. - This is mainly for randomized data augmentation transforms. For example:: - - class RandShiftIntensity(Randomizable): - def randomize(): - self._offset = self.R.uniform(low=0, high=100) - def __call__(self, img): - self.randomize() - return img + self._offset - - transform = RandShiftIntensity() - transform.set_random_state(seed=0) - """ R: np.random.RandomState = np.random.RandomState() @@ -77,7 +65,6 @@ def set_random_state( self.R = np.random.RandomState() return self - @abstractmethod def randomize(self, data: Any) -> None: """ Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors. @@ -89,7 +76,6 @@ def randomize(self, data: Any) -> None: Raises: NotImplementedError: When the subclass does not override this method. - """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") @@ -142,6 +128,40 @@ def __call__(self, data: Any): raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") +class RandomizableTransform(Randomizable, Transform): + """ + An interface for handling random state locally, currently based on a class variable `R`, + which is an instance of `np.random.RandomState`. + This is mainly for randomized data augmentation transforms. For example:: + + class RandShiftIntensity(RandomizableTransform): + def randomize(): + self._offset = self.R.uniform(low=0, high=100) + def __call__(self, img): + self.randomize() + return img + self._offset + + transform = RandShiftIntensity() + transform.set_random_state(seed=0) + + """ + + def __init__(self, prob=1.0, do_transform=False): + self._do_transform = do_transform + self.prob = min(max(prob, 0.0), 1.0) + + def randomize(self, data: Any) -> None: + """ + Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors. 
+
+        all :py:attr:`self.R` calls happen here so that we have a better chance to
+        identify errors in synchronizing the random state.
+
+        This method can generate the random factors based on properties of the input data.
+        """
+        self._do_transform = self.R.rand() < self.prob
+
+
 class MapTransform(Transform):
     """
     A subclass of :py:class:`monai.transforms.Transform` with an assumption
diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py
index 0ee88e1a6c..24d2feb781 100644
--- a/monai/transforms/utility/array.py
+++ b/monai/transforms/utility/array.py
@@ -21,7 +21,7 @@
 import torch
 
 from monai.config import DtypeLike, NdarrayTensor
-from monai.transforms.transform import Randomizable, Transform
+from monai.transforms.transform import RandomizableTransform, Transform
 from monai.transforms.utils import extreme_points_to_image, get_extreme_points, map_binary_to_indices
 from monai.utils import ensure_tuple, min_version, optional_import
 
@@ -631,7 +631,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
         return np.stack(result, axis=0).astype(np.float32)
 
 
-class AddExtremePointsChannel(Transform, Randomizable):
+class AddExtremePointsChannel(RandomizableTransform):
     """
     Add extreme points of label to the image as a new channel. This transform generates extreme point
     from label and applies a gaussian filter. The pixel values in points image are rescaled
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index f9612c2408..e9d923d0fd 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -23,7 +23,7 @@
 import torch
 
 from monai.config import DtypeLike, KeysCollection, NdarrayTensor
-from monai.transforms.transform import MapTransform, Randomizable
+from monai.transforms.transform import MapTransform, RandomizableTransform
 from monai.transforms.utility.array import (
     AddChannel,
     AsChannelFirst,
@@ -682,9 +682,9 @@ def __call__(self, data):
         return d
 
 
-class RandLambdad(Lambdad, Randomizable):
+class RandLambdad(Lambdad, RandomizableTransform):
     """
-    Randomizable version :py:class:`monai.transforms.Lambdad`, the input `func` contains random logic.
+    RandomizableTransform version of :py:class:`monai.transforms.Lambdad`, the input `func` contains random logic.
     It's a randomizable transform so `CacheDataset` will not execute it and cache the results.
 
     Args:
@@ -800,7 +800,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda
         return d
 
 
-class AddExtremePointsChanneld(Randomizable, MapTransform):
+class AddExtremePointsChanneld(RandomizableTransform, MapTransform):
     """
     Dictionary-based wrapper of :py:class:`monai.transforms.AddExtremePointsChannel`.
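Since `RandomizableTransform` keeps the probability gate inside `randomize`, reproducibility still comes from `Randomizable.set_random_state`. A short, self-contained usage sketch (this completes the `RandShiftIntensity` example from the docstring above so that it actually runs; the shapes and ranges are illustrative):

    import numpy as np
    from monai.transforms.transform import RandomizableTransform

    class RandShiftIntensity(RandomizableTransform):
        def randomize(self, data=None):
            super().randomize(None)                 # sets self._do_transform
            self._offset = self.R.uniform(low=0, high=100)

        def __call__(self, img):
            self.randomize()
            if not self._do_transform:
                return img
            return img + self._offset

    transform = RandShiftIntensity(prob=1.0)
    transform.set_random_state(seed=0)              # same seed -> same sequence of offsets
    out = transform(np.zeros(3))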
@@ -828,7 +828,7 @@ def __init__( rescale_min: float = -1.0, rescale_max: float = 1.0, ): - super().__init__(keys) + MapTransform.__init__(self, keys) self.background = background self.pert = pert self.points: List[Tuple[int, ...]] = [] diff --git a/tests/test_compose.py b/tests/test_compose.py index 3a0a6ea5bb..bb8a5f08c5 100644 --- a/tests/test_compose.py +++ b/tests/test_compose.py @@ -13,11 +13,12 @@ import unittest from monai.data import DataLoader, Dataset -from monai.transforms import AddChannel, Compose, Randomizable +from monai.transforms import AddChannel, Compose +from monai.transforms.transform import RandomizableTransform from monai.utils import set_determinism -class _RandXform(Randomizable): +class _RandXform(RandomizableTransform): def randomize(self): self.val = self.R.random_sample() @@ -79,7 +80,7 @@ def c(d): # transform to handle dict data self.assertDictEqual(item, {"a": 2, "b": 1, "c": 2}) def test_random_compose(self): - class _Acc(Randomizable): + class _Acc(RandomizableTransform): self.rand = 0.0 def randomize(self, data=None): @@ -98,10 +99,13 @@ def __call__(self, data): self.assertAlmostEqual(c(1), 1.90734751) def test_randomize_warn(self): - class _RandomClass(Randomizable): + class _RandomClass(RandomizableTransform): def randomize(self, foo1, foo2): pass + def __call__(self, data): + pass + c = Compose([_RandomClass(), _RandomClass()]) with self.assertWarns(Warning): c.randomize() @@ -168,7 +172,7 @@ def test_flatten_and_len(self): self.assertEqual(len(t1), 8) def test_backwards_compatible_imports(self): - from monai.transforms.compose import MapTransform, Randomizable, Transform # noqa: F401 + from monai.transforms.compose import MapTransform, RandomizableTransform, Transform # noqa: F401 if __name__ == "__main__": diff --git a/tests/test_image_dataset.py b/tests/test_image_dataset.py index d79a7d884c..ec2cf77cd8 100644 --- a/tests/test_image_dataset.py +++ b/tests/test_image_dataset.py @@ -17,12 +17,12 @@ import numpy as np from monai.data import ImageDataset -from monai.transforms import Randomizable +from monai.transforms.transform import RandomizableTransform FILENAMES = ["test1.nii.gz", "test2.nii", "test3.nii.gz"] -class RandTest(Randomizable): +class RandTest(RandomizableTransform): """ randomisable transform for testing. """ diff --git a/tests/test_rand_lambdad.py b/tests/test_rand_lambdad.py index 359da8857a..2ddfeefae0 100644 --- a/tests/test_rand_lambdad.py +++ b/tests/test_rand_lambdad.py @@ -13,11 +13,11 @@ import numpy as np -from monai.transforms import Randomizable +from monai.transforms.transform import RandomizableTransform from monai.transforms.utility.dictionary import RandLambdad -class RandTest(Randomizable): +class RandTest(RandomizableTransform): """ randomisable transform for testing. 
""" diff --git a/tests/test_randomizable.py b/tests/test_randomizable.py index a7a30124df..9972bded0f 100644 --- a/tests/test_randomizable.py +++ b/tests/test_randomizable.py @@ -13,7 +13,7 @@ import numpy as np -from monai.transforms import Randomizable +from monai.transforms.transform import Randomizable class RandTest(Randomizable): From 4b8819aa2d26f96dc2d23d3159e56cc413d3e846 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Fri, 26 Feb 2021 16:13:21 -0800 Subject: [PATCH 013/457] Add more tests to test_deepgrow_dataset (#1654) * Add more tests to test_deepgrow_dataset Signed-off-by: YuanTingHsieh --- monai/apps/deepgrow/dataset.py | 6 +- tests/test_deepgrow_dataset.py | 109 ++++++++++++++++++++++++--------- 2 files changed, 82 insertions(+), 33 deletions(-) diff --git a/monai/apps/deepgrow/dataset.py b/monai/apps/deepgrow/dataset.py index 45cfbde6ea..acaeba0bc3 100644 --- a/monai/apps/deepgrow/dataset.py +++ b/monai/apps/deepgrow/dataset.py @@ -22,7 +22,7 @@ def create_dataset( datalist, output_dir: str, - dimension, + dimension: int, pixdim, image_key: str = "image", label_key: str = "label", @@ -138,7 +138,7 @@ def _save_data_2d(vol_idx, vol_image, vol_label, dataset_dir, relative_path): if len(vol_image.shape) == 4: logging.info( "4D-Image, pick only first series; Image: {}; Label: {}".format( - vol_image.shape, vol_label.shape if vol_label else None + vol_image.shape, vol_label.shape if vol_label is not None else None ) ) vol_image = vol_image[0] @@ -216,7 +216,7 @@ def _save_data_3d(vol_idx, vol_image, vol_label, dataset_dir, relative_path): if len(vol_image.shape) == 4: logging.info( "4D-Image, pick only first series; Image: {}; Label: {}".format( - vol_image.shape, vol_label.shape if vol_label else None + vol_image.shape, vol_label.shape if vol_label is not None else None ) ) vol_image = vol_image[0] diff --git a/tests/test_deepgrow_dataset.py b/tests/test_deepgrow_dataset.py index e871c328a6..147d8e7099 100644 --- a/tests/test_deepgrow_dataset.py +++ b/tests/test_deepgrow_dataset.py @@ -10,47 +10,96 @@ # limitations under the License. 
import os +import shutil import tempfile import unittest import nibabel as nib import numpy as np +from parameterized import parameterized from monai.apps.deepgrow.dataset import create_dataset +from monai.utils import set_determinism + +TEST_CASE_1 = [{"dimension": 2, "pixdim": (1, 1)}, {"length": 3}, 9, 1] + +TEST_CASE_2 = [{"dimension": 2, "pixdim": (1, 1), "limit": 1}, {"length": 3}, 3, 1] + +TEST_CASE_3 = [{"dimension": 2, "pixdim": (1, 1)}, {"length": 1}, 3, 1] + +TEST_CASE_4 = [{"dimension": 3, "pixdim": (1, 1, 1)}, {"length": 1}, 1, 1] + +TEST_CASE_5 = [{"dimension": 3, "pixdim": (1, 1, 1)}, {"length": 1, "image_channel": 4}, 1, 1] + +TEST_CASE_6 = [{"dimension": 2, "pixdim": (1, 1)}, {"length": 1, "image_channel": 4}, 3, 1] + +TEST_CASE_7 = [ + {"dimension": 2, "pixdim": (1, 1), "label_key": None}, + {"length": 1, "image_channel": 4, "with_label": False}, + 40, + None, +] + +TEST_CASE_8 = [ + {"dimension": 3, "pixdim": (1, 1, 1), "label_key": None}, + {"length": 1, "image_channel": 4, "with_label": False}, + 1, + None, +] class TestCreateDataset(unittest.TestCase): - def _create_data(self, tempdir): + def setUp(self): + set_determinism(1) + self.tempdir = tempfile.mkdtemp() + + def _create_data(self, length=1, image_channel=1, with_label=True): affine = np.eye(4) - image = np.random.randint(0, 2, size=(128, 128, 40)) - image_file = os.path.join(tempdir, "image1.nii.gz") - nib.save(nib.Nifti1Image(image, affine), image_file) - - label = np.zeros((128, 128, 40)) - label[0][1][0] = 1 - label[0][1][1] = 1 - label[0][0][2] = 1 - label[0][1][2] = 1 - label_file = os.path.join(tempdir, "label1.nii.gz") - nib.save(nib.Nifti1Image(label, affine), label_file) - - return [{"image": image_file, "label": label_file}] - - def test_create_dataset_2d(self): - with tempfile.TemporaryDirectory() as tempdir: - datalist = self._create_data(tempdir) - output_dir = os.path.join(tempdir, "2d") - deepgrow_datalist = create_dataset(datalist=datalist, output_dir=output_dir, dimension=2, pixdim=(1, 1)) - self.assertEqual(len(deepgrow_datalist), 3) - self.assertEqual(deepgrow_datalist[0]["region"], 1) - - def test_create_dataset_3d(self): - with tempfile.TemporaryDirectory() as tempdir: - datalist = self._create_data(tempdir) - output_dir = os.path.join(tempdir, "3d") - deepgrow_datalist = create_dataset(datalist=datalist, output_dir=output_dir, dimension=3, pixdim=(1, 1, 1)) - self.assertEqual(len(deepgrow_datalist), 1) - self.assertEqual(deepgrow_datalist[0]["region"], 1) + datalist = [] + for i in range(length): + if image_channel == 1: + image = np.random.randint(0, 2, size=(128, 128, 40)) + else: + image = np.random.randint(0, 2, size=(128, 128, 40, image_channel)) + image_file = os.path.join(self.tempdir, f"image{i}.nii.gz") + nib.save(nib.Nifti1Image(image, affine), image_file) + + if with_label: + # 3 slices has label + label = np.zeros((128, 128, 40)) + label[0][1][0] = 1 + label[0][1][1] = 1 + label[0][0][2] = 1 + label[0][1][2] = 1 + label_file = os.path.join(self.tempdir, f"label{i}.nii.gz") + nib.save(nib.Nifti1Image(label, affine), label_file) + datalist.append({"image": image_file, "label": label_file}) + else: + datalist.append({"image": image_file}) + + return datalist + + @parameterized.expand( + [TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7, TEST_CASE_8] + ) + def test_create_dataset(self, args, data_args, expected_length, expected_region): + datalist = self._create_data(**data_args) + deepgrow_datalist = create_dataset(datalist=datalist, 
output_dir=self.tempdir, **args) + self.assertEqual(len(deepgrow_datalist), expected_length) + if expected_region is not None: + self.assertEqual(deepgrow_datalist[0]["region"], expected_region) + + def test_invalid_dim(self): + with self.assertRaises(ValueError): + create_dataset(datalist=self._create_data(), output_dir=self.tempdir, dimension=4, pixdim=(1, 1, 1, 1)) + + def test_empty_datalist(self): + with self.assertRaises(ValueError): + create_dataset(datalist=[], output_dir=self.tempdir, dimension=3, pixdim=(1, 1, 1)) + + def tearDown(self): + shutil.rmtree(self.tempdir) + set_determinism(None) if __name__ == "__main__": From 39fd45544673f06a5752980c08f0680e723a50f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Mon, 1 Mar 2021 08:07:49 -0800 Subject: [PATCH 014/457] Add Deepgrow inference transforms (#1653) * Add inference transforms Signed-off-by: YuanTingHsieh * Remove unused import Signed-off-by: YuanTingHsieh * Fix review comments Signed-off-by: YuanTingHsieh Co-authored-by: SACHIDANAND ALLE --- docs/source/apps.rst | 10 + monai/apps/deepgrow/transforms.py | 494 ++++++++++++++++++++++++++++-- tests/test_deepgrow_transforms.py | 234 ++++++++++++++ 3 files changed, 715 insertions(+), 23 deletions(-) diff --git a/docs/source/apps.rst b/docs/source/apps.rst index b8c8b4d341..1c4f4c3dfb 100644 --- a/docs/source/apps.rst +++ b/docs/source/apps.rst @@ -46,9 +46,19 @@ Applications :members: .. autoclass:: AddRandomGuidanced :members: +.. autoclass:: AddGuidanceFromPointsd + :members: .. autoclass:: SpatialCropForegroundd :members: +.. autoclass:: SpatialCropGuidanced + :members: +.. autoclass:: RestoreLabeld + :members: +.. autoclass:: ResizeGuidanced + :members: .. autoclass:: FindDiscrepancyRegionsd :members: .. autoclass:: FindAllValidSlicesd :members: +.. autoclass:: Fetch2DSliced + :members: diff --git a/monai/apps/deepgrow/transforms.py b/monai/apps/deepgrow/transforms.py index 3f4031fade..cc01a717ad 100644 --- a/monai/apps/deepgrow/transforms.py +++ b/monai/apps/deepgrow/transforms.py @@ -8,18 +8,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from typing import Callable, Optional, Sequence, Union +from typing import Callable, Dict, Optional, Sequence, Union import numpy as np import torch from monai.config import IndexSelection, KeysCollection from monai.networks.layers import GaussianFilter -from monai.transforms import SpatialCrop +from monai.transforms import Resize, SpatialCrop from monai.transforms.transform import MapTransform, RandomizableTransform, Transform from monai.transforms.utils import generate_spatial_bounding_box -from monai.utils import min_version, optional_import +from monai.utils import InterpolateMode, ensure_tuple_rep, min_version, optional_import measure, _ = optional_import("skimage.measure", "0.14.2", min_version) distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt") @@ -48,7 +47,7 @@ def _apply(self, label): return np.asarray(sids) def __call__(self, data): - d = dict(data) + d: Dict = dict(data) label = d[self.label] if label.shape[0] != 1: raise ValueError("Only supports single channel labels!") @@ -67,7 +66,8 @@ class AddInitialSeedPointd(RandomizableTransform): Add random guidance as initial seed point for a given label. 
Note that the label is of size (C, D, H, W) or (C, H, W) - The guidance is of size (2, N, # of dims) where N is number of guidance added + + The guidance is of size (2, N, # of dims) where N is number of guidance added. # of dims = 4 when C, D, H, W; # of dims = 3 when (C, H, W) Args: @@ -86,14 +86,23 @@ def __init__( sid: str = "sid", connected_regions: int = 5, ): + super().__init__(prob=1.0, do_transform=True) self.label = label - self.sids = sids - self.sid = sid + self.sids_key = sids + self.sid_key = sid + self.sid = None self.guidance = guidance self.connected_regions = connected_regions - def randomize(self, data=None): - pass + def randomize(self, data): + sid = data.get(self.sid_key, None) + sids = data.get(self.sids_key, None) + if sids is not None: + if sid is None or sid not in sids: + sid = self.R.choice(sids, replace=False) + else: + sid = None + self.sid = sid def _apply(self, label, sid): dimensions = 3 if len(label.shape) > 3 else 2 @@ -134,14 +143,8 @@ def _apply(self, label, sid): def __call__(self, data): d = dict(data) - sid = d.get(self.sid, None) - sids = d.get(self.sids, None) - if sids is not None: - if sid is None or sid not in sids: - sid = self.R.choice(sids, replace=False) - else: - sid = None - d[self.guidance] = self._apply(d[self.label], sid) + self.randomize(data) + d[self.guidance] = self._apply(d[self.label], self.sid) return d @@ -232,6 +235,7 @@ class FindDiscrepancyRegionsd(Transform): Find discrepancy between prediction and actual during click interactions during training. If batched is true: + label is in shape (B, C, D, H, W) or (B, C, H, W) pred has same shape as label discrepancy will have shape (B, 2, C, D, H, W) or (B, 2, C, H, W) @@ -283,7 +287,7 @@ class AddRandomGuidanced(RandomizableTransform): """ Add random guidance based on discrepancies that were found between label and prediction. - If batched is True: + If batched is True, input shape is as below: Guidance is of shape (B, 2, N, # of dim) where B is batch size, 2 means positive and negative, N means how many guidance points, # of dim is the total number of dimensions of the image @@ -291,7 +295,15 @@ class AddRandomGuidanced(RandomizableTransform): Discrepancy is of shape (B, 2, C, D, H, W) or (B, 2, C, H, W) - Probability is of shape (B,) + Probability is of shape (B, 1) + + else: + + Guidance is of shape (2, N, # of dim) + + Discrepancy is of shape (2, C, D, H, W) or (2, C, H, W) + + Probability is of shape (1) Args: guidance: key to guidance source. @@ -307,6 +319,7 @@ def __init__( probability: str = "probability", batched: bool = True, ): + super().__init__(prob=1.0, do_transform=True) self.guidance = guidance self.discrepancy = discrepancy self.probability = probability @@ -389,7 +402,7 @@ class SpatialCropForegroundd(MapTransform): """ Crop only the foreground object of the expected images. - Difference VS CropForegroundd: + Difference VS :py:class:`monai.transforms.CropForegroundd`: 1. If the bounding box is smaller than spatial size in all dimensions then this transform will crop the object using box's center and spatial_size. @@ -399,9 +412,11 @@ class SpatialCropForegroundd(MapTransform): The typical usage is to help training and evaluation if the valid part is small in the whole medical image. The valid part can be determined by any field in the data with `source_key`, for example: + - Select values > 0 in image field as the foreground and crop on all fields specified by `keys`. - Select label = 3 in label field as the foreground to crop on all fields specified by `keys`. 
     - Select label > 0 in the third channel of a One-Hot label field as the foreground to crop all `keys` fields.
+
     Users can define arbitrary function to select expected foreground from the whole source image or specified
     channels. And it can also add margin to every dim of the bounding box of foreground object.
 
@@ -457,8 +472,8 @@ def __call__(self, data):
             d[self.source_key], self.select_fn, self.channel_indices, self.margin
         )
 
-        center = np.mean([box_start, box_end], axis=0).astype(int).tolist()
-        current_size = np.subtract(box_end, box_start).astype(int).tolist()
+        center = list(np.mean([box_start, box_end], axis=0).astype(int))
+        current_size = list(np.subtract(box_end, box_start).astype(int))
 
         if np.all(np.less(current_size, self.spatial_size)):
             cropper = SpatialCrop(roi_center=center, roi_size=self.spatial_size)
@@ -477,3 +492,436 @@ def __call__(self, data):
             d[meta_key][self.cropped_shape_key] = image.shape
             d[key] = image
         return d
+
+
+# Transforms to support Inference for Deepgrow models
+class AddGuidanceFromPointsd(Transform):
+    """
+    Add guidance based on user clicks.
+
+    We assume the input is loaded by LoadImaged and has the shape of (H, W, D) originally.
+    Clicks always specify the coordinates in (H, W, D).
+
+    If depth_first is True:
+
+        Input is now of shape (D, H, W), will return guidance that specifies the coordinates in (D, H, W)
+
+    else:
+
+        Input is now of shape (H, W, D), will return guidance that specifies the coordinates in (H, W, D)
+
+    Args:
+        ref_image: key to reference image to fetch current and original image details.
+        guidance: output key to store guidance.
+        foreground: key that represents user foreground (+ve) clicks.
+        background: key that represents user background (-ve) clicks.
+        axis: axis that represents slices in 3D volume. (axis to Depth)
+        depth_first: if depth (slices) is positioned at first dimension.
+        dimensions: dimensions based on model used for deepgrow (2D vs 3D).
+        slice_key: key that represents applicable slice to add guidance.
+        meta_key_postfix: use `{ref_image}_{postfix}` to fetch the meta data according to the key data,
+            default is `meta_dict`, the meta data is a dictionary object.
+            For example, to handle key `image`, read/write affine matrices from the
+            metadata `image_meta_dict` dictionary's `affine` field.
+    """
+
+    def __init__(
+        self,
+        ref_image,
+        guidance: str = "guidance",
+        foreground: str = "foreground",
+        background: str = "background",
+        axis: int = 0,
+        depth_first: bool = True,
+        dimensions: int = 2,
+        slice_key: str = "slice",
+        meta_key_postfix: str = "meta_dict",
+    ):
+        self.ref_image = ref_image
+        self.guidance = guidance
+        self.foreground = foreground
+        self.background = background
+        self.axis = axis
+        self.depth_first = depth_first
+        self.dimensions = dimensions
+        self.slice = slice_key
+        self.meta_key_postfix = meta_key_postfix
+
+    def _apply(self, pos_clicks, neg_clicks, factor, slice_num):
+        pos = neg = []
+
+        if self.dimensions == 2:
+            points = list(pos_clicks)
+            points.extend(neg_clicks)
+            points = np.array(points)
+
+            slices = list(np.unique(points[:, self.axis]))
+            slice_idx = slices[0] if slice_num is None else next(x for x in slices if x == slice_num)
+
+            if len(pos_clicks):
+                pos_clicks = np.array(pos_clicks)
+                pos = (pos_clicks[np.where(pos_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()
+            if len(neg_clicks):
+                neg_clicks = np.array(neg_clicks)
+                neg = (neg_clicks[np.where(neg_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()
+
+            guidance = [pos, neg, slice_idx]
+        else:
+            if len(pos_clicks):
+                pos = np.multiply(pos_clicks, factor).astype(int).tolist()
+            if len(neg_clicks):
+                neg = np.multiply(neg_clicks, factor).astype(int).tolist()
+            guidance = [pos, neg]
+        return guidance
+
+    def __call__(self, data):
+        d = dict(data)
+        meta_dict_key = f"{self.ref_image}_{self.meta_key_postfix}"
+        if meta_dict_key not in d:
+            raise RuntimeError(f"Missing meta_dict {meta_dict_key} in data!")
+        if "spatial_shape" not in d[meta_dict_key]:
+            raise RuntimeError('Missing "spatial_shape" in meta_dict!')
+        original_shape = d[meta_dict_key]["spatial_shape"]
+        current_shape = list(d[self.ref_image].shape)
+
+        if self.depth_first:
+            if self.axis != 0:
+                raise RuntimeError("Depth first means the depth axis should be 0.")
+            # here we assume the depth dimension was the last dimension of "original_shape"
+            original_shape = np.roll(original_shape, 1)
+
+        factor = np.array(current_shape) / original_shape
+
+        fg_bg_clicks = []
+        for key in [self.foreground, self.background]:
+            clicks = d[key]
+            clicks = list(np.array(clicks).astype(int))
+            if self.depth_first:
+                for i in range(len(clicks)):
+                    clicks[i] = list(np.roll(clicks[i], 1))
+            fg_bg_clicks.append(clicks)
+        d[self.guidance] = self._apply(fg_bg_clicks[0], fg_bg_clicks[1], factor, d.get(self.slice, None))
+        return d
+
+
+class SpatialCropGuidanced(MapTransform):
+    """
+    Crop image based on guidance with minimal spatial size.
+
+    - If the bounding box is smaller than spatial size in all dimensions then this transform will crop the
+      object using box's center and spatial_size.
+
+    - This transform will set "start_coord_key", "end_coord_key", "original_shape_key" and "cropped_shape_key"
+      in data[{key}_{meta_key_postfix}]
+
+    Input data is of shape (C, spatial_1, [spatial_2, ...])
+
+    Args:
+        keys: keys of the corresponding items to be transformed.
+        guidance: key to the guidance. It is used to generate the bounding box of the foreground.
+        spatial_size: minimal spatial size of the image patch e.g. [128, 128, 128] to fit in.
+        margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
+        meta_key_postfix: use `key_{postfix}` to fetch the meta data according to the key data,
+            default is `meta_dict`, the meta data is a dictionary object.
+            For example, to handle key `image`, read/write affine matrices from the
+            metadata `image_meta_dict` dictionary's `affine` field.
+        start_coord_key: key to record the start coordinate of spatial bounding box for foreground.
+        end_coord_key: key to record the end coordinate of spatial bounding box for foreground.
+        original_shape_key: key to record original shape for foreground.
+        cropped_shape_key: key to record cropped shape for foreground.
+    """
+
+    def __init__(
+        self,
+        keys: KeysCollection,
+        guidance: str,
+        spatial_size,
+        margin=20,
+        meta_key_postfix="meta_dict",
+        start_coord_key: str = "foreground_start_coord",
+        end_coord_key: str = "foreground_end_coord",
+        original_shape_key: str = "foreground_original_shape",
+        cropped_shape_key: str = "foreground_cropped_shape",
+    ) -> None:
+        super().__init__(keys)
+
+        self.guidance = guidance
+        self.spatial_size = list(spatial_size)
+        self.margin = margin
+        self.meta_key_postfix = meta_key_postfix
+        self.start_coord_key = start_coord_key
+        self.end_coord_key = end_coord_key
+        self.original_shape_key = original_shape_key
+        self.cropped_shape_key = cropped_shape_key
+
+    def bounding_box(self, points, img_shape):
+        ndim = len(img_shape)
+        margin = ensure_tuple_rep(self.margin, ndim)
+        for m in margin:
+            if m < 0:
+                raise ValueError("margin value should not be negative number.")
+
+        box_start = [0] * ndim
+        box_end = [0] * ndim
+
+        for di in range(ndim):
+            dt = points[..., di]
+            min_d = max(min(dt - margin[di]), 0)
+            max_d = min(img_shape[di], max(dt + margin[di] + 1))
+            box_start[di], box_end[di] = min_d, max_d
+        return box_start, box_end
+
+    def __call__(self, data):
+        d: Dict = dict(data)
+        guidance = d[self.guidance]
+        original_spatial_shape = d[self.keys[0]].shape[1:]
+        box_start, box_end = self.bounding_box(np.array(guidance[0] + guidance[1]), original_spatial_shape)
+        center = list(np.mean([box_start, box_end], axis=0).astype(int))
+        spatial_size = self.spatial_size
+
+        box_size = list(np.subtract(box_end, box_start).astype(int))
+        spatial_size = spatial_size[-len(box_size) :]
+
+        if len(spatial_size) < len(box_size):
+            # If the data is in 3D and spatial_size is specified as 2D [256,256]
+            # Then we will get all slices in such case
+            diff = len(box_size) - len(spatial_size)
+            spatial_size = list(original_spatial_shape[1 : (1 + diff)]) + spatial_size
+
+        if np.all(np.less(box_size, spatial_size)):
+            if len(center) == 3:
+                # 3D Deepgrow: set center to be middle of the depth dimension (D)
+                center[0] = spatial_size[0] // 2
+            cropper = SpatialCrop(roi_center=center, roi_size=spatial_size)
+        else:
+            cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)
+        box_start, box_end = cropper.roi_start, cropper.roi_end
+
+        for key in self.keys:
+            if not np.array_equal(d[key].shape[1:], original_spatial_shape):
+                raise RuntimeError("All the images specified in keys should have the same spatial shape")
+            meta_key = f"{key}_{self.meta_key_postfix}"
+            d[meta_key][self.start_coord_key] = box_start
+            d[meta_key][self.end_coord_key] = box_end
+            d[meta_key][self.original_shape_key] = d[key].shape
+
+            image = cropper(d[key])
+            d[meta_key][self.cropped_shape_key] = image.shape
+            d[key] = image
+
+        pos_clicks, neg_clicks = guidance[0], guidance[1]
+        pos = np.subtract(pos_clicks, box_start).tolist() if len(pos_clicks) else []
+        neg = np.subtract(neg_clicks, box_start).tolist() if len(neg_clicks) else []
+
+        d[self.guidance] = [pos, neg]
+        return d
+
+
+class ResizeGuidanced(Transform):
+    """
+    Resize the guidance based on cropped vs resized image.
+
+    This transform assumes that the images have been cropped and resized, and that the shape after
+    cropping is stored in the meta dict of the ref image.
+
+    Args:
+        guidance: key to guidance
+        ref_image: key to reference image to fetch current and original image details
+        meta_key_postfix: use `{ref_image}_{postfix}` to fetch the meta data according to the key data,
+            default is `meta_dict`, the meta data is a dictionary object.
+            For example, to handle key `image`, read/write affine matrices from the
+            metadata `image_meta_dict` dictionary's `affine` field.
+        cropped_shape_key: key that records cropped shape for foreground.
+    """
+
+    def __init__(
+        self,
+        guidance: str,
+        ref_image: str,
+        meta_key_postfix="meta_dict",
+        cropped_shape_key: str = "foreground_cropped_shape",
+    ) -> None:
+        self.guidance = guidance
+        self.ref_image = ref_image
+        self.meta_key_postfix = meta_key_postfix
+        self.cropped_shape_key = cropped_shape_key
+
+    def __call__(self, data):
+        d = dict(data)
+        guidance = d[self.guidance]
+        meta_dict: Dict = d[f"{self.ref_image}_{self.meta_key_postfix}"]
+        current_shape = d[self.ref_image].shape[1:]
+        cropped_shape = meta_dict[self.cropped_shape_key][1:]
+        factor = np.divide(current_shape, cropped_shape)
+
+        pos_clicks, neg_clicks = guidance[0], guidance[1]
+        pos = np.multiply(pos_clicks, factor).astype(int).tolist() if len(pos_clicks) else []
+        neg = np.multiply(neg_clicks, factor).astype(int).tolist() if len(neg_clicks) else []
+
+        d[self.guidance] = [pos, neg]
+        return d
+
+
+class RestoreLabeld(MapTransform):
+    """
+    Restores label based on the ref image.
+
+    The ref_image is assumed to have gone through the following transforms:
+
+    1. Fetch2DSliced (If 2D)
+    2. Spacingd
+    3. SpatialCropGuidanced
+    4. Resized
+
+    And its shape is assumed to be (C, D, H, W)
+
+    This transform tries to undo these operations so that the resulting label can be overlaid on the
+    original volume. It does the following operations:
+
+    1. Undo Resized
+    2. Undo SpatialCropGuidanced
+    3. Undo Spacingd
+    4. Undo Fetch2DSliced
+
+    The resulting label is of shape (D, H, W)
+
+    Args:
+        keys: keys of the corresponding items to be transformed.
+        ref_image: reference image to fetch current and original image details
+        slice_only: apply only to an applicable slice, in case of 2D model/prediction
+        mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
+            ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
+            One of the listed string values or a user supplied function for padding. Defaults to ``"constant"``.
+            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
+        align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
+            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
+            It also can be a sequence of bool, each element corresponds to a key in ``keys``.
+        meta_key_postfix: use `{ref_image}_{meta_key_postfix}` to fetch the meta data according to the key data,
+            default is `meta_dict`, the meta data is a dictionary object.
+            For example, to handle key `image`, read/write affine matrices from the
+            metadata `image_meta_dict` dictionary's `affine` field.
+        start_coord_key: key that records the start coordinate of spatial bounding box for foreground.
+        end_coord_key: key that records the end coordinate of spatial bounding box for foreground.
+        original_shape_key: key that records original shape for foreground.
+        cropped_shape_key: key that records cropped shape for foreground.
+    """
+
+    def __init__(
+        self,
+        keys: KeysCollection,
+        ref_image: str,
+        slice_only: bool = False,
+        mode: Union[Sequence[Union[InterpolateMode, str]], InterpolateMode, str] = InterpolateMode.NEAREST,
+        align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,
+        meta_key_postfix: str = "meta_dict",
+        start_coord_key: str = "foreground_start_coord",
+        end_coord_key: str = "foreground_end_coord",
+        original_shape_key: str = "foreground_original_shape",
+        cropped_shape_key: str = "foreground_cropped_shape",
+    ) -> None:
+        super().__init__(keys)
+        self.ref_image = ref_image
+        self.slice_only = slice_only
+        self.mode = ensure_tuple_rep(mode, len(self.keys))
+        self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
+        self.meta_key_postfix = meta_key_postfix
+        self.start_coord_key = start_coord_key
+        self.end_coord_key = end_coord_key
+        self.original_shape_key = original_shape_key
+        self.cropped_shape_key = cropped_shape_key
+
+    def __call__(self, data):
+        d = dict(data)
+        meta_dict: Dict = d[f"{self.ref_image}_{self.meta_key_postfix}"]
+
+        for idx, key in enumerate(self.keys):
+            image = d[key]
+
+            # Undo Resize
+            current_shape = image.shape
+            cropped_shape = meta_dict[self.cropped_shape_key]
+            if np.any(np.not_equal(current_shape, cropped_shape)):
+                resizer = Resize(spatial_size=cropped_shape[1:], mode=self.mode[idx])
+                image = resizer(image, mode=self.mode[idx], align_corners=self.align_corners[idx])
+
+            # Undo Crop
+            original_shape = meta_dict[self.original_shape_key]
+            result = np.zeros(original_shape, dtype=np.float32)
+            box_start = meta_dict[self.start_coord_key]
+            box_end = meta_dict[self.end_coord_key]
+
+            spatial_dims = min(len(box_start), len(image.shape[1:]))
+            slices = [slice(None)] + [slice(s, e) for s, e in zip(box_start[:spatial_dims], box_end[:spatial_dims])]
+            slices = tuple(slices)
+            result[slices] = image
+
+            # Undo Spacing
+            current_size = result.shape[1:]
+            # change spatial_shape from HWD to DHW
+            spatial_shape = list(np.roll(meta_dict["spatial_shape"], 1))
+            spatial_size = spatial_shape[-len(current_size) :]
+
+            if np.any(np.not_equal(current_size, spatial_size)):
+                resizer = Resize(spatial_size=spatial_size, mode=self.mode[idx])
+                result = resizer(result, mode=self.mode[idx], align_corners=self.align_corners[idx])
+
+            # Undo Slicing
+            slice_idx = meta_dict.get("slice_idx")
+            if slice_idx is None or self.slice_only:
+                final_result = result if len(result.shape) <= 3 else result[0]
+            else:
+                slice_idx = meta_dict["slice_idx"][0]
+                final_result = np.zeros(tuple(spatial_shape))
+                final_result[slice_idx] = result
+            d[key] = final_result
+
+            meta = d.get(f"{key}_{self.meta_key_postfix}")
+            if meta is None:
+                meta = dict()
+                d[f"{key}_{self.meta_key_postfix}"] = meta
+            meta["slice_idx"] = slice_idx
+            meta["affine"] = meta_dict["original_affine"]
+        return d
+
+
+class Fetch2DSliced(MapTransform):
+    """
+    Fetch one slice in case of a 3D volume.
+
+    The volume only contains spatial coordinates.
+
+    Args:
+        keys: keys of the corresponding items to be transformed.
+        guidance: key that represents guidance.
+        axis: axis that represents slice in 3D volume.
+        meta_key_postfix: use `key_{meta_key_postfix}` to fetch the meta data according to the key data,
+            default is `meta_dict`, the meta data is a dictionary object.
+            For example, to handle key `image`, read/write affine matrices from the
+            metadata `image_meta_dict` dictionary's `affine` field.
+    """
+
+    def __init__(self, keys, guidance="guidance", axis: int = 0, meta_key_postfix: str = "meta_dict"):
+        super().__init__(keys)
+        self.guidance = guidance
+        self.axis = axis
+        self.meta_key_postfix = meta_key_postfix
+
+    def _apply(self, image, guidance):
+        slice_idx = guidance[2]  # (pos, neg, slice_idx)
+        idx = []
+        for i in range(len(image.shape)):
+            idx.append(slice_idx) if i == self.axis else idx.append(slice(0, image.shape[i]))
+
+        idx = tuple(idx)
+        return image[idx], idx
+
+    def __call__(self, data):
+        d = dict(data)
+        guidance = d[self.guidance]
+        if len(guidance) < 3:
+            raise RuntimeError("Guidance does not contain slice_idx!")
+        for key in self.keys:
+            img_slice, idx = self._apply(d[key], guidance)
+            d[key] = img_slice
+            d[f"{key}_{self.meta_key_postfix}"]["slice_idx"] = idx
+        return d
diff --git a/tests/test_deepgrow_transforms.py b/tests/test_deepgrow_transforms.py
index f534813832..2d57ed9325 100644
--- a/tests/test_deepgrow_transforms.py
+++ b/tests/test_deepgrow_transforms.py
@@ -15,12 +15,17 @@
 from parameterized import parameterized
 
 from monai.apps.deepgrow.transforms import (
+    AddGuidanceFromPointsd,
     AddGuidanceSignald,
     AddInitialSeedPointd,
     AddRandomGuidanced,
+    Fetch2DSliced,
     FindAllValidSlicesd,
     FindDiscrepancyRegionsd,
+    ResizeGuidanced,
+    RestoreLabeld,
     SpatialCropForegroundd,
+    SpatialCropGuidanced,
 )
 
 IMAGE = np.array([[[[1, 0, 2, 0, 1], [0, 1, 2, 1, 0], [2, 2, 3, 2, 2], [0, 1, 2, 1, 0], [1, 0, 2, 0, 1]]]])
@@ -76,6 +81,76 @@
     "probability": [1.0],
 }
 
+DATA_5 = {
+    "image": np.arange(25).reshape((1, 5, 5)),
+    "image_meta_dict": {"spatial_shape": [5, 5, 1]},
+    "foreground": [[2, 2, 0]],
+    "background": [],
+}
+
+DATA_6 = {
+    "image": np.arange(25).reshape((1, 5, 5)),
+    "image_meta_dict": {"spatial_shape": [5, 2, 1]},
+    "foreground": [[2, 1, 0]],
+    "background": [[1, 0, 0]],
+}
+
+DATA_7 = {
+    "image": np.arange(500).reshape((5, 10, 10)),
+    "image_meta_dict": {"spatial_shape": [20, 20, 10]},
+    "foreground": [[10, 14, 6], [10, 14, 8]],
+    "background": [[10, 16, 8]],
+    "slice": 6,
+}
+
+DATA_8 = {
+    "image": np.arange(500).reshape((1, 5, 10, 10)),
+    "image_meta_dict": {"spatial_shape": [20, 20, 10]},
+    "guidance": [[[3, 5, 7], [4, 5, 7]], [[4, 5, 8]]],
+}
+
+DATA_9 = {
+    "image": np.arange(1000).reshape((1, 5, 10, 20)),
+    "image_meta_dict": {"foreground_cropped_shape": (1, 10, 20, 40)},
+    "guidance": [[[6, 10, 14], [8, 10, 14]], [[8, 10, 16]]],
+}
+
+DATA_10 = {
+    "image": np.arange(9).reshape((1, 1, 3, 3)),
+    "image_meta_dict": {
+        "spatial_shape": [3, 3, 1],
+        "foreground_start_coord": np.array([0, 0, 0]),
+        "foreground_end_coord": np.array([1, 3, 3]),
+        "foreground_original_shape": (1, 1, 3, 3),
+        "foreground_cropped_shape": (1, 1, 3, 3),
+        "original_affine": np.array(
+            [[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]
+        ),
+    },
+    "pred": np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]),
+}
+
+DATA_11 = {
+    "image": np.arange(500).reshape((1, 5, 10, 10)),
+    "image_meta_dict": {
+        "spatial_shape": [20, 20, 10],
+        "foreground_start_coord": np.array([2, 2, 2]),
+        "foreground_end_coord": np.array([4, 4, 4]),
+        "foreground_original_shape": (1, 5, 10, 10),
+        "foreground_cropped_shape": (1, 2, 2, 2),
+        "original_affine": np.array(
+            [[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]
+        ),
+    },
+    "pred": np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]),
+}
+
+DATA_12 = {
+    "image": np.arange(27).reshape(3, 3, 3),
+    "image_meta_dict": {},
+    "guidance": [[0, 0, 0], [0, 1, 1], 1],
+}
+
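DATA_12 above doubles as a compact illustration of the 2D guidance layout `[pos, neg, slice_idx]` that `Fetch2DSliced` consumes; only the third element is used to pick the slice. A minimal sketch of what the corresponding test case asserts (values taken from DATA_12):

    import numpy as np
    from monai.apps.deepgrow.transforms import Fetch2DSliced

    data = {
        "image": np.arange(27).reshape(3, 3, 3),   # (D, H, W) volume
        "image_meta_dict": {},
        "guidance": [[0, 0, 0], [0, 1, 1], 1],     # (pos, neg, slice_idx)
    }
    out = Fetch2DSliced(keys=["image"], guidance="guidance")(data)
    # out["image"] is the depth slice at index 1:
    # [[ 9, 10, 11], [12, 13, 14], [15, 16, 17]]
    # and out["image_meta_dict"]["slice_idx"] records the index tuple used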
FIND_SLICE_TEST_CASE_1 = [ {"label": "label", "sids": "sids"}, DATA_1, @@ -159,6 +234,118 @@ np.array([[[[1, 0, 2, 2], [1, 0, 1, 3]], [[-1, -1, -1, -1], [-1, -1, -1, -1]]]]), ] +ADD_GUIDANCE_FROM_POINTS_TEST_CASE_1 = [ + {"ref_image": "image", "dimensions": 3, "guidance": "guidance", "depth_first": True}, + DATA_5, + [[0, 2, 2]], + [], +] + +ADD_GUIDANCE_FROM_POINTS_TEST_CASE_2 = [ + {"ref_image": "image", "dimensions": 3, "guidance": "guidance", "depth_first": True}, + DATA_6, + [[0, 2, 2]], + [[0, 1, 0]], +] + +ADD_GUIDANCE_FROM_POINTS_TEST_CASE_3 = [ + {"ref_image": "image", "dimensions": 3, "guidance": "guidance", "depth_first": True}, + DATA_7, + [[3, 5, 7], [4, 5, 7]], + [[4, 5, 8]], +] + +ADD_GUIDANCE_FROM_POINTS_TEST_CASE_4 = [ + {"ref_image": "image", "dimensions": 2, "guidance": "guidance", "depth_first": True}, + DATA_6, + [[2, 2]], + [[1, 0]], +] + +ADD_GUIDANCE_FROM_POINTS_TEST_CASE_5 = [ + {"ref_image": "image", "dimensions": 2, "guidance": "guidance", "depth_first": True, "slice_key": "slice"}, + DATA_7, + [[5, 7]], + [], +] + +ADD_GUIDANCE_FROM_POINTS_TEST_CASE_6 = [ + {"ref_image": "image", "dimensions": 2, "guidance": "guidance", "depth_first": True}, + DATA_5, + [[2, 2]], + [], +] + +SPATIAL_CROP_GUIDANCE_TEST_CASE_1 = [ + {"keys": ["image"], "guidance": "guidance", "spatial_size": [1, 4, 4], "margin": 0}, + DATA_8, + np.array([[[[357, 358]], [[457, 458]]]]), +] + +SPATIAL_CROP_GUIDANCE_TEST_CASE_2 = [ + {"keys": ["image"], "guidance": "guidance", "spatial_size": [2, 2], "margin": 1}, + DATA_8, + np.array( + [ + [ + [[246, 247, 248, 249], [256, 257, 258, 259], [266, 267, 268, 269]], + [[346, 347, 348, 349], [356, 357, 358, 359], [366, 367, 368, 369]], + [[446, 447, 448, 449], [456, 457, 458, 459], [466, 467, 468, 469]], + ] + ] + ), +] + +SPATIAL_CROP_GUIDANCE_TEST_CASE_3 = [ + {"keys": ["image"], "guidance": "guidance", "spatial_size": [3, 3], "margin": 0}, + DATA_8, + np.array( + [ + [ + [[47, 48, 49], [57, 58, 59], [67, 68, 69]], + [[147, 148, 149], [157, 158, 159], [167, 168, 169]], + [[247, 248, 249], [257, 258, 259], [267, 268, 269]], + [[347, 348, 349], [357, 358, 359], [367, 368, 369]], + [[447, 448, 449], [457, 458, 459], [467, 468, 469]], + ] + ] + ), +] + +RESIZE_GUIDANCE_TEST_CASE_1 = [ + {"ref_image": "image", "guidance": "guidance"}, + DATA_9, + [[[3, 5, 7], [4, 5, 7]], [[4, 5, 8]]], +] + +RESTORE_LABEL_TEST_CASE_1 = [ + {"keys": ["pred"], "ref_image": "image", "mode": "nearest"}, + DATA_10, + np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]), +] + +RESULT = np.zeros((10, 20, 20)) +RESULT[4:8, 4:8, 4:8] = np.array( + [ + [[1.0, 1.0, 2.0, 2.0], [1.0, 1.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0], [3.0, 3.0, 4.0, 4.0]], + [[1.0, 1.0, 2.0, 2.0], [1.0, 1.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0], [3.0, 3.0, 4.0, 4.0]], + [[5.0, 5.0, 6.0, 6.0], [5.0, 5.0, 6.0, 6.0], [7.0, 7.0, 8.0, 8.0], [7.0, 7.0, 8.0, 8.0]], + [[5.0, 5.0, 6.0, 6.0], [5.0, 5.0, 6.0, 6.0], [7.0, 7.0, 8.0, 8.0], [7.0, 7.0, 8.0, 8.0]], + ], +) + +RESTORE_LABEL_TEST_CASE_2 = [ + {"keys": ["pred"], "ref_image": "image", "mode": "nearest"}, + DATA_11, + RESULT, +] + +FETCH_2D_SLICE_TEST_CASE_1 = [ + {"keys": ["image"], "guidance": "guidance"}, + DATA_12, + np.array([[9, 10, 11], [12, 13, 14], [15, 16, 17]]), +] + class TestFindAllValidSlicesd(unittest.TestCase): @parameterized.expand([FIND_SLICE_TEST_CASE_1, FIND_SLICE_TEST_CASE_2]) @@ -220,5 +407,52 @@ def test_correct_results(self, arguments, input_data, expected_result): np.testing.assert_allclose(result[arguments["guidance"]], expected_result, rtol=1e-5) +class 
TestAddGuidanceFromPointsd(unittest.TestCase): + @parameterized.expand( + [ + ADD_GUIDANCE_FROM_POINTS_TEST_CASE_1, + ADD_GUIDANCE_FROM_POINTS_TEST_CASE_2, + ADD_GUIDANCE_FROM_POINTS_TEST_CASE_3, + ADD_GUIDANCE_FROM_POINTS_TEST_CASE_4, + ADD_GUIDANCE_FROM_POINTS_TEST_CASE_5, + ADD_GUIDANCE_FROM_POINTS_TEST_CASE_6, + ] + ) + def test_correct_results(self, arguments, input_data, expected_pos, expected_neg): + result = AddGuidanceFromPointsd(**arguments)(input_data) + self.assertEqual(result[arguments["guidance"]][0], expected_pos) + self.assertEqual(result[arguments["guidance"]][1], expected_neg) + + +class TestSpatialCropGuidanced(unittest.TestCase): + @parameterized.expand( + [SPATIAL_CROP_GUIDANCE_TEST_CASE_1, SPATIAL_CROP_GUIDANCE_TEST_CASE_2, SPATIAL_CROP_GUIDANCE_TEST_CASE_3] + ) + def test_correct_results(self, arguments, input_data, expected_result): + result = SpatialCropGuidanced(**arguments)(input_data) + np.testing.assert_allclose(result["image"], expected_result) + + +class TestResizeGuidanced(unittest.TestCase): + @parameterized.expand([RESIZE_GUIDANCE_TEST_CASE_1]) + def test_correct_results(self, arguments, input_data, expected_result): + result = ResizeGuidanced(**arguments)(input_data) + self.assertEqual(result[arguments["guidance"]], expected_result) + + +class TestRestoreLabeld(unittest.TestCase): + @parameterized.expand([RESTORE_LABEL_TEST_CASE_1, RESTORE_LABEL_TEST_CASE_2]) + def test_correct_results(self, arguments, input_data, expected_result): + result = RestoreLabeld(**arguments)(input_data) + np.testing.assert_allclose(result["pred"], expected_result) + + +class TestFetch2DSliced(unittest.TestCase): + @parameterized.expand([FETCH_2D_SLICE_TEST_CASE_1]) + def test_correct_results(self, arguments, input_data, expected_result): + result = Fetch2DSliced(**arguments)(input_data) + np.testing.assert_allclose(result["image"], expected_result) + + if __name__ == "__main__": unittest.main() From f5ccdc6bcd9696da9c4763910143f12835dca954 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 2 Mar 2021 02:40:59 +0800 Subject: [PATCH 015/457] 1668 Add RandAxisFlip transforms (#1670) * [DLMED] add RandAxisFlip transforms Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] fix flake8 issues Signed-off-by: Nic Ma Co-authored-by: monai-bot --- docs/source/transforms.rst | 12 +++++++++ monai/transforms/__init__.py | 4 +++ monai/transforms/spatial/array.py | 32 ++++++++++++++++++++++ monai/transforms/spatial/dictionary.py | 37 ++++++++++++++++++++++++++ tests/test_rand_axis_flip.py | 32 ++++++++++++++++++++++ tests/test_rand_axis_flipd.py | 32 ++++++++++++++++++++++ 6 files changed, 149 insertions(+) create mode 100644 tests/test_rand_axis_flip.py create mode 100644 tests/test_rand_axis_flipd.py diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index a144c8c138..00d8cb9053 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -314,6 +314,12 @@ Spatial :members: :special-members: __call__ +`RandAxisFlip` +"""""""""""""" +.. autoclass:: RandAxisFlip + :members: + :special-members: __call__ + `RandZoom` """""""""" .. autoclass:: RandZoom @@ -791,6 +797,12 @@ Spatial (Dict) :members: :special-members: __call__ +`RandAxisFlipd` +""""""""""""""" +.. autoclass:: RandAxisFlipd + :members: + :special-members: __call__ + `Rotated` """"""""" .. 
autoclass:: Rotated diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 8b30d76bec..cd5b195bd3 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -181,6 +181,7 @@ Rand3DElastic, RandAffine, RandAffineGrid, + RandAxisFlip, RandDeformGrid, RandFlip, RandRotate, @@ -209,6 +210,9 @@ RandAffined, RandAffineD, RandAffineDict, + RandAxisFlipd, + RandAxisFlipD, + RandAxisFlipDict, RandFlipd, RandFlipD, RandFlipDict, diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 3559d0eb3c..2867361b8e 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -59,6 +59,7 @@ "RandRotate90", "RandRotate", "RandFlip", + "RandAxisFlip", "RandZoom", "AffineGrid", "RandAffineGrid", @@ -771,6 +772,37 @@ def __call__(self, img: np.ndarray) -> np.ndarray: return self.flipper(img) +class RandAxisFlip(RandomizableTransform): + """ + Randomly select a spatial axis and flip along it. + See numpy.flip for additional details. + https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html + + Args: + prob: Probability of flipping. + + """ + + def __init__(self, prob: float = 0.1) -> None: + RandomizableTransform.__init__(self, min(max(prob, 0.0), 1.0)) + self._axis: Optional[int] = None + + def randomize(self, data: np.ndarray) -> None: + super().randomize(None) + self._axis = self.R.randint(data.ndim - 1) + + def __call__(self, img: np.ndarray) -> np.ndarray: + """ + Args: + img: channel first array, must have shape: (num_channels, H[, W, ..., ]), + """ + self.randomize(data=img) + if not self._do_transform: + return img + flipper = Flip(spatial_axis=self._axis) + return flipper(img) + + class RandZoom(RandomizableTransform): """ Randomly zooms input arrays with given probability within given zoom range. diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 6693d75bcd..f29258bf28 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -58,6 +58,7 @@ "Rand3DElasticd", "Flipd", "RandFlipd", + "RandAxisFlipd", "Rotated", "RandRotated", "Zoomd", @@ -82,6 +83,8 @@ "FlipDict", "RandFlipD", "RandFlipDict", + "RandAxisFlipD", + "RandAxisFlipDict", "RotateD", "RotateDict", "RandRotateD", @@ -751,6 +754,39 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +class RandAxisFlipd(RandomizableTransform, MapTransform): + """ + Dictionary-based version :py:class:`monai.transforms.RandAxisFlip`. + + See `numpy.flip` for additional details. + https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html + + Args: + keys: Keys to pick data for transformation. + prob: Probability of flipping. + + """ + + def __init__(self, keys: KeysCollection, prob: float = 0.1) -> None: + MapTransform.__init__(self, keys) + RandomizableTransform.__init__(self, prob) + self._axis: Optional[int] = None + + def randomize(self, data: np.ndarray) -> None: + super().randomize(None) + self._axis = self.R.randint(data.ndim - 1) + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + self.randomize(data=data[self.keys[0]]) + flipper = Flip(spatial_axis=self._axis) + + d = dict(data) + for key in self.keys: + if self._do_transform: + d[key] = flipper(d[key]) + return d + + class Rotated(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Rotate`. 
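For reference, a quick usage sketch of the new transform pair added in this commit (the array shape and seed below are illustrative):

    import numpy as np
    from monai.transforms import RandAxisFlip, RandAxisFlipd

    img = np.random.rand(1, 8, 16, 24)             # channel-first (C, spatial...)
    flip = RandAxisFlip(prob=1.0)
    flip.set_random_state(seed=0)
    out = flip(img)                                # flipped along one randomly chosen spatial axis

    flip_d = RandAxisFlipd(keys="img", prob=1.0)
    flip_d.set_random_state(seed=0)
    out_d = flip_d({"img": img})["img"]            # same behaviour for dictionary data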
@@ -1051,6 +1087,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda Rand3DElasticD = Rand3DElasticDict = Rand3DElasticd FlipD = FlipDict = Flipd RandFlipD = RandFlipDict = RandFlipd +RandAxisFlipD = RandAxisFlipDict = RandAxisFlipd RotateD = RotateDict = Rotated RandRotateD = RandRotateDict = RandRotated ZoomD = ZoomDict = Zoomd diff --git a/tests/test_rand_axis_flip.py b/tests/test_rand_axis_flip.py new file mode 100644 index 0000000000..0bc2eb130e --- /dev/null +++ b/tests/test_rand_axis_flip.py @@ -0,0 +1,32 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np + +from monai.transforms import RandAxisFlip +from tests.utils import NumpyImageTestCase2D + + +class TestRandAxisFlip(NumpyImageTestCase2D): + def test_correct_results(self): + flip = RandAxisFlip(prob=1.0) + result = flip(self.imt[0]) + + expected = [] + for channel in self.imt[0]: + expected.append(np.flip(channel, flip._axis)) + self.assertTrue(np.allclose(np.stack(expected), result)) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_rand_axis_flipd.py b/tests/test_rand_axis_flipd.py new file mode 100644 index 0000000000..154d7813cb --- /dev/null +++ b/tests/test_rand_axis_flipd.py @@ -0,0 +1,32 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np + +from monai.transforms import RandAxisFlipd +from tests.utils import NumpyImageTestCase3D + + +class TestRandAxisFlip(NumpyImageTestCase3D): + def test_correct_results(self): + flip = RandAxisFlipd(keys="img", prob=1.0) + result = flip({"img": self.imt[0]}) + + expected = [] + for channel in self.imt[0]: + expected.append(np.flip(channel, flip._axis)) + self.assertTrue(np.allclose(np.stack(expected), result["img"])) + + +if __name__ == "__main__": + unittest.main() From 20e17b2fcec0a878a51c5b8e947413a699c4da61 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Tue, 2 Mar 2021 12:05:41 +0000 Subject: [PATCH 016/457] allow dictionary image only and test endianness (#1669) fix big endianness problem for dictionary reading. 
Also allow dictionary image_only
---
 monai/transforms/io/array.py      | 24 ++++++++++++++++
 monai/transforms/io/dictionary.py | 28 +++++++++++-------
 tests/test_nifti_endianness.py    | 48 +++++++++++++++++++++++++++++++
 3 files changed, 90 insertions(+), 10 deletions(-)
 create mode 100644 tests/test_nifti_endianness.py

diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py
index 855621e432..9c4f631699 100644
--- a/monai/transforms/io/array.py
+++ b/monai/transforms/io/array.py
@@ -33,6 +33,27 @@
 __all__ = ["LoadImage", "SaveImage"]
 
 
+def switch_endianness(data, old, new):
+    """
+    If any numpy arrays have `old` (e.g., ">"),
+    replace with `new` (e.g., "<").
+    """
+    if isinstance(data, np.ndarray):
+        if data.dtype.byteorder == old:
+            data = data.newbyteorder(new)
+    elif isinstance(data, tuple):
+        data = tuple(switch_endianness(x, old, new) for x in data)
+    elif isinstance(data, list):
+        data = [switch_endianness(x, old, new) for x in data]
+    elif isinstance(data, dict):
+        data = {k: switch_endianness(v, old, new) for k, v in data.items()}
+    elif isinstance(data, (bool, str, float, int)):
+        pass
+    else:
+        raise AssertionError(f"Unknown type: {type(data).__name__}")
+    return data
+
+
 class LoadImage(Transform):
     """
     Load image file or files from provided path based on reader.
@@ -132,6 +153,9 @@ def __call__(
         if self.image_only:
             return img_array
         meta_data[Key.FILENAME_OR_OBJ] = ensure_tuple(filename)[0]
+        # make sure all elements in metadata are little endian
+        meta_data = switch_endianness(meta_data, ">", "<")
+
         return img_array, meta_data
 
 
diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py
index 55707f750e..d9b6b5e6ab 100644
--- a/monai/transforms/io/dictionary.py
+++ b/monai/transforms/io/dictionary.py
@@ -59,6 +59,7 @@ def __init__(
         dtype: DtypeLike = np.float32,
         meta_key_postfix: str = "meta_dict",
         overwriting: bool = False,
+        image_only: bool = False,
         *args,
         **kwargs,
     ) -> None:
@@ -76,11 +77,13 @@ def __init__(
             For example, load nifti file for `image`, store the metadata into `image_meta_dict`.
         overwriting: whether allow to overwrite existing meta data of same key.
             default is False, which will raise exception if encountering existing key.
+        image_only: if True, return a dictionary containing only the image volumes, otherwise return
+            a dictionary containing the image data array and header dict per input key.
         args: additional parameters for reader if providing a reader name.
         kwargs: additional parameters for reader if providing a reader name.
""" super().__init__(keys) - self._loader = LoadImage(reader, False, dtype, *args, **kwargs) + self._loader = LoadImage(reader, image_only, dtype, *args, **kwargs) if not isinstance(meta_key_postfix, str): raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.") self.meta_key_postfix = meta_key_postfix @@ -98,15 +101,20 @@ def __call__(self, data, reader: Optional[ImageReader] = None): d = dict(data) for key in self.keys: data = self._loader(d[key], reader) - if not isinstance(data, (tuple, list)): - raise ValueError("loader must return a tuple or list.") - d[key] = data[0] - if not isinstance(data[1], dict): - raise ValueError("metadata must be a dict.") - key_to_add = f"{key}_{self.meta_key_postfix}" - if key_to_add in d and not self.overwriting: - raise KeyError(f"Meta data with key {key_to_add} already exists and overwriting=False.") - d[key_to_add] = data[1] + if self._loader.image_only: + if not isinstance(data, np.ndarray): + raise ValueError("loader must return a numpy array (because image_only=True was used).") + d[key] = data + else: + if not isinstance(data, (tuple, list)): + raise ValueError("loader must return a tuple or list (because image_only=False was used).") + d[key] = data[0] + if not isinstance(data[1], dict): + raise ValueError("metadata must be a dict.") + key_to_add = f"{key}_{self.meta_key_postfix}" + if key_to_add in d and not self.overwriting: + raise KeyError(f"Meta data with key {key_to_add} already exists and overwriting=False.") + d[key_to_add] = data[1] return d diff --git a/tests/test_nifti_endianness.py b/tests/test_nifti_endianness.py new file mode 100644 index 0000000000..14317c0832 --- /dev/null +++ b/tests/test_nifti_endianness.py @@ -0,0 +1,48 @@ +import tempfile +import unittest +from typing import TYPE_CHECKING, List, Tuple +from unittest.case import skipUnless + +import numpy as np +from parameterized import parameterized + +from monai.data import DataLoader, Dataset, create_test_image_2d +from monai.transforms import LoadImage, LoadImaged +from monai.utils.module import optional_import + +if TYPE_CHECKING: + import nibabel as nib + + has_nib = True +else: + nib, has_nib = optional_import("nibabel") + +TESTS: List[Tuple] = [] +for endianness in ["<", ">"]: + for use_array in [True, False]: + for image_only in [True, False]: + TESTS.append((endianness, use_array, image_only)) + + +class TestNiftiEndianness(unittest.TestCase): + def setUp(self): + self.im, _ = create_test_image_2d(100, 100) + self.fname = tempfile.NamedTemporaryFile(suffix=".nii.gz").name + + @parameterized.expand(TESTS) + @skipUnless(has_nib, "Requires NiBabel") + def test_endianness(self, endianness, use_array, image_only): + + hdr = nib.Nifti1Header(endianness=endianness) + nii = nib.Nifti1Image(self.im, np.eye(4), header=hdr) + nib.save(nii, self.fname) + + data = [self.fname] if use_array else [{"image": self.fname}] + tr = LoadImage(image_only=image_only) if use_array else LoadImaged("image", image_only=image_only) + check_ds = Dataset(data, tr) + check_loader = DataLoader(check_ds, batch_size=1) + _ = next(iter(check_loader)) + + +if __name__ == "__main__": + unittest.main() From 9898a89d24364a9be3525d066a7492adf00b9e6b Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Wed, 3 Mar 2021 00:34:55 +0800 Subject: [PATCH 017/457] 1673 Add AutoAdjustChannel transform (#1675) * [DLMED] add AutoAdjustChannel transform Signed-off-by: Nic Ma * [DLMED] add dict version transform Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * 
[DLMED] fix doc-build issue Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] update according to Wenqi's comments Signed-off-by: Nic Ma * [DLMED] update doc-strings Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot --- docs/source/transforms.rst | 12 ++++ monai/data/image_reader.py | 39 ++++++++---- monai/transforms/__init__.py | 4 ++ monai/transforms/utility/array.py | 29 ++++++++- monai/transforms/utility/dictionary.py | 31 ++++++++++ tests/min_tests.py | 2 + tests/test_ensure_channel_first.py | 86 ++++++++++++++++++++++++++ tests/test_ensure_channel_firstd.py | 62 +++++++++++++++++++ 8 files changed, 253 insertions(+), 12 deletions(-) create mode 100644 tests/test_ensure_channel_first.py create mode 100644 tests/test_ensure_channel_firstd.py diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 00d8cb9053..dd10176de9 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -437,6 +437,12 @@ Utility :members: :special-members: __call__ +`EnsureChannelFirst` +"""""""""""""""""""" +.. autoclass:: EnsureChannelFirst + :members: + :special-members: __call__ + `RepeatChannel` """"""""""""""" .. autoclass:: RepeatChannel @@ -890,6 +896,12 @@ Utility (Dict) :members: :special-members: __call__ +`EnsureChannelFirstd` +""""""""""""""""""""" +.. autoclass:: EnsureChannelFirstd + :members: + :special-members: __call__ + `RepeatChanneld` """""""""""""""" .. autoclass:: RepeatChanneld diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index e458833979..dfbdaf5b41 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -109,6 +109,17 @@ def _copy_compatible_dict(from_dict: Dict, to_dict: Dict): ) +def _stack_images(image_list: List, meta_dict: Dict): + if len(image_list) > 1: + if meta_dict.get("original_channel_dim", None) not in ("no_channel", None): + raise RuntimeError("can not read a list of images which already have channel dimension.") + meta_dict["original_channel_dim"] = 0 + img_array = np.stack(image_list, axis=0) + else: + img_array = image_list[0] + return img_array + + class ITKReader(ImageReader): """ Load medical images based on ITK library. @@ -200,11 +211,12 @@ def get_data(self, img): header["original_affine"] = self._get_affine(i) header["affine"] = header["original_affine"].copy() header["spatial_shape"] = self._get_spatial_shape(i) - img_array.append(self._get_array_data(i)) + data = self._get_array_data(i) + img_array.append(data) + header["original_channel_dim"] = "no_channel" if len(data.shape) == len(header["spatial_shape"]) else -1 _copy_compatible_dict(header, compatible_meta) - img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] - return img_array_, compatible_meta + return _stack_images(img_array, compatible_meta), compatible_meta def _get_meta_dict(self, img) -> Dict: """ @@ -265,6 +277,7 @@ def _get_spatial_shape(self, img) -> np.ndarray: img: a ITK image object loaded from a image file. 
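# A standalone sketch of the _stack_images rule introduced above: several
# loaded files may only be stacked along a new channel axis when none of them
# already carries a channel dimension.
import numpy as np

def stack_images(image_list, meta_dict):
    if len(image_list) > 1:
        if meta_dict.get("original_channel_dim", None) not in ("no_channel", None):
            raise RuntimeError("can not stack images that already have a channel dimension.")
        meta_dict["original_channel_dim"] = 0  # the stacking axis becomes the channel
        return np.stack(image_list, axis=0)
    return image_list[0]

meta = {"original_channel_dim": "no_channel"}
print(stack_images([np.zeros((4, 4)), np.ones((4, 4))], meta).shape)  # (2, 4, 4)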
""" + # the img data should have no channel dim or the last dim is channel shape = list(itk.size(img)) shape.reverse() return np.asarray(shape) @@ -371,11 +384,12 @@ def get_data(self, img): i = nib.as_closest_canonical(i) header["affine"] = self._get_affine(i) header["spatial_shape"] = self._get_spatial_shape(i) - img_array.append(self._get_array_data(i)) + data = self._get_array_data(i) + img_array.append(data) + header["original_channel_dim"] = "no_channel" if len(data.shape) == len(header["spatial_shape"]) else -1 _copy_compatible_dict(header, compatible_meta) - img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] - return img_array_, compatible_meta + return _stack_images(img_array, compatible_meta), compatible_meta def _get_meta_dict(self, img) -> Dict: """ @@ -408,6 +422,7 @@ def _get_spatial_shape(self, img) -> np.ndarray: """ ndim = img.header["dim"][0] spatial_rank = min(ndim, 3) + # the img data should have no channel dim or the last dim is channel return np.asarray(img.header["dim"][1 : spatial_rank + 1]) def _get_array_data(self, img) -> np.ndarray: @@ -504,12 +519,12 @@ def get_data(self, img): for i in ensure_tuple(img): header = {} if isinstance(i, np.ndarray): + # can not detect the channel dim of numpy array, use all the dims as spatial_shape header["spatial_shape"] = i.shape img_array.append(i) _copy_compatible_dict(header, compatible_meta) - img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] - return img_array_, compatible_meta + return _stack_images(img_array, compatible_meta), compatible_meta class PILReader(ImageReader): @@ -582,11 +597,12 @@ def get_data(self, img): for i in ensure_tuple(img): header = self._get_meta_dict(i) header["spatial_shape"] = self._get_spatial_shape(i) - img_array.append(np.asarray(i)) + data = np.asarray(i) + img_array.append(data) + header["original_channel_dim"] = "no_channel" if len(data.shape) == len(header["spatial_shape"]) else -1 _copy_compatible_dict(header, compatible_meta) - img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] - return img_array_, compatible_meta + return _stack_images(img_array, compatible_meta), compatible_meta def _get_meta_dict(self, img) -> Dict: """ @@ -608,4 +624,5 @@ def _get_spatial_shape(self, img) -> np.ndarray: Args: img: a PIL Image object loaded from a image file. 
""" + # the img data should have no channel dim or the last dim is channel return np.asarray((img.width, img.height)) diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index cd5b195bd3..a8d647b657 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -250,6 +250,7 @@ CastToType, ConvertToMultiChannelBasedOnBratsClasses, DataStats, + EnsureChannelFirst, FgBgToIndices, Identity, LabelToMask, @@ -296,6 +297,9 @@ DeleteItemsd, DeleteItemsD, DeleteItemsDict, + EnsureChannelFirstd, + EnsureChannelFirstD, + EnsureChannelFirstDict, FgBgToIndicesd, FgBgToIndicesD, FgBgToIndicesDict, diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 24d2feb781..62daf9309c 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -15,7 +15,7 @@ import logging import time -from typing import TYPE_CHECKING, Callable, List, Optional, Sequence, Tuple, Union +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Sequence, Tuple, Union import numpy as np import torch @@ -39,6 +39,7 @@ "AsChannelFirst", "AsChannelLast", "AddChannel", + "EnsureChannelFirst", "RepeatChannel", "RemoveRepeatedChannel", "SplitChannel", @@ -149,6 +150,32 @@ def __call__(self, img: NdarrayTensor): return img[None] +class EnsureChannelFirst(Transform): + """ + Automatically adjust or add the channel dimension of input data to ensure `channel_first` shape. + It extracts the `original_channel_dim` info from provided meta_data dictionary. + Typical values of `original_channel_dim` can be: "no_channel", 0, -1. + Convert the data to `channel_first` based on the `original_channel_dim` information. + + """ + + def __call__(self, img: np.ndarray, meta_dict: Optional[Dict] = None): + """ + Apply the transform to `img`. + """ + if not isinstance(meta_dict, dict): + raise ValueError("meta_dict must be a dictionay data.") + + channel_dim = meta_dict.get("original_channel_dim", None) + + if channel_dim is None: + raise ValueError("meta_dict must contain `original_channel_dim` information.") + elif channel_dim == "no_channel": + return AddChannel()(img) + else: + return AsChannelFirst(channel_dim=channel_dim)(img) + + class RepeatChannel(Transform): """ Repeat channel data to construct expected input shape for models. diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index e9d923d0fd..4a0808fdbb 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -31,6 +31,7 @@ CastToType, ConvertToMultiChannelBasedOnBratsClasses, DataStats, + EnsureChannelFirst, FgBgToIndices, Identity, LabelToMask, @@ -60,6 +61,7 @@ "AsChannelFirstd", "AsChannelLastd", "AddChanneld", + "EnsureChannelFirstd", "RepeatChanneld", "RemoveRepeatedChanneld", "SplitChanneld", @@ -89,6 +91,8 @@ "AsChannelLastDict", "AddChannelD", "AddChannelDict", + "EnsureChannelFirstD", + "EnsureChannelFirstDict", "RandLambdaD", "RandLambdaDict", "RepeatChannelD", @@ -217,6 +221,32 @@ def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, Nda return d +class EnsureChannelFirstd(MapTransform): + """ + Dictionary-based wrapper of :py:class:`monai.transforms.EnsureChannelFirst`. + """ + + def __init__(self, keys: KeysCollection, meta_key_postfix: str = "meta_dict") -> None: + """ + Args: + keys: keys of the corresponding items to be transformed. 
+ See also: :py:class:`monai.transforms.compose.MapTransform` + meta_key_postfix: `key_{postfix}` was used to store the metadata in `LoadImaged`. + So need the key to extract metadata for channel dim information, default is `meta_dict`. + For example, for data with key `image`, metadata by default is in `image_meta_dict`. + + """ + super().__init__(keys) + self.adjuster = EnsureChannelFirst() + self.meta_key_postfix = meta_key_postfix + + def __call__(self, data) -> Dict[Hashable, np.ndarray]: + d = dict(data) + for key in self.keys: + d[key] = self.adjuster(d[key], d[f"{key}_{self.meta_key_postfix}"]) + return d + + class RepeatChanneld(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.RepeatChannel`. @@ -894,6 +924,7 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torc AsChannelFirstD = AsChannelFirstDict = AsChannelFirstd AsChannelLastD = AsChannelLastDict = AsChannelLastd AddChannelD = AddChannelDict = AddChanneld +EnsureChannelFirstD = EnsureChannelFirstDict = EnsureChannelFirstd RemoveRepeatedChannelD = RemoveRepeatedChannelDict = RemoveRepeatedChanneld RepeatChannelD = RepeatChannelDict = RepeatChanneld SplitChannelD = SplitChannelDict = SplitChanneld diff --git a/tests/min_tests.py b/tests/min_tests.py index 999a1aeaa0..83c1ceea9f 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -109,6 +109,8 @@ def run_testsuit(): "test_deepgrow_dataset", "test_save_image", "test_save_imaged", + "test_ensure_channel_first", + "test_ensure_channel_firstd", ] assert sorted(exclude_cases) == sorted(set(exclude_cases)), f"Duplicated items in {exclude_cases}" diff --git a/tests/test_ensure_channel_first.py b/tests/test_ensure_channel_first.py new file mode 100644 index 0000000000..ff656f2e24 --- /dev/null +++ b/tests/test_ensure_channel_first.py @@ -0,0 +1,86 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
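# The dictionary variant reads the channel information from the companion
# "{key}_{meta_key_postfix}" entry produced by LoadImaged; a sketch (the file
# name is hypothetical):
from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged

pipeline = Compose([
    LoadImaged(keys="image"),           # stores the header in "image_meta_dict"
    EnsureChannelFirstd(keys="image"),  # consumes image_meta_dict["original_channel_dim"]
])
print(pipeline({"image": "img.nii.gz"})["image"].shape)  # channel-first, e.g. (1, H, W, D)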
+ +import os +import tempfile +import unittest + +import itk +import nibabel as nib +import numpy as np +from parameterized import parameterized +from PIL import Image + +from monai.data import ITKReader +from monai.transforms import EnsureChannelFirst, LoadImage + +TEST_CASE_1 = [{"image_only": False}, ["test_image.nii.gz"], None] + +TEST_CASE_2 = [{"image_only": False}, ["test_image.nii.gz"], -1] + +TEST_CASE_3 = [ + {"image_only": False}, + ["test_image.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], + None, +] + +TEST_CASE_4 = [{"reader": ITKReader(), "image_only": False}, ["test_image.nii.gz"], None] + +TEST_CASE_5 = [{"reader": ITKReader(), "image_only": False}, ["test_image.nii.gz"], -1] + +TEST_CASE_6 = [ + {"reader": ITKReader(), "image_only": False}, + ["test_image.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], + None, +] + +TEST_CASE_7 = [ + {"image_only": False, "reader": ITKReader(pixel_type=itk.UC)}, + "tests/testing_data/CT_DICOM", + None, +] + + +class TestEnsureChannelFirst(unittest.TestCase): + @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6]) + def test_load_nifti(self, input_param, filenames, original_channel_dim): + if original_channel_dim is None: + test_image = np.random.rand(128, 128, 128) + elif original_channel_dim == -1: + test_image = np.random.rand(128, 128, 128, 1) + + with tempfile.TemporaryDirectory() as tempdir: + for i, name in enumerate(filenames): + filenames[i] = os.path.join(tempdir, name) + nib.save(nib.Nifti1Image(test_image, np.eye(4)), filenames[i]) + result, header = LoadImage(**input_param)(filenames) + result = EnsureChannelFirst()(result, header) + self.assertEqual(result.shape[0], len(filenames)) + + @parameterized.expand([TEST_CASE_7]) + def test_itk_dicom_series_reader(self, input_param, filenames, original_channel_dim): + result, header = LoadImage(**input_param)(filenames) + result = EnsureChannelFirst()(result, header) + self.assertEqual(result.shape[0], 1) + + def test_load_png(self): + spatial_size = (256, 256, 3) + test_image = np.random.randint(0, 256, size=spatial_size) + with tempfile.TemporaryDirectory() as tempdir: + filename = os.path.join(tempdir, "test_image.png") + Image.fromarray(test_image.astype("uint8")).save(filename) + result, header = LoadImage(image_only=False)(filename) + result = EnsureChannelFirst()(result, header) + self.assertEqual(result.shape[0], 3) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_ensure_channel_firstd.py b/tests/test_ensure_channel_firstd.py new file mode 100644 index 0000000000..a5298f4453 --- /dev/null +++ b/tests/test_ensure_channel_firstd.py @@ -0,0 +1,62 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import tempfile +import unittest + +import nibabel as nib +import numpy as np +from parameterized import parameterized +from PIL import Image + +from monai.transforms import EnsureChannelFirstd, LoadImaged + +TEST_CASE_1 = [{"keys": "img"}, ["test_image.nii.gz"], None] + +TEST_CASE_2 = [{"keys": "img"}, ["test_image.nii.gz"], -1] + +TEST_CASE_3 = [ + {"keys": "img"}, + ["test_image.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], + None, +] + + +class TestEnsureChannelFirstd(unittest.TestCase): + @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) + def test_load_nifti(self, input_param, filenames, original_channel_dim): + if original_channel_dim is None: + test_image = np.random.rand(128, 128, 128) + elif original_channel_dim == -1: + test_image = np.random.rand(128, 128, 128, 1) + + with tempfile.TemporaryDirectory() as tempdir: + for i, name in enumerate(filenames): + filenames[i] = os.path.join(tempdir, name) + nib.save(nib.Nifti1Image(test_image, np.eye(4)), filenames[i]) + result = LoadImaged(**input_param)({"img": filenames}) + result = EnsureChannelFirstd(**input_param)(result) + self.assertEqual(result["img"].shape[0], len(filenames)) + + def test_load_png(self): + spatial_size = (256, 256, 3) + test_image = np.random.randint(0, 256, size=spatial_size) + with tempfile.TemporaryDirectory() as tempdir: + filename = os.path.join(tempdir, "test_image.png") + Image.fromarray(test_image.astype("uint8")).save(filename) + result = LoadImaged(keys="img")({"img": filename}) + result = EnsureChannelFirstd(keys="img")(result) + self.assertEqual(result["img"].shape[0], 3) + + +if __name__ == "__main__": + unittest.main() From 421c0a9c3453672c608a4dc06ddbfe3d7b939c1a Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 3 Mar 2021 12:44:36 +0000 Subject: [PATCH 018/457] [1662] Allow missing keys (#1676) Allow missing keys --- monai/apps/deepgrow/transforms.py | 40 +++-- monai/transforms/croppad/dictionary.py | 131 +++++++++------ monai/transforms/intensity/dictionary.py | 163 +++++++++++++----- monai/transforms/io/dictionary.py | 12 +- monai/transforms/post/dictionary.py | 45 +++-- monai/transforms/spatial/dictionary.py | 158 +++++++++++------- monai/transforms/transform.py | 33 +++- monai/transforms/utility/dictionary.py | 202 +++++++++++++---------- 8 files changed, 510 insertions(+), 274 deletions(-) diff --git a/monai/apps/deepgrow/transforms.py b/monai/apps/deepgrow/transforms.py index cc01a717ad..644507092d 100644 --- a/monai/apps/deepgrow/transforms.py +++ b/monai/apps/deepgrow/transforms.py @@ -437,6 +437,7 @@ class SpatialCropForegroundd(MapTransform): end_coord_key: key to record the end coordinate of spatial bounding box for foreground. original_shape_key: key to record original shape for foreground. cropped_shape_key: key to record cropped shape for foreground. + allow_missing_keys: don't raise exception if key is missing. 
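# A sketch of the allow_missing_keys behaviour this patch threads through the
# dictionary transforms: absent keys can now be skipped instead of raising.
import numpy as np
from monai.transforms import AddChanneld

data = {"image": np.zeros((8, 8))}  # no "label" entry
# AddChanneld(keys=["image", "label"])(data) would raise a KeyError
lenient = AddChanneld(keys=["image", "label"], allow_missing_keys=True)
print(lenient(data)["image"].shape)  # (1, 8, 8); "label" is silently skipped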
""" def __init__( @@ -452,8 +453,9 @@ def __init__( end_coord_key: str = "foreground_end_coord", original_shape_key: str = "foreground_original_shape", cropped_shape_key: str = "foreground_cropped_shape", + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.source_key = source_key self.spatial_size = list(spatial_size) @@ -482,7 +484,7 @@ def __call__(self, data): else: cropper = SpatialCrop(roi_start=box_start, roi_end=box_end) - for key in self.keys: + for key in self.key_iterator(d): meta_key = f"{key}_{self.meta_key_postfix}" d[meta_key][self.start_coord_key] = box_start d[meta_key][self.end_coord_key] = box_end @@ -629,6 +631,7 @@ class SpatialCropGuidanced(MapTransform): end_coord_key: key to record the end coordinate of spatial bounding box for foreground. original_shape_key: key to record original shape for foreground. cropped_shape_key: key to record cropped shape for foreground. + allow_missing_keys: don't raise exception if key is missing. """ def __init__( @@ -642,8 +645,9 @@ def __init__( end_coord_key: str = "foreground_end_coord", original_shape_key: str = "foreground_original_shape", cropped_shape_key: str = "foreground_cropped_shape", + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.guidance = guidance self.spatial_size = list(spatial_size) @@ -697,7 +701,7 @@ def __call__(self, data): cropper = SpatialCrop(roi_start=box_start, roi_end=box_end) box_start, box_end = cropper.roi_start, cropper.roi_end - for key in self.keys: + for key in self.key_iterator(d): if not np.array_equal(d[key].shape[1:], original_spatial_shape): raise RuntimeError("All the image specified in keys should have same spatial shape") meta_key = f"{key}_{self.meta_key_postfix}" @@ -804,6 +808,7 @@ class RestoreLabeld(MapTransform): end_coord_key: key that records the end coordinate of spatial bounding box for foreground. original_shape_key: key that records original shape for foreground. cropped_shape_key: key that records cropped shape for foreground. + allow_missing_keys: don't raise exception if key is missing. 
""" def __init__( @@ -818,8 +823,9 @@ def __init__( end_coord_key: str = "foreground_end_coord", original_shape_key: str = "foreground_original_shape", cropped_shape_key: str = "foreground_cropped_shape", + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.ref_image = ref_image self.slice_only = slice_only self.mode = ensure_tuple_rep(mode, len(self.keys)) @@ -834,15 +840,15 @@ def __call__(self, data): d = dict(data) meta_dict: Dict = d[f"{self.ref_image}_{self.meta_key_postfix}"] - for idx, key in enumerate(self.keys): + for key, mode, align_corners in self.key_iterator(d, self.mode, self.align_corners): image = d[key] # Undo Resize current_shape = image.shape cropped_shape = meta_dict[self.cropped_shape_key] if np.any(np.not_equal(current_shape, cropped_shape)): - resizer = Resize(spatial_size=cropped_shape[1:], mode=self.mode[idx]) - image = resizer(image, mode=self.mode[idx], align_corners=self.align_corners[idx]) + resizer = Resize(spatial_size=cropped_shape[1:], mode=mode) + image = resizer(image, mode=mode, align_corners=align_corners) # Undo Crop original_shape = meta_dict[self.original_shape_key] @@ -862,8 +868,8 @@ def __call__(self, data): spatial_size = spatial_shape[-len(current_size) :] if np.any(np.not_equal(current_size, spatial_size)): - resizer = Resize(spatial_size=spatial_size, mode=self.mode[idx]) - result = resizer(result, mode=self.mode[idx], align_corners=self.align_corners[idx]) + resizer = Resize(spatial_size=spatial_size, mode=mode) + result = resizer(result, mode=mode, align_corners=align_corners) # Undo Slicing slice_idx = meta_dict.get("slice_idx") @@ -898,10 +904,18 @@ class Fetch2DSliced(MapTransform): default is `meta_dict`, the meta data is a dictionary object. For example, to handle key `image`, read/write affine matrices from the metadata `image_meta_dict` dictionary's `affine` field. + allow_missing_keys: don't raise exception if key is missing. """ - def __init__(self, keys, guidance="guidance", axis: int = 0, meta_key_postfix: str = "meta_dict"): - super().__init__(keys) + def __init__( + self, + keys, + guidance="guidance", + axis: int = 0, + meta_key_postfix: str = "meta_dict", + allow_missing_keys: bool = False, + ): + super().__init__(keys, allow_missing_keys) self.guidance = guidance self.axis = axis self.meta_key_postfix = meta_key_postfix @@ -920,7 +934,7 @@ def __call__(self, data): guidance = d[self.guidance] if len(guidance) < 3: raise RuntimeError("Guidance does not container slice_idx!") - for key in self.keys: + for key in self.key_iterator(d): img_slice, idx = self._apply(d[key], guidance) d[key] = img_slice d[f"{key}_{self.meta_key_postfix}"]["slice_idx"] = idx diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 9739c6322f..823b2dd3f4 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -94,6 +94,7 @@ def __init__( spatial_size: Union[Sequence[int], int], method: Union[Method, str] = Method.SYMMETRIC, mode: NumpyPadModeSequence = NumpyPadMode.CONSTANT, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -108,15 +109,16 @@ def __init__( One of the listed string values or a user supplied function. Defaults to ``"constant"``. See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html It also can be a sequence of string, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. 
""" - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.mode = ensure_tuple_rep(mode, len(self.keys)) self.padder = SpatialPad(spatial_size, method) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key, m in zip(self.keys, self.mode): + for key, m in self.key_iterator(d, self.mode): d[key] = self.padder(d[key], mode=m) return d @@ -132,6 +134,7 @@ def __init__( keys: KeysCollection, spatial_border: Union[Sequence[int], int], mode: NumpyPadModeSequence = NumpyPadMode.CONSTANT, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -153,15 +156,16 @@ def __init__( One of the listed string values or a user supplied function. Defaults to ``"constant"``. See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html It also can be a sequence of string, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.mode = ensure_tuple_rep(mode, len(self.keys)) self.padder = BorderPad(spatial_border=spatial_border) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key, m in zip(self.keys, self.mode): + for key, m in self.key_iterator(d, self.mode): d[key] = self.padder(d[key], mode=m) return d @@ -173,7 +177,11 @@ class DivisiblePadd(MapTransform): """ def __init__( - self, keys: KeysCollection, k: Union[Sequence[int], int], mode: NumpyPadModeSequence = NumpyPadMode.CONSTANT + self, + keys: KeysCollection, + k: Union[Sequence[int], int], + mode: NumpyPadModeSequence = NumpyPadMode.CONSTANT, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -187,17 +195,18 @@ def __init__( One of the listed string values or a user supplied function. Defaults to ``"constant"``. See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html It also can be a sequence of string, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. See also :py:class:`monai.transforms.SpatialPad` """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.mode = ensure_tuple_rep(mode, len(self.keys)) self.padder = DivisiblePad(k=k) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key, m in zip(self.keys, self.mode): + for key, m in self.key_iterator(d, self.mode): d[key] = self.padder(d[key], mode=m) return d @@ -216,6 +225,7 @@ def __init__( roi_size: Optional[Sequence[int]] = None, roi_start: Optional[Sequence[int]] = None, roi_end: Optional[Sequence[int]] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -225,13 +235,14 @@ def __init__( roi_size: size of the crop ROI. roi_start: voxel coordinates for start of the crop ROI. roi_end: voxel coordinates for end of the crop ROI. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.cropper = SpatialCrop(roi_center, roi_size, roi_start, roi_end) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.cropper(d[key]) return d @@ -245,15 +256,18 @@ class CenterSpatialCropd(MapTransform): See also: monai.transforms.MapTransform roi_size: the size of the crop region e.g. 
[224,224,128] If its components have non-positive values, the corresponding size of input image will be used. + allow_missing_keys: don't raise exception if key is missing. """ - def __init__(self, keys: KeysCollection, roi_size: Union[Sequence[int], int]) -> None: - super().__init__(keys) + def __init__( + self, keys: KeysCollection, roi_size: Union[Sequence[int], int], allow_missing_keys: bool = False + ) -> None: + super().__init__(keys, allow_missing_keys) self.cropper = CenterSpatialCrop(roi_size) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.cropper(d[key]) return d @@ -274,6 +288,7 @@ class RandSpatialCropd(RandomizableTransform, MapTransform): random_center: crop at random position as center or the image center. random_size: crop with random size or specific size ROI. The actual size is sampled from `randint(roi_size, img_size)`. + allow_missing_keys: don't raise exception if key is missing. """ def __init__( @@ -282,9 +297,10 @@ def __init__( roi_size: Union[Sequence[int], int], random_center: bool = True, random_size: bool = True, + allow_missing_keys: bool = False, ) -> None: RandomizableTransform.__init__(self) - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) self.roi_size = roi_size self.random_center = random_center self.random_size = random_size @@ -304,7 +320,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda self.randomize(d[self.keys[0]].shape[1:]) # image shape from the first data key if self._size is None: raise AssertionError - for key in self.keys: + for key in self.key_iterator(d): if self.random_center: d[key] = d[key][self._slices] else: @@ -331,6 +347,7 @@ class RandSpatialCropSamplesd(RandomizableTransform, MapTransform): random_center: crop at random position as center or the image center. random_size: crop with random size or specific size ROI. The actual size is sampled from `randint(roi_size, img_size)`. + allow_missing_keys: don't raise exception if key is missing. Raises: ValueError: When ``num_samples`` is nonpositive. @@ -344,13 +361,14 @@ def __init__( num_samples: int, random_center: bool = True, random_size: bool = True, + allow_missing_keys: bool = False, ) -> None: RandomizableTransform.__init__(self) - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) if num_samples < 1: raise ValueError(f"num_samples must be positive, got {num_samples}.") self.num_samples = num_samples - self.cropper = RandSpatialCropd(keys, roi_size, random_center, random_size) + self.cropper = RandSpatialCropd(keys, roi_size, random_center, random_size, allow_missing_keys) def set_random_state( self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None @@ -388,6 +406,7 @@ def __init__( margin: int = 0, start_coord_key: str = "foreground_start_coord", end_coord_key: str = "foreground_end_coord", + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -400,8 +419,9 @@ def __init__( margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims. start_coord_key: key to record the start coordinate of spatial bounding box for foreground. end_coord_key: key to record the end coordinate of spatial bounding box for foreground. + allow_missing_keys: don't raise exception if key is missing. 
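# CropForegroundd crops every listed key with the bounding box computed from
# source_key and records the box coordinates; a sketch:
import numpy as np
from monai.transforms import CropForegroundd

img = np.zeros((1, 8, 8))
img[0, 2:6, 3:7] = 1.0
out = CropForegroundd(keys=["image"], source_key="image")({"image": img})
print(out["image"].shape)                                          # (1, 4, 4)
print(out["foreground_start_coord"], out["foreground_end_coord"])  # [2 3] [6 7]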
""" - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.source_key = source_key self.select_fn = select_fn self.channel_indices = ensure_tuple(channel_indices) if channel_indices is not None else None @@ -417,7 +437,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda d[self.start_coord_key] = np.asarray(box_start) d[self.end_coord_key] = np.asarray(box_end) cropper = SpatialCrop(roi_start=box_start, roi_end=box_end) - for key in self.keys: + for key in self.key_iterator(d): d[key] = cropper(d[key]) return d @@ -435,6 +455,7 @@ class RandWeightedCropd(RandomizableTransform, MapTransform): If its components have non-positive values, the corresponding size of `img` will be used. num_samples: number of samples (image patches) to take in the returned list. center_coord_key: if specified, the actual sampling location will be stored with the corresponding key. + allow_missing_keys: don't raise exception if key is missing. See Also: :py:class:`monai.transforms.RandWeightedCrop` @@ -447,9 +468,10 @@ def __init__( spatial_size: Union[Sequence[int], int], num_samples: int = 1, center_coord_key: Optional[str] = None, + allow_missing_keys: bool = False, ): RandomizableTransform.__init__(self) - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) self.spatial_size = ensure_tuple(spatial_size) self.w_key = w_key self.num_samples = int(num_samples) @@ -467,22 +489,22 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n _spatial_size = fall_back_tuple(self.spatial_size, d[self.w_key].shape[1:]) results: List[Dict[Hashable, np.ndarray]] = [{} for _ in range(self.num_samples)] - for key in data.keys(): - if key in self.keys: - img = d[key] - if img.shape[1:] != d[self.w_key].shape[1:]: - raise ValueError( - f"data {key} and weight map {self.w_key} spatial shape mismatch: " - f"{img.shape[1:]} vs {d[self.w_key].shape[1:]}." - ) - for i, center in enumerate(self.centers): - cropper = SpatialCrop(roi_center=center, roi_size=_spatial_size) - results[i][key] = cropper(img) - if self.center_coord_key: - results[i][self.center_coord_key] = center - else: - for i in range(self.num_samples): - results[i][key] = data[key] + for key in self.key_iterator(d): + img = d[key] + if img.shape[1:] != d[self.w_key].shape[1:]: + raise ValueError( + f"data {key} and weight map {self.w_key} spatial shape mismatch: " + f"{img.shape[1:]} vs {d[self.w_key].shape[1:]}." + ) + for i, center in enumerate(self.centers): + cropper = SpatialCrop(roi_center=center, roi_size=_spatial_size) + results[i][key] = cropper(img) + if self.center_coord_key: + results[i][self.center_coord_key] = center + # fill in the extra keys with unmodified data + for key in set(data.keys()).difference(set(self.keys)): + for i in range(self.num_samples): + results[i][key] = data[key] return results @@ -517,6 +539,7 @@ class RandCropByPosNegLabeld(RandomizableTransform, MapTransform): `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices_key` and `bg_indices_key` together, expect to be 1 dim array of spatial indices after flattening. a typical usage is to call `FgBgToIndicesd` transform first and cache the results. + allow_missing_keys: don't raise exception if key is missing. Raises: ValueError: When ``pos`` or ``neg`` are negative. 
@@ -536,9 +559,10 @@ def __init__( image_threshold: float = 0.0, fg_indices_key: Optional[str] = None, bg_indices_key: Optional[str] = None, + allow_missing_keys: bool = False, ) -> None: RandomizableTransform.__init__(self) - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) self.label_key = label_key self.spatial_size: Union[Tuple[int, ...], Sequence[int], int] = spatial_size if pos < 0 or neg < 0: @@ -583,15 +607,15 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n if self.centers is None: raise AssertionError results: List[Dict[Hashable, np.ndarray]] = [{} for _ in range(self.num_samples)] - for key in data.keys(): - if key in self.keys: + + for i, center in enumerate(self.centers): + for key in self.key_iterator(d): img = d[key] - for i, center in enumerate(self.centers): - cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size) # type: ignore - results[i][key] = cropper(img) - else: - for i in range(self.num_samples): - results[i][key] = data[key] + cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size) # type: ignore + results[i][key] = cropper(img) + # fill in the extra keys with unmodified data + for key in set(data.keys()).difference(set(self.keys)): + results[i][key] = data[key] return results @@ -609,6 +633,7 @@ class ResizeWithPadOrCropd(MapTransform): ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``} One of the listed string values or a user supplied function for padding. Defaults to ``"constant"``. See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html + allow_missing_keys: don't raise exception if key is missing. """ @@ -617,13 +642,14 @@ def __init__( keys: KeysCollection, spatial_size: Union[Sequence[int], int], mode: Union[NumpyPadMode, str] = NumpyPadMode.CONSTANT, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.padcropper = ResizeWithPadOrCrop(spatial_size=spatial_size, mode=mode) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.padcropper(d[key]) return d @@ -638,10 +664,17 @@ class BoundingRectd(MapTransform): bbox_key_postfix: the output bounding box coordinates will be written to the value of `{key}_{bbox_key_postfix}`. select_fn: function to select expected foreground, default is to select values > 0. + allow_missing_keys: don't raise exception if key is missing. """ - def __init__(self, keys: KeysCollection, bbox_key_postfix: str = "bbox", select_fn: Callable = lambda x: x > 0): - super().__init__(keys=keys) + def __init__( + self, + keys: KeysCollection, + bbox_key_postfix: str = "bbox", + select_fn: Callable = lambda x: x > 0, + allow_missing_keys: bool = False, + ): + super().__init__(keys, allow_missing_keys) self.bbox = BoundingRect(select_fn=select_fn) self.bbox_key_postfix = bbox_key_postfix @@ -650,7 +683,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda See also: :py:class:`monai.transforms.utils.generate_spatial_bounding_box`. 
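# BoundingRectd writes the foreground bounding box of each key to
# "{key}_{bbox_key_postfix}"; a sketch:
import numpy as np
from monai.transforms import BoundingRectd

img = np.zeros((1, 8, 8))
img[0, 2:6, 3:7] = 1.0
out = BoundingRectd(keys="image")({"image": img})
# one row per channel: [dim0_start, dim0_end, dim1_start, dim1_end]
print(out["image_bbox"].shape)  # (1, 4)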
""" d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): bbox = self.bbox(d[key]) key_to_add = f"{key}_{self.bbox_key_postfix}" if key_to_add in d: diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 7d0d66d2ba..4602d59379 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -103,12 +103,18 @@ class RandGaussianNoised(RandomizableTransform, MapTransform): prob: Probability to add Gaussian noise. mean: Mean or “centre” of the distribution. std: Standard deviation (spread) of distribution. + allow_missing_keys: don't raise exception if key is missing. """ def __init__( - self, keys: KeysCollection, prob: float = 0.1, mean: Union[Sequence[float], float] = 0.0, std: float = 0.1 + self, + keys: KeysCollection, + prob: float = 0.1, + mean: Union[Sequence[float], float] = 0.0, + std: float = 0.1, + allow_missing_keys: bool = False, ) -> None: - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.mean = ensure_tuple_rep(mean, len(self.keys)) self.std = std @@ -129,7 +135,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda raise AssertionError if not self._do_transform: return d - for noise, key in zip(self._noise, self.keys): + for key, noise in self.key_iterator(d, self._noise): dtype = dtype_torch_to_numpy(d[key].dtype) if isinstance(d[key], torch.Tensor) else d[key].dtype d[key] = d[key] + noise.astype(dtype) return d @@ -140,19 +146,20 @@ class ShiftIntensityd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.ShiftIntensity`. """ - def __init__(self, keys: KeysCollection, offset: float) -> None: + def __init__(self, keys: KeysCollection, offset: float, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` offset: offset value to shift the intensity of image. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.shifter = ShiftIntensity(offset) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.shifter(d[key]) return d @@ -162,7 +169,13 @@ class RandShiftIntensityd(RandomizableTransform, MapTransform): Dictionary-based version :py:class:`monai.transforms.RandShiftIntensity`. """ - def __init__(self, keys: KeysCollection, offsets: Union[Tuple[float, float], float], prob: float = 0.1) -> None: + def __init__( + self, + keys: KeysCollection, + offsets: Union[Tuple[float, float], float], + prob: float = 0.1, + allow_missing_keys: bool = False, + ) -> None: """ Args: keys: keys of the corresponding items to be transformed. @@ -171,8 +184,9 @@ def __init__(self, keys: KeysCollection, offsets: Union[Tuple[float, float], flo if single number, offset value is picked from (-offsets, offsets). prob: probability of rotating. (Default 0.1, with 10% probability it returns a rotated array.) + allow_missing_keys: don't raise exception if key is missing. 
""" - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) if isinstance(offsets, (int, float)): @@ -192,7 +206,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda if not self._do_transform: return d shifter = ShiftIntensity(self._offset) - for key in self.keys: + for key in self.key_iterator(d): d[key] = shifter(d[key]) return d @@ -205,7 +219,12 @@ class ScaleIntensityd(MapTransform): """ def __init__( - self, keys: KeysCollection, minv: float = 0.0, maxv: float = 1.0, factor: Optional[float] = None + self, + keys: KeysCollection, + minv: float = 0.0, + maxv: float = 1.0, + factor: Optional[float] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -214,14 +233,15 @@ def __init__( minv: minimum value of output data. maxv: maximum value of output data. factor: factor scale by ``v = v * (1 + factor)``. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.scaler = ScaleIntensity(minv, maxv, factor) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.scaler(d[key]) return d @@ -231,7 +251,13 @@ class RandScaleIntensityd(RandomizableTransform, MapTransform): Dictionary-based version :py:class:`monai.transforms.RandScaleIntensity`. """ - def __init__(self, keys: KeysCollection, factors: Union[Tuple[float, float], float], prob: float = 0.1) -> None: + def __init__( + self, + keys: KeysCollection, + factors: Union[Tuple[float, float], float], + prob: float = 0.1, + allow_missing_keys: bool = False, + ) -> None: """ Args: keys: keys of the corresponding items to be transformed. @@ -240,9 +266,10 @@ def __init__(self, keys: KeysCollection, factors: Union[Tuple[float, float], flo if single number, factor value is picked from (-factors, factors). prob: probability of rotating. (Default 0.1, with 10% probability it returns a rotated array.) + allow_missing_keys: don't raise exception if key is missing. """ - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) if isinstance(factors, (int, float)): @@ -262,7 +289,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda if not self._do_transform: return d scaler = ScaleIntensity(minv=None, maxv=None, factor=self.factor) - for key in self.keys: + for key in self.key_iterator(d): d[key] = scaler(d[key]) return d @@ -282,6 +309,7 @@ class NormalizeIntensityd(MapTransform): channel_wise: if using calculated mean and std, calculate on each channel separately or calculate on the entire image directly. dtype: output data type, defaut to float32. + allow_missing_keys: don't raise exception if key is missing. 
""" def __init__( @@ -292,13 +320,14 @@ def __init__( nonzero: bool = False, channel_wise: bool = False, dtype: DtypeLike = np.float32, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.normalizer = NormalizeIntensity(subtrahend, divisor, nonzero, channel_wise, dtype) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.normalizer(d[key]) return d @@ -313,15 +342,23 @@ class ThresholdIntensityd(MapTransform): threshold: the threshold to filter intensity values. above: filter values above the threshold or below the threshold, default is True. cval: value to fill the remaining parts of the image, default is 0. + allow_missing_keys: don't raise exception if key is missing. """ - def __init__(self, keys: KeysCollection, threshold: float, above: bool = True, cval: float = 0.0) -> None: - super().__init__(keys) + def __init__( + self, + keys: KeysCollection, + threshold: float, + above: bool = True, + cval: float = 0.0, + allow_missing_keys: bool = False, + ) -> None: + super().__init__(keys, allow_missing_keys) self.filter = ThresholdIntensity(threshold, above, cval) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.filter(d[key]) return d @@ -338,17 +375,25 @@ class ScaleIntensityRanged(MapTransform): b_min: intensity target range min. b_max: intensity target range max. clip: whether to perform clip after scaling. + allow_missing_keys: don't raise exception if key is missing. """ def __init__( - self, keys: KeysCollection, a_min: float, a_max: float, b_min: float, b_max: float, clip: bool = False + self, + keys: KeysCollection, + a_min: float, + a_max: float, + b_min: float, + b_max: float, + clip: bool = False, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.scaler = ScaleIntensityRange(a_min, a_max, b_min, b_max, clip) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.scaler(d[key]) return d @@ -364,15 +409,16 @@ class AdjustContrastd(MapTransform): keys: keys of the corresponding items to be transformed. See also: monai.transforms.MapTransform gamma: gamma value to adjust the contrast as function. + allow_missing_keys: don't raise exception if key is missing. """ - def __init__(self, keys: KeysCollection, gamma: float) -> None: - super().__init__(keys) + def __init__(self, keys: KeysCollection, gamma: float, allow_missing_keys: bool = False) -> None: + super().__init__(keys, allow_missing_keys) self.adjuster = AdjustContrast(gamma) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.adjuster(d[key]) return d @@ -390,12 +436,17 @@ class RandAdjustContrastd(RandomizableTransform, MapTransform): prob: Probability of adjustment. gamma: Range of gamma values. If single number, value is picked from (0.5, gamma), default is (0.5, 4.5). + allow_missing_keys: don't raise exception if key is missing. 
""" def __init__( - self, keys: KeysCollection, prob: float = 0.1, gamma: Union[Tuple[float, float], float] = (0.5, 4.5) + self, + keys: KeysCollection, + prob: float = 0.1, + gamma: Union[Tuple[float, float], float] = (0.5, 4.5), + allow_missing_keys: bool = False, ) -> None: - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) if isinstance(gamma, (int, float)): @@ -423,7 +474,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda if not self._do_transform: return d adjuster = AdjustContrast(self.gamma_value) - for key in self.keys: + for key in self.key_iterator(d): d[key] = adjuster(d[key]) return d @@ -441,6 +492,7 @@ class ScaleIntensityRangePercentilesd(MapTransform): b_max: intensity target range max. clip: whether to perform clip after scaling. relative: whether to scale to the corresponding percentiles of [b_min, b_max] + allow_missing_keys: don't raise exception if key is missing. """ def __init__( @@ -452,13 +504,14 @@ def __init__( b_max: float, clip: bool = False, relative: bool = False, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.scaler = ScaleIntensityRangePercentiles(lower, upper, b_min, b_max, clip, relative) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.scaler(d[key]) return d @@ -477,6 +530,7 @@ class MaskIntensityd(MapTransform): if None, will extract the mask data from input data based on `mask_key`. mask_key: the key to extract mask data from input dictionary, only works when `mask_data` is None. + allow_missing_keys: don't raise exception if key is missing. """ @@ -485,14 +539,15 @@ def __init__( keys: KeysCollection, mask_data: Optional[np.ndarray] = None, mask_key: Optional[str] = None, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = MaskIntensity(mask_data) self.mask_key = mask_key if mask_data is None else None def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key], d[self.mask_key]) if self.mask_key is not None else self.converter(d[key]) return d @@ -509,16 +564,23 @@ class GaussianSmoothd(MapTransform): use it for all spatial dimensions. approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace". see also :py:meth:`monai.networks.layers.GaussianFilter`. + allow_missing_keys: don't raise exception if key is missing. """ - def __init__(self, keys: KeysCollection, sigma: Union[Sequence[float], float], approx: str = "erf") -> None: - super().__init__(keys) + def __init__( + self, + keys: KeysCollection, + sigma: Union[Sequence[float], float], + approx: str = "erf", + allow_missing_keys: bool = False, + ) -> None: + super().__init__(keys, allow_missing_keys) self.converter = GaussianSmooth(sigma, approx=approx) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -536,6 +598,7 @@ class RandGaussianSmoothd(RandomizableTransform, MapTransform): approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace". 
see also :py:meth:`monai.networks.layers.GaussianFilter`. prob: probability of Gaussian smooth. + allow_missing_keys: don't raise exception if key is missing. """ @@ -547,8 +610,9 @@ def __init__( sigma_z: Tuple[float, float] = (0.25, 1.5), approx: str = "erf", prob: float = 0.1, + allow_missing_keys: bool = False, ) -> None: - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.sigma_x = sigma_x self.sigma_y = sigma_y @@ -566,7 +630,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda self.randomize() if not self._do_transform: return d - for key in self.keys: + for key in self.key_iterator(d): sigma = ensure_tuple_size(tup=(self.x, self.y, self.z), dim=d[key].ndim - 1) d[key] = GaussianSmooth(sigma=sigma, approx=self.approx)(d[key]) return d @@ -588,6 +652,7 @@ class GaussianSharpend(MapTransform): alpha: weight parameter to compute the final result. approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace". see also :py:meth:`monai.networks.layers.GaussianFilter`. + allow_missing_keys: don't raise exception if key is missing. """ @@ -598,13 +663,14 @@ def __init__( sigma2: Union[Sequence[float], float] = 1.0, alpha: float = 30.0, approx: str = "erf", + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = GaussianSharpen(sigma1, sigma2, alpha, approx=approx) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -629,6 +695,7 @@ class RandGaussianSharpend(RandomizableTransform, MapTransform): approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace". see also :py:meth:`monai.networks.layers.GaussianFilter`. prob: probability of Gaussian sharpen. + allow_missing_keys: don't raise exception if key is missing. """ @@ -644,8 +711,9 @@ def __init__( alpha: Tuple[float, float] = (10.0, 30.0), approx: str = "erf", prob: float = 0.1, + allow_missing_keys: bool = False, ): - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.sigma1_x = sigma1_x self.sigma1_y = sigma1_y @@ -674,7 +742,7 @@ def __call__(self, data): self.randomize() if not self._do_transform: return d - for key in self.keys: + for key in self.key_iterator(d): sigma1 = ensure_tuple_size(tup=(self.x1, self.y1, self.z1), dim=d[key].ndim - 1) sigma2 = ensure_tuple_size(tup=(self.x2, self.y2, self.z2), dim=d[key].ndim - 1) d[key] = GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(d[key]) @@ -693,12 +761,17 @@ class RandHistogramShiftd(RandomizableTransform, MapTransform): a smaller number of control points allows for larger intensity shifts. if two values provided, number of control points selecting from range (min_value, max_value). prob: probability of histogram shift. + allow_missing_keys: don't raise exception if key is missing. 
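# GaussianSmoothd and its random variant above wrap the array transforms per
# key; a sketch blurring a single impulse:
import numpy as np
from monai.transforms import GaussianSmoothd

img = np.zeros((1, 9, 9), dtype=np.float32)
img[0, 4, 4] = 1.0
out = GaussianSmoothd(keys="img", sigma=1.0)({"img": img})
print(float(out["img"][0, 4, 4]))  # the peak is spread out, now well below 1.0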
""" def __init__( - self, keys: KeysCollection, num_control_points: Union[Tuple[int, int], int] = 10, prob: float = 0.1 + self, + keys: KeysCollection, + num_control_points: Union[Tuple[int, int], int] = 10, + prob: float = 0.1, + allow_missing_keys: bool = False, ) -> None: - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) if isinstance(num_control_points, int): if num_control_points <= 2: @@ -726,7 +799,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda self.randomize() if not self._do_transform: return d - for key in self.keys: + for key in self.key_iterator(d): img_min, img_max = d[key].min(), d[key].max() reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index d9b6b5e6ab..ea965255d5 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -60,6 +60,7 @@ def __init__( meta_key_postfix: str = "meta_dict", overwriting: bool = False, image_only: bool = False, + allow_missing_keys: bool = False, *args, **kwargs, ) -> None: @@ -79,10 +80,11 @@ def __init__( default is False, which will raise exception if encountering existing key. image_only: if True return dictionary containing just only the image volumes, otherwise return dictionary containing image data array and header dict per input key. + allow_missing_keys: don't raise exception if key is missing. args: additional parameters for reader if providing a reader name. kwargs: additional parameters for reader if providing a reader name. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self._loader = LoadImage(reader, image_only, dtype, *args, **kwargs) if not isinstance(meta_key_postfix, str): raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.") @@ -99,7 +101,7 @@ def __call__(self, data, reader: Optional[ImageReader] = None): """ d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): data = self._loader(d[key], reader) if self._loader.image_only: if not isinstance(data, np.ndarray): @@ -163,6 +165,7 @@ class SaveImaged(MapTransform): it's used for NIfTI format only. save_batch: whether the import image is a batch data, default to `False`. usually pre-transforms run for channel first data, while post-transforms run for batch data. + allow_missing_keys: don't raise exception if key is missing. 
""" @@ -180,8 +183,9 @@ def __init__( dtype: DtypeLike = np.float64, output_dtype: DtypeLike = np.float32, save_batch: bool = False, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.meta_key_postfix = meta_key_postfix self._saver = SaveImage( output_dir=output_dir, @@ -198,7 +202,7 @@ def __init__( def __call__(self, data): d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): meta_data = d[f"{key}_{self.meta_key_postfix}"] if self.meta_key_postfix is not None else None self._saver(img=d[key], meta_data=meta_data) return d diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py index 85abdac0ac..42796e2412 100644 --- a/monai/transforms/post/dictionary.py +++ b/monai/transforms/post/dictionary.py @@ -71,6 +71,7 @@ def __init__( sigmoid: Union[Sequence[bool], bool] = False, softmax: Union[Sequence[bool], bool] = False, other: Optional[Union[Sequence[Callable], Callable]] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -83,9 +84,10 @@ def __init__( other: callable function to execute other activation layers, for example: `other = lambda x: torch.tanh(x)`. it also can be a sequence of Callable, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.sigmoid = ensure_tuple_rep(sigmoid, len(self.keys)) self.softmax = ensure_tuple_rep(softmax, len(self.keys)) self.other = ensure_tuple_rep(other, len(self.keys)) @@ -93,8 +95,8 @@ def __init__( def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: d = dict(data) - for idx, key in enumerate(self.keys): - d[key] = self.converter(d[key], self.sigmoid[idx], self.softmax[idx], self.other[idx]) + for key, sigmoid, softmax, other in self.key_iterator(d, self.sigmoid, self.softmax, self.other): + d[key] = self.converter(d[key], sigmoid, softmax, other) return d @@ -111,6 +113,7 @@ def __init__( n_classes: Optional[Union[Sequence[int], int]] = None, threshold_values: Union[Sequence[bool], bool] = False, logit_thresh: Union[Sequence[float], float] = 0.5, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -126,9 +129,10 @@ def __init__( it also can be a sequence of bool, each element corresponds to a key in ``keys``. logit_thresh: the threshold value for thresholding operation, default is 0.5. it also can be a sequence of float, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. 
""" - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.argmax = ensure_tuple_rep(argmax, len(self.keys)) self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys)) self.n_classes = ensure_tuple_rep(n_classes, len(self.keys)) @@ -138,14 +142,16 @@ def __init__( def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: d = dict(data) - for idx, key in enumerate(self.keys): + for key, argmax, to_onehot, n_classes, threshold_values, logit_thresh in self.key_iterator( + d, self.argmax, self.to_onehot, self.n_classes, self.threshold_values, self.logit_thresh + ): d[key] = self.converter( d[key], - self.argmax[idx], - self.to_onehot[idx], - self.n_classes[idx], - self.threshold_values[idx], - self.logit_thresh[idx], + argmax, + to_onehot, + n_classes, + threshold_values, + logit_thresh, ) return d @@ -161,6 +167,7 @@ def __init__( applied_labels: Union[Sequence[int], int], independent: bool = True, connectivity: Optional[int] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -175,14 +182,15 @@ def __init__( connectivity: Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor. Accepted values are ranging from 1 to input.ndim. If ``None``, a full connectivity of ``input.ndim`` is used. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = KeepLargestConnectedComponent(applied_labels, independent, connectivity) def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -192,20 +200,21 @@ class LabelToContourd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.LabelToContour`. """ - def __init__(self, keys: KeysCollection, kernel_type: str = "Laplace") -> None: + def __init__(self, keys: KeysCollection, kernel_type: str = "Laplace", allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` kernel_type: the method applied to do edge detection, default is "Laplace". + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = LabelToContour(kernel_type=kernel_type) def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -221,6 +230,7 @@ def __init__( keys: KeysCollection, ensemble: Callable[[Union[Sequence[torch.Tensor], torch.Tensor]], torch.Tensor], output_key: Optional[str] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -229,13 +239,14 @@ def __init__( output_key: the key to store ensemble result in the dictionary. ensemble: callable method to execute ensemble on specified data. if only 1 key provided in `keys`, `output_key` can be None and use `keys` as default. + allow_missing_keys: don't raise exception if key is missing. Raises: TypeError: When ``ensemble`` is not ``callable``. ValueError: When ``len(keys) > 1`` and ``output_key=None``. Incompatible values. 
""" - super().__init__(keys) + super().__init__(keys, allow_missing_keys) if not callable(ensemble): raise TypeError(f"ensemble must be callable but is {type(ensemble).__name__}.") self.ensemble = ensemble @@ -249,7 +260,7 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torc if len(self.keys) == 1: items = d[self.keys[0]] else: - items = [d[key] for key in self.keys] + items = [d[key] for key in self.key_iterator(d)] d[self.output_key] = self.ensemble(items) return d diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index f29258bf28..a81aeb432b 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -125,6 +125,7 @@ def __init__( align_corners: Union[Sequence[bool], bool] = False, dtype: Optional[Union[Sequence[DtypeLike], DtypeLike]] = np.float64, meta_key_postfix: str = "meta_dict", + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -160,12 +161,13 @@ def __init__( default is `meta_dict`, the meta data is a dictionary object. For example, to handle key `image`, read/write affine matrices from the metadata `image_meta_dict` dictionary's `affine` field. + allow_missing_keys: don't raise exception if key is missing. Raises: TypeError: When ``meta_key_postfix`` is not a ``str``. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.spacing_transform = Spacing(pixdim, diagonal=diagonal) self.mode = ensure_tuple_rep(mode, len(self.keys)) self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys)) @@ -179,17 +181,19 @@ def __call__( self, data: Mapping[Union[Hashable, str], Dict[str, np.ndarray]] ) -> Dict[Union[Hashable, str], Union[np.ndarray, Dict[str, np.ndarray]]]: d: Dict = dict(data) - for idx, key in enumerate(self.keys): + for key, mode, padding_mode, align_corners, dtype in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners, self.dtype + ): meta_data = d[f"{key}_{self.meta_key_postfix}"] # resample array of each corresponding key # using affine fetched from d[affine_key] d[key], _, new_affine = self.spacing_transform( data_array=np.asarray(d[key]), affine=meta_data["affine"], - mode=self.mode[idx], - padding_mode=self.padding_mode[idx], - align_corners=self.align_corners[idx], - dtype=self.dtype[idx], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + dtype=dtype, ) # set the 'affine' key meta_data["affine"] = new_affine @@ -214,6 +218,7 @@ def __init__( as_closest_canonical: bool = False, labels: Optional[Sequence[Tuple[str, str]]] = tuple(zip("LPI", "RAS")), meta_key_postfix: str = "meta_dict", + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -230,6 +235,7 @@ def __init__( default is `meta_dict`, the meta data is a dictionary object. For example, to handle key `image`, read/write affine matrices from the metadata `image_meta_dict` dictionary's `affine` field. + allow_missing_keys: don't raise exception if key is missing. Raises: TypeError: When ``meta_key_postfix`` is not a ``str``. @@ -238,7 +244,7 @@ def __init__( `nibabel.orientations.ornt2axcodes`. 
""" - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.ornt_transform = Orientation(axcodes=axcodes, as_closest_canonical=as_closest_canonical, labels=labels) if not isinstance(meta_key_postfix, str): raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.") @@ -248,7 +254,7 @@ def __call__( self, data: Mapping[Union[Hashable, str], Dict[str, np.ndarray]] ) -> Dict[Union[Hashable, str], Union[np.ndarray, Dict[str, np.ndarray]]]: d: Dict = dict(data) - for key in self.keys: + for key in self.key_iterator(d): meta_data = d[f"{key}_{self.meta_key_postfix}"] d[key], _, new_affine = self.ornt_transform(d[key], affine=meta_data["affine"]) meta_data["affine"] = new_affine @@ -260,19 +266,22 @@ class Rotate90d(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.Rotate90`. """ - def __init__(self, keys: KeysCollection, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1)) -> None: + def __init__( + self, keys: KeysCollection, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1), allow_missing_keys: bool = False + ) -> None: """ Args: k: number of times to rotate by 90 degrees. spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes. Default: (0, 1), this is the first two axis in spatial dimensions. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.rotator = Rotate90(k, spatial_axes) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.rotator(d[key]) return d @@ -290,6 +299,7 @@ def __init__( prob: float = 0.1, max_k: int = 3, spatial_axes: Tuple[int, int] = (0, 1), + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -301,8 +311,9 @@ def __init__( (Default 3) spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes. Default: (0, 1), this is the first two axis in spatial dimensions. + allow_missing_keys: don't raise exception if key is missing. """ - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.max_k = max_k @@ -319,7 +330,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Mapping[Hashable, np. d = dict(data) rotator = Rotate90(self._rand_k, self.spatial_axes) - for key in self.keys: + for key in self.key_iterator(d): if self._do_transform: d[key] = rotator(d[key]) return d @@ -344,6 +355,7 @@ class Resized(MapTransform): 'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None. See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of bool or None, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. 
""" def __init__( @@ -352,16 +364,17 @@ def __init__( spatial_size: Union[Sequence[int], int], mode: InterpolateModeSequence = InterpolateMode.AREA, align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.mode = ensure_tuple_rep(mode, len(self.keys)) self.align_corners = ensure_tuple_rep(align_corners, len(self.keys)) self.resizer = Resize(spatial_size=spatial_size) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for idx, key in enumerate(self.keys): - d[key] = self.resizer(d[key], mode=self.mode[idx], align_corners=self.align_corners[idx]) + for key, mode, align_corners in self.key_iterator(d, self.mode, self.align_corners): + d[key] = self.resizer(d[key], mode=mode, align_corners=align_corners) return d @@ -383,6 +396,7 @@ def __init__( padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION, as_tensor_output: bool = True, device: Optional[torch.device] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -416,12 +430,13 @@ def __init__( as_tensor_output: the computation is implemented using pytorch tensors, this option specifies whether to convert it back to numpy arrays. device: device on which the tensor will be allocated. + allow_missing_keys: don't raise exception if key is missing. See also: - :py:class:`monai.transforms.compose.MapTransform` - :py:class:`RandAffineGrid` for the random affine parameters configurations. """ - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.rand_affine = RandAffine( prob=1.0, # because probability handled in this class @@ -459,8 +474,8 @@ def __call__( else: grid = create_grid(spatial_size=sp_size) - for idx, key in enumerate(self.keys): - d[key] = self.rand_affine.resampler(d[key], grid, mode=self.mode[idx], padding_mode=self.padding_mode[idx]) + for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + d[key] = self.rand_affine.resampler(d[key], grid, mode=mode, padding_mode=padding_mode) return d @@ -484,6 +499,7 @@ def __init__( padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION, as_tensor_output: bool = False, device: Optional[torch.device] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -521,12 +537,13 @@ def __init__( as_tensor_output: the computation is implemented using pytorch tensors, this option specifies whether to convert it back to numpy arrays. device: device on which the tensor will be allocated. + allow_missing_keys: don't raise exception if key is missing. See also: - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. 
""" - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.rand_2d_elastic = Rand2DElastic( spacing=spacing, @@ -576,10 +593,8 @@ def __call__( else: grid = create_grid(spatial_size=sp_size) - for idx, key in enumerate(self.keys): - d[key] = self.rand_2d_elastic.resampler( - d[key], grid, mode=self.mode[idx], padding_mode=self.padding_mode[idx] - ) + for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + d[key] = self.rand_2d_elastic.resampler(d[key], grid, mode=mode, padding_mode=padding_mode) return d @@ -603,6 +618,7 @@ def __init__( padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION, as_tensor_output: bool = False, device: Optional[torch.device] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -641,12 +657,13 @@ def __init__( as_tensor_output: the computation is implemented using pytorch tensors, this option specifies whether to convert it back to numpy arrays. device: device on which the tensor will be allocated. + allow_missing_keys: don't raise exception if key is missing. See also: - :py:class:`RandAffineGrid` for the random affine parameters configurations. - :py:class:`Affine` for the affine transformation parameters configurations. """ - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.rand_3d_elastic = Rand3DElastic( sigma_range=sigma_range, @@ -690,10 +707,8 @@ def __call__( grid[:3] += gaussian(offset)[0] * self.rand_3d_elastic.magnitude grid = self.rand_3d_elastic.rand_affine_grid(grid=grid) - for idx, key in enumerate(self.keys): - d[key] = self.rand_3d_elastic.resampler( - d[key], grid, mode=self.mode[idx], padding_mode=self.padding_mode[idx] - ) + for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + d[key] = self.rand_3d_elastic.resampler(d[key], grid, mode=mode, padding_mode=padding_mode) return d @@ -707,15 +722,21 @@ class Flipd(MapTransform): Args: keys: Keys to pick data for transformation. spatial_axis: Spatial axes along which to flip over. Default is None. + allow_missing_keys: don't raise exception if key is missing. """ - def __init__(self, keys: KeysCollection, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None: - super().__init__(keys) + def __init__( + self, + keys: KeysCollection, + spatial_axis: Optional[Union[Sequence[int], int]] = None, + allow_missing_keys: bool = False, + ) -> None: + super().__init__(keys, allow_missing_keys) self.flipper = Flip(spatial_axis=spatial_axis) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.flipper(d[key]) return d @@ -731,6 +752,7 @@ class RandFlipd(RandomizableTransform, MapTransform): keys: Keys to pick data for transformation. prob: Probability of flipping. spatial_axis: Spatial axes along which to flip over. Default is None. + allow_missing_keys: don't raise exception if key is missing. 
""" def __init__( @@ -738,8 +760,9 @@ def __init__( keys: KeysCollection, prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None, + allow_missing_keys: bool = False, ) -> None: - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.spatial_axis = spatial_axis @@ -748,7 +771,7 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: self.randomize(None) d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): if self._do_transform: d[key] = self.flipper(d[key]) return d @@ -764,11 +787,12 @@ class RandAxisFlipd(RandomizableTransform, MapTransform): Args: keys: Keys to pick data for transformation. prob: Probability of flipping. + allow_missing_keys: don't raise exception if key is missing. """ - def __init__(self, keys: KeysCollection, prob: float = 0.1) -> None: - MapTransform.__init__(self, keys) + def __init__(self, keys: KeysCollection, prob: float = 0.1, allow_missing_keys: bool = False) -> None: + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self._axis: Optional[int] = None @@ -781,7 +805,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda flipper = Flip(spatial_axis=self._axis) d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): if self._do_transform: d[key] = flipper(d[key]) return d @@ -812,6 +836,7 @@ class Rotated(MapTransform): If None, use the data type of input data. To be compatible with other modules, the output data type is always ``np.float32``. It also can be a sequence of dtype or None, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. """ def __init__( @@ -823,8 +848,9 @@ def __init__( padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER, align_corners: Union[Sequence[bool], bool] = False, dtype: Union[Sequence[DtypeLike], DtypeLike] = np.float64, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.rotator = Rotate(angle=angle, keep_size=keep_size) self.mode = ensure_tuple_rep(mode, len(self.keys)) @@ -834,13 +860,15 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for idx, key in enumerate(self.keys): + for key, mode, padding_mode, align_corners, dtype in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners, self.dtype + ): d[key] = self.rotator( d[key], - mode=self.mode[idx], - padding_mode=self.padding_mode[idx], - align_corners=self.align_corners[idx], - dtype=self.dtype[idx], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + dtype=dtype, ) return d @@ -877,6 +905,7 @@ class RandRotated(RandomizableTransform, MapTransform): If None, use the data type of input data. To be compatible with other modules, the output data type is always ``np.float32``. It also can be a sequence of dtype or None, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. 
""" def __init__( @@ -891,8 +920,9 @@ def __init__( padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER, align_corners: Union[Sequence[bool], bool] = False, dtype: Union[Sequence[DtypeLike], DtypeLike] = np.float64, + allow_missing_keys: bool = False, ) -> None: - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.range_x = ensure_tuple(range_x) if len(self.range_x) == 1: @@ -929,13 +959,15 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda angle=self.x if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z), keep_size=self.keep_size, ) - for idx, key in enumerate(self.keys): + for key, mode, padding_mode, align_corners, dtype in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners, self.dtype + ): d[key] = rotator( d[key], - mode=self.mode[idx], - padding_mode=self.padding_mode[idx], - align_corners=self.align_corners[idx], - dtype=self.dtype[idx], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + dtype=dtype, ) return d @@ -962,6 +994,7 @@ class Zoomd(MapTransform): See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of bool or None, each element corresponds to a key in ``keys``. keep_size: Should keep original size (pad if needed), default is True. + allow_missing_keys: don't raise exception if key is missing. """ def __init__( @@ -972,8 +1005,9 @@ def __init__( padding_mode: NumpyPadModeSequence = NumpyPadMode.EDGE, align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None, keep_size: bool = True, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.mode = ensure_tuple_rep(mode, len(self.keys)) self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys)) self.align_corners = ensure_tuple_rep(align_corners, len(self.keys)) @@ -981,12 +1015,14 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for idx, key in enumerate(self.keys): + for key, mode, padding_mode, align_corners in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners + ): d[key] = self.zoomer( d[key], - mode=self.mode[idx], - padding_mode=self.padding_mode[idx], - align_corners=self.align_corners[idx], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, ) return d @@ -1021,6 +1057,7 @@ class RandZoomd(RandomizableTransform, MapTransform): See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate It also can be a sequence of bool or None, each element corresponds to a key in ``keys``. keep_size: Should keep original size (pad if needed), default is True. + allow_missing_keys: don't raise exception if key is missing. 
""" def __init__( @@ -1033,8 +1070,9 @@ def __init__( padding_mode: NumpyPadModeSequence = NumpyPadMode.EDGE, align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None, keep_size: bool = True, + allow_missing_keys: bool = False, ) -> None: - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.min_zoom = ensure_tuple(min_zoom) self.max_zoom = ensure_tuple(max_zoom) @@ -1067,12 +1105,14 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda # if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim self._zoom = ensure_tuple_rep(self._zoom[0], img_dims - 2) + ensure_tuple(self._zoom[-1]) zoomer = Zoom(self._zoom, keep_size=self.keep_size) - for idx, key in enumerate(self.keys): + for key, mode, padding_mode, align_corners in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners + ): d[key] = zoomer( d[key], - mode=self.mode[idx], - padding_mode=self.padding_mode[idx], - align_corners=self.align_corners[idx], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, ) return d diff --git a/monai/transforms/transform.py b/monai/transforms/transform.py index 9c9729d250..7a09efa6d5 100644 --- a/monai/transforms/transform.py +++ b/monai/transforms/transform.py @@ -13,7 +13,7 @@ """ from abc import ABC, abstractmethod -from typing import Any, Hashable, Optional, Tuple +from typing import Any, Dict, Generator, Hashable, Iterable, List, Optional, Tuple import numpy as np @@ -178,7 +178,7 @@ def __call__(self, data): if key in data: # update output data with some_transform_function(data[key]). else: - # do nothing or some exceptions handling. + # raise exception unless allow_missing_keys==True. return data Raises: @@ -187,8 +187,9 @@ def __call__(self, data): """ - def __init__(self, keys: KeysCollection) -> None: + def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: self.keys: Tuple[Hashable, ...] = ensure_tuple(keys) + self.allow_missing_keys = allow_missing_keys if not self.keys: raise ValueError("keys must be non empty.") for key in self.keys: @@ -224,3 +225,29 @@ def __call__(self, data): """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") + + def key_iterator( + self, + data: Dict[Hashable, Any], + *extra_iterables: Optional[Iterable], + ) -> Generator: + """ + Iterate across keys and optionally extra iterables. If key is missing, exception is raised if + `allow_missing_keys==False` (default). If `allow_missing_keys==True`, key is skipped. 
+ + Args: + data: data that the transform will be applied to + extra_iterables: anything else to be iterated through + """ + # if no extra iterables given, create a dummy list of Nones + ex_iters = extra_iterables if extra_iterables else [[None] * len(self.keys)] + + # loop over keys and any extra iterables + _ex_iters: List[Any] + for key, *_ex_iters in zip(self.keys, *ex_iters): + # all normal, yield (what we yield depends on whether extra iterables were given) + if key in data.keys(): + yield (key,) + tuple(_ex_iters) if extra_iterables else key + # if missing keys not allowed, raise + elif not self.allow_missing_keys: + raise KeyError(f"Key was missing ({key}) and allow_missing_keys==False") diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index 4a0808fdbb..14f34fb663 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -137,21 +137,22 @@ class Identityd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.Identity`. """ - def __init__(self, keys: KeysCollection) -> None: + def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.identity = Identity() def __call__( self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]] ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.identity(d[key]) return d @@ -161,19 +162,20 @@ class AsChannelFirstd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelFirst`. """ - def __init__(self, keys: KeysCollection, channel_dim: int = -1) -> None: + def __init__(self, keys: KeysCollection, channel_dim: int = -1, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` channel_dim: which dimension of input image is the channel, default is the last dimension. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = AsChannelFirst(channel_dim=channel_dim) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -183,19 +185,20 @@ class AsChannelLastd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelLast`. """ - def __init__(self, keys: KeysCollection, channel_dim: int = 0) -> None: + def __init__(self, keys: KeysCollection, channel_dim: int = 0, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` channel_dim: which dimension of input image is the channel, default is the first dimension. + allow_missing_keys: don't raise exception if key is missing. 
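The generator above is the single place where the missing-key policy is enforced; the dictionary transforms in this patch all delegate to it. A small behavioural sketch using `Identityd` (placeholder values):

    from monai.transforms import Identityd

    t = Identityd(keys=["image", "label"], allow_missing_keys=True)
    list(t.key_iterator({"image": 0}))              # ["image"] - missing "label" skipped
    list(t.key_iterator({"image": 0}, ("a", "b")))  # [("image", "a")] - extras are zipped in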
""" - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = AsChannelLast(channel_dim=channel_dim) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -205,18 +208,19 @@ class AddChanneld(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.AddChannel`. """ - def __init__(self, keys: KeysCollection) -> None: + def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.adder = AddChannel() def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.adder(d[key]) return d @@ -252,19 +256,20 @@ class RepeatChanneld(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.RepeatChannel`. """ - def __init__(self, keys: KeysCollection, repeats: int) -> None: + def __init__(self, keys: KeysCollection, repeats: int, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` repeats: the number of repetitions for each element. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.repeater = RepeatChannel(repeats) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.repeater(d[key]) return d @@ -274,19 +279,20 @@ class RemoveRepeatedChanneld(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.RemoveRepeatedChannel`. """ - def __init__(self, keys: KeysCollection, repeats: int) -> None: + def __init__(self, keys: KeysCollection, repeats: int, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` repeats: the number of repetitions for each element. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.repeater = RemoveRepeatedChannel(repeats) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.repeater(d[key]) return d @@ -303,6 +309,7 @@ def __init__( keys: KeysCollection, output_postfixes: Optional[Sequence[str]] = None, channel_dim: Optional[int] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -316,9 +323,10 @@ def __init__( to automatically select: if data is numpy array, channel_dim is 0 as `numpy array` is used in the pre transforms, if PyTorch Tensor, channel_dim is 1 as in most of the cases `Tensor` is uses in the post transforms. + allow_missing_keys: don't raise exception if key is missing. 
""" - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.output_postfixes = output_postfixes self.splitter = SplitChannel(channel_dim=channel_dim) @@ -326,7 +334,7 @@ def __call__( self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]] ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): rets = self.splitter(d[key]) postfixes: Sequence = list(range(len(rets))) if self.output_postfixes is None else self.output_postfixes if len(postfixes) != len(rets): @@ -348,6 +356,7 @@ def __init__( self, keys: KeysCollection, dtype: Union[Sequence[Union[DtypeLike, torch.dtype]], DtypeLike, torch.dtype] = np.float32, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -356,9 +365,10 @@ def __init__( dtype: convert image to this data type, default is `np.float32`. it also can be a sequence of dtypes or torch.dtype, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. """ - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) self.dtype = ensure_tuple_rep(dtype, len(self.keys)) self.converter = CastToType() @@ -366,8 +376,8 @@ def __call__( self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]] ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: d = dict(data) - for idx, key in enumerate(self.keys): - d[key] = self.converter(d[key], dtype=self.dtype[idx]) + for key, dtype in self.key_iterator(d, self.dtype): + d[key] = self.converter(d[key], dtype=dtype) return d @@ -377,20 +387,21 @@ class ToTensord(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.ToTensor`. """ - def __init__(self, keys: KeysCollection) -> None: + def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = ToTensor() def __call__( self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]] ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -400,20 +411,21 @@ class ToNumpyd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.ToNumpy`. """ - def __init__(self, keys: KeysCollection) -> None: + def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = ToNumpy() def __call__( self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]] ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -423,20 +435,21 @@ class ToPILd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.ToNumpy`. 
""" - def __init__(self, keys: KeysCollection) -> None: + def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = ToPIL() def __call__( self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]] ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -448,7 +461,7 @@ class DeleteItemsd(MapTransform): """ def __call__(self, data): - return {key: val for key, val in data.items() if key not in self.keys} + return {key: val for key, val in data.items() if key not in self.key_iterator(data)} class SelectItemsd(MapTransform): @@ -458,7 +471,7 @@ class SelectItemsd(MapTransform): """ def __call__(self, data): - result = {key: val for key, val in data.items() if key in self.keys} + result = {key: data[key] for key in self.key_iterator(data)} return result @@ -467,19 +480,20 @@ class SqueezeDimd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.SqueezeDim`. """ - def __init__(self, keys: KeysCollection, dim: int = 0) -> None: + def __init__(self, keys: KeysCollection, dim: int = 0, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` dim: dimension to be squeezed. Default: 0 (the first dimension) + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = SqueezeDim(dim=dim) def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -498,6 +512,7 @@ def __init__( data_value: Union[Sequence[bool], bool] = False, additional_info: Optional[Union[Sequence[Callable], Callable]] = None, logger_handler: Optional[logging.Handler] = None, + allow_missing_keys: bool = False, ) -> None: """ Args: @@ -517,9 +532,10 @@ def __init__( corresponds to a key in ``keys``. logger_handler: add additional handler to output data: save to file, etc. add existing python logging handlers: https://docs.python.org/3/library/logging.handlers.html + allow_missing_keys: don't raise exception if key is missing. 
""" - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.prefix = ensure_tuple_rep(prefix, len(self.keys)) self.data_shape = ensure_tuple_rep(data_shape, len(self.keys)) self.value_range = ensure_tuple_rep(value_range, len(self.keys)) @@ -530,14 +546,16 @@ def __init__( def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]: d = dict(data) - for idx, key in enumerate(self.keys): + for key, prefix, data_shape, value_range, data_value, additional_info in self.key_iterator( + d, self.prefix, self.data_shape, self.value_range, self.data_value, self.additional_info + ): d[key] = self.printer( d[key], - self.prefix[idx], - self.data_shape[idx], - self.value_range[idx], - self.data_value[idx], - self.additional_info[idx], + prefix, + data_shape, + value_range, + data_value, + additional_info, ) return d @@ -547,23 +565,26 @@ class SimulateDelayd(MapTransform): Dictionary-based wrapper of :py:class:`monai.transforms.SimulateDelay`. """ - def __init__(self, keys: KeysCollection, delay_time: Union[Sequence[float], float] = 0.0) -> None: + def __init__( + self, keys: KeysCollection, delay_time: Union[Sequence[float], float] = 0.0, allow_missing_keys: bool = False + ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` delay_time: The minimum amount of time, in fractions of seconds, to accomplish this identity task. It also can be a sequence of string, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.delay_time = ensure_tuple_rep(delay_time, len(self.keys)) self.delayer = SimulateDelay() def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]: d = dict(data) - for idx, key in enumerate(self.keys): - d[key] = self.delayer(d[key], delay_time=self.delay_time[idx]) + for key, delay_time in self.key_iterator(d, self.delay_time): + d[key] = self.delayer(d[key], delay_time=delay_time) return d @@ -574,7 +595,9 @@ class CopyItemsd(MapTransform): """ - def __init__(self, keys: KeysCollection, times: int, names: KeysCollection) -> None: + def __init__( + self, keys: KeysCollection, times: int, names: KeysCollection, allow_missing_keys: bool = False + ) -> None: """ Args: keys: keys of the corresponding items to be transformed. @@ -584,13 +607,14 @@ def __init__(self, keys: KeysCollection, times: int, names: KeysCollection) -> N names: the names corresponding to the newly copied data, the length should match `len(keys) x times`. for example, if keys is ["img", "seg"] and times is 2, names can be: ["img_1", "seg_1", "img_2", "seg_2"]. + allow_missing_keys: don't raise exception if key is missing. Raises: ValueError: When ``times`` is nonpositive. ValueError: When ``len(names)`` is not ``len(keys) * times``. Incompatible values. 
""" - super().__init__(keys) + super().__init__(keys, allow_missing_keys) if times < 1: raise ValueError(f"times must be positive, got {times}.") self.times = times @@ -609,13 +633,14 @@ def __call__(self, data): """ d = dict(data) - for key, new_key in zip(self.keys * self.times, self.names): + for new_key in self.names: if new_key in d: raise KeyError(f"Key {new_key} already exists in data.") - if isinstance(d[key], torch.Tensor): - d[new_key] = d[key].detach().clone() - else: - d[new_key] = copy.deepcopy(d[key]) + for key in self.key_iterator(d): + if isinstance(d[key], torch.Tensor): + d[new_key] = d[key].detach().clone() + else: + d[new_key] = copy.deepcopy(d[key]) return d @@ -626,19 +651,20 @@ class ConcatItemsd(MapTransform): """ - def __init__(self, keys: KeysCollection, name: str, dim: int = 0) -> None: + def __init__(self, keys: KeysCollection, name: str, dim: int = 0, allow_missing_keys: bool = False) -> None: """ Args: keys: keys of the corresponding items to be concatenated together. See also: :py:class:`monai.transforms.compose.MapTransform` name: the name corresponding to the key to store the concatenated data. dim: on which dimension to concatenate the items, default is 0. + allow_missing_keys: don't raise exception if key is missing. Raises: ValueError: When insufficient keys are given (``len(self.keys) < 2``). """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) if len(self.keys) < 2: raise ValueError("Concatenation requires at least 2 keys.") self.name = name @@ -654,7 +680,7 @@ def __call__(self, data): d = dict(data) output = [] data_type = None - for key in self.keys: + for key in self.key_iterator(d): if data_type is None: data_type = type(d[key]) elif not isinstance(d[key], data_type): @@ -690,6 +716,7 @@ class Lambdad(MapTransform): each element corresponds to a key in ``keys``. overwrite: whether to overwrite the original data in the input dictionary with lamdbda function output. default to True. it also can be a sequence of bool, each element corresponds to a key in ``keys``. + allow_missing_keys: don't raise exception if key is missing. """ def __init__( @@ -697,17 +724,18 @@ def __init__( keys: KeysCollection, func: Union[Sequence[Callable], Callable], overwrite: Union[Sequence[bool], bool] = True, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.func = ensure_tuple_rep(func, len(self.keys)) self.overwrite = ensure_tuple_rep(overwrite, len(self.keys)) self._lambd = Lambda() def __call__(self, data): d = dict(data) - for idx, key in enumerate(self.keys): - ret = self._lambd(d[key], func=self.func[idx]) - if self.overwrite[idx]: + for key, func, overwrite in self.key_iterator(d, self.func, self.overwrite): + ret = self._lambd(d[key], func=func) + if overwrite: d[key] = ret return d @@ -745,6 +773,7 @@ class LabelToMaskd(MapTransform): `select_labels` is the expected channel indices. merge_channels: whether to use `np.any()` to merge the result on channel dim. if yes, will return a single channel mask with binary data. + allow_missing_keys: don't raise exception if key is missing. 
""" @@ -753,13 +782,14 @@ def __init__( # pytype: disable=annotation-type-mismatch keys: KeysCollection, select_labels: Union[Sequence[int], int], merge_channels: bool = False, + allow_missing_keys: bool = False, ) -> None: # pytype: disable=annotation-type-mismatch - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.converter = LabelToMask(select_labels=select_labels, merge_channels=merge_channels) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -781,6 +811,7 @@ class FgBgToIndicesd(MapTransform): image_threshold: if enabled image_key, use ``image > image_threshold`` to determine the valid image content area and select background only in this area. output_shape: expected shape of output indices. if not None, unravel indices to specified shape. + allow_missing_keys: don't raise exception if key is missing. """ @@ -792,8 +823,9 @@ def __init__( image_key: Optional[str] = None, image_threshold: float = 0.0, output_shape: Optional[Sequence[int]] = None, + allow_missing_keys: bool = False, ) -> None: - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.fg_postfix = fg_postfix self.bg_postfix = bg_postfix self.image_key = image_key @@ -802,7 +834,7 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) image = d[self.image_key] if self.image_key else None - for key in self.keys: + for key in self.key_iterator(d): d[str(key) + self.fg_postfix], d[str(key) + self.bg_postfix] = self.converter(d[key], image) return d @@ -819,13 +851,13 @@ class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform): and ET (Enhancing tumor). """ - def __init__(self, keys: KeysCollection): - super().__init__(keys) + def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False): + super().__init__(keys, allow_missing_keys) self.converter = ConvertToMultiChannelBasedOnBratsClasses() def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.converter(d[key]) return d @@ -845,6 +877,7 @@ class AddExtremePointsChanneld(RandomizableTransform, MapTransform): use it for all spatial dimensions. rescale_min: minimum value of output data. rescale_max: maximum value of output data. + allow_missing_keys: don't raise exception if key is missing. 
""" @@ -857,8 +890,9 @@ def __init__( sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 3.0, rescale_min: float = -1.0, rescale_max: float = 1.0, + allow_missing_keys: bool = False, ): - MapTransform.__init__(self, keys) + MapTransform.__init__(self, keys, allow_missing_keys) self.background = background self.pert = pert self.points: List[Tuple[int, ...]] = [] @@ -879,17 +913,16 @@ def __call__(self, data): # Generate extreme points self.randomize(label[0, :]) - for key in data.keys(): - if key in self.keys: - img = d[key] - points_image = extreme_points_to_image( - points=self.points, - label=label, - sigma=self.sigma, - rescale_min=self.rescale_min, - rescale_max=self.rescale_max, - ) - d[key] = np.concatenate([img, points_image], axis=0) + for key in self.key_iterator(d): + img = d[key] + points_image = extreme_points_to_image( + points=self.points, + label=label, + sigma=self.sigma, + rescale_min=self.rescale_min, + rescale_max=self.rescale_max, + ) + d[key] = np.concatenate([img, points_image], axis=0) return d @@ -900,22 +933,23 @@ class TorchVisiond(MapTransform): data to be dict of PyTorch Tensors, users can easily call `ToTensord` transform to convert Numpy to Tensor. """ - def __init__(self, keys: KeysCollection, name: str, *args, **kwargs) -> None: + def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = False, *args, **kwargs) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` name: The transform name in TorchVision package. + allow_missing_keys: don't raise exception if key is missing. args: parameters for the TorchVision transform. kwargs: parameters for the TorchVision transform. """ - super().__init__(keys) + super().__init__(keys, allow_missing_keys) self.trans = TorchVision(name, *args, **kwargs) def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]: d = dict(data) - for key in self.keys: + for key in self.key_iterator(d): d[key] = self.trans(d[key]) return d From 27543c883c97d1a35f5a3d035ba1fd608b51f6a0 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 3 Mar 2021 14:06:30 +0000 Subject: [PATCH 019/457] nifti saver squeeze dims (#1680) Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/data/nifti_saver.py | 14 ++++++++++++++ monai/transforms/io/array.py | 8 ++++++++ monai/transforms/io/dictionary.py | 8 ++++++++ tests/test_nifti_saver.py | 24 ++++++++++++++++++++++++ 4 files changed, 54 insertions(+) diff --git a/monai/data/nifti_saver.py b/monai/data/nifti_saver.py index 01e701b1a6..016b06fda5 100644 --- a/monai/data/nifti_saver.py +++ b/monai/data/nifti_saver.py @@ -27,6 +27,8 @@ class NiftiSaver: Typically, the data can be segmentation predictions, call `save` for single data or call `save_batch` to save a batch of data together. If no meta data provided, use index from 0 as the filename prefix. + + NB: image should include channel dimension: [B],C,H,W,[D]. """ def __init__( @@ -40,6 +42,7 @@ def __init__( align_corners: bool = False, dtype: DtypeLike = np.float64, output_dtype: DtypeLike = np.float32, + squeeze_end_dims: bool = True, ) -> None: """ Args: @@ -60,6 +63,10 @@ def __init__( dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision. If None, use the data type of input data. output_dtype: data type for saving data. Defaults to ``np.float32``. 
+ squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel + has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and + then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, + image will always be saved as (H,W,D,C). """ self.output_dir = output_dir self.output_postfix = output_postfix @@ -71,6 +78,7 @@ def __init__( self.dtype = dtype self.output_dtype = output_dtype self._data_index = 0 + self.squeeze_end_dims = squeeze_end_dims def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: """ @@ -111,6 +119,12 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] data = np.expand_dims(data, -1) # change data to "channel last" format and write to nifti format file data = np.moveaxis(np.asarray(data), 0, -1) + + # if desired, remove trailing singleton dimensions + if self.squeeze_end_dims: + while data.shape[-1] == 1: + data = np.squeeze(data, -1) + write_nifti( data, file_name=filename, diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 9c4f631699..4ede04cf69 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -165,6 +165,8 @@ class SaveImage(Transform): It can work for both numpy array and PyTorch Tensor in both pre-transform chain and post transform chain. + NB: image should include channel dimension: [B],C,H,W,[D]. + Args: output_dir: output image directory. output_postfix: a string appended to all output file names, default to `trans`. @@ -200,6 +202,10 @@ class SaveImage(Transform): it's used for NIfTI format only. save_batch: whether the import image is a batch data, default to `False`. usually pre-transforms run for channel first data, while post-transforms run for batch data. + squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel + has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and + then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, + image will always be saved as (H,W,D,C). """ @@ -215,6 +221,7 @@ def __init__( dtype: DtypeLike = np.float64, output_dtype: DtypeLike = np.float32, save_batch: bool = False, + squeeze_end_dims: bool = True, ) -> None: self.saver: Union[NiftiSaver, PNGSaver] if output_ext in (".nii.gz", ".nii"): @@ -227,6 +234,7 @@ def __init__( padding_mode=padding_mode, dtype=dtype, output_dtype=output_dtype, + squeeze_end_dims=squeeze_end_dims, ) elif output_ext == ".png": self.saver = PNGSaver( diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index ea965255d5..8a428e1118 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -124,6 +124,8 @@ class SaveImaged(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.SaveImage`. + NB: image should include channel dimension: [B],C,H,W,[D]. + Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` @@ -166,6 +168,10 @@ class SaveImaged(MapTransform): save_batch: whether the import image is a batch data, default to `False`. usually pre-transforms run for channel first data, while post-transforms run for batch data. allow_missing_keys: don't raise exception if key is missing. + squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel + has been moved to the end). 
So if input is (C,H,W,D), this will be altered to (H,W,D,C), and + then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, + image will always be saved as (H,W,D,C). """ @@ -184,6 +190,7 @@ def __init__( output_dtype: DtypeLike = np.float32, save_batch: bool = False, allow_missing_keys: bool = False, + squeeze_end_dims: bool = True, ) -> None: super().__init__(keys, allow_missing_keys) self.meta_key_postfix = meta_key_postfix @@ -198,6 +205,7 @@ def __init__( dtype=dtype, output_dtype=output_dtype, save_batch=save_batch, + squeeze_end_dims=squeeze_end_dims, ) def __call__(self, data): diff --git a/tests/test_nifti_saver.py b/tests/test_nifti_saver.py index 2e2bfd4254..f48374a61c 100644 --- a/tests/test_nifti_saver.py +++ b/tests/test_nifti_saver.py @@ -17,6 +17,7 @@ import torch from monai.data import NiftiSaver +from monai.transforms import LoadImage class TestNiftiSaver(unittest.TestCase): @@ -72,6 +73,29 @@ def test_saved_3d_resize_content(self): filepath = os.path.join("testfile" + str(i), "testfile" + str(i) + "_seg.nii.gz") self.assertTrue(os.path.exists(os.path.join(tempdir, filepath))) + def test_squeeze_end_dims(self): + with tempfile.TemporaryDirectory() as tempdir: + + for squeeze_end_dims in [False, True]: + + saver = NiftiSaver( + output_dir=tempdir, + output_postfix="", + output_ext=".nii.gz", + dtype=np.float32, + squeeze_end_dims=squeeze_end_dims, + ) + + fname = "testfile_squeeze" + meta_data = {"filename_or_obj": fname} + + # 2d image w channel + saver.save(torch.randint(0, 255, (1, 2, 2)), meta_data) + + im, meta = LoadImage()(os.path.join(tempdir, fname, fname + ".nii.gz")) + self.assertTrue(im.ndim == 2 if squeeze_end_dims else 4) + self.assertTrue(meta["dim"][0] == im.ndim) + if __name__ == "__main__": unittest.main() From b533afdb5c9b401cf860ec4b110fd58c11e4d851 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 3 Mar 2021 15:21:21 +0000 Subject: [PATCH 020/457] improve gradcam doc (#1682) Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/visualize/class_activation_maps.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/monai/visualize/class_activation_maps.py b/monai/visualize/class_activation_maps.py index a917bcf800..624d054500 100644 --- a/monai/visualize/class_activation_maps.py +++ b/monai/visualize/class_activation_maps.py @@ -212,6 +212,12 @@ class CAM(CAMBase): cam = CAM(nn_module=model_2d, target_layers="layer4", fc_layers="last_linear") result = cam(x=torch.rand((2, 3, 48, 64))) + N.B.: To help select the target layer, it may be useful to list all layers: + + .. code-block:: python + + for name, _ in model.named_modules(): print(name) + See Also: - :py:class:`monai.visualize.class_activation_maps.GradCAM` @@ -307,6 +313,12 @@ class GradCAM(CAMBase): cam = GradCAM(nn_module=model_2d, target_layers="layer4") result = cam(x=torch.rand((2, 3, 48, 64))) + N.B.: To help select the target layer, it may be useful to list all layers: + + .. 
code-block:: python + + for name, _ in model.named_modules(): print(name) + See Also: - :py:class:`monai.visualize.class_activation_maps.CAM` From 7cfb52970f5b4a2ddfdf81a2d517e1d5ca90309a Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Wed, 3 Mar 2021 16:50:46 +0000 Subject: [PATCH 021/457] fixes integration tests (#1683) Signed-off-by: Wenqi Li --- tests/test_integration_sliding_window.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_integration_sliding_window.py b/tests/test_integration_sliding_window.py index c4d020276e..faec377586 100644 --- a/tests/test_integration_sliding_window.py +++ b/tests/test_integration_sliding_window.py @@ -84,7 +84,7 @@ def test_training(self): ) output_image = nib.load(output_file).get_fdata() np.testing.assert_allclose(np.sum(output_image), 33621) - np.testing.assert_allclose(output_image.shape, (28, 25, 63, 1)) + np.testing.assert_allclose(output_image.shape, (28, 25, 63)) if __name__ == "__main__": From 4bd9cf34df514515a7c93c0a29685c681409f3a6 Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Thu, 4 Mar 2021 03:58:13 +0800 Subject: [PATCH 022/457] Update load pretrain for densenet (#1684) * Update load pretrain for densenet Signed-off-by: Yiheng Wang * Fix isort issue Signed-off-by: Yiheng Wang --- monai/networks/nets/densenet.py | 4 ++-- tests/test_densenet.py | 41 ++++++++++++++++++++++++++++----- 2 files changed, 37 insertions(+), 8 deletions(-) diff --git a/monai/networks/nets/densenet.py b/monai/networks/nets/densenet.py index ad1d1d6e5f..a59ab99e68 100644 --- a/monai/networks/nets/densenet.py +++ b/monai/networks/nets/densenet.py @@ -210,14 +210,14 @@ def _load_state_dict(model, model_url, progress): `_ """ pattern = re.compile( - r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" + r"^(.*denselayer\d+)(\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" ) state_dict = load_state_dict_from_url(model_url, progress=progress) for key in list(state_dict.keys()): res = pattern.match(key) if res: - new_key = res.group(1) + res.group(2) + new_key = res.group(1) + ".layers" + res.group(2) + res.group(3) state_dict[new_key] = state_dict[key] del state_dict[key] diff --git a/tests/test_densenet.py b/tests/test_densenet.py index 876689314a..41b5fbf7d6 100644 --- a/tests/test_densenet.py +++ b/tests/test_densenet.py @@ -10,14 +10,25 @@ # limitations under the License. 
import unittest +from typing import TYPE_CHECKING +from unittest import skipUnless import torch from parameterized import parameterized from monai.networks import eval_mode from monai.networks.nets import densenet121, densenet169, densenet201, densenet264 +from monai.utils import optional_import from tests.utils import skip_if_quick, test_pretrained_networks, test_script_save +if TYPE_CHECKING: + import torchvision + + has_torchvision = True +else: + torchvision, has_torchvision = optional_import("torchvision") + + device = "cuda" if torch.cuda.is_available() else "cpu" TEST_CASE_1 = [ # 4-channel 3D, batch 2 @@ -50,27 +61,45 @@ TEST_PRETRAINED_2D_CASE_1 = [ # 4-channel 2D, batch 2 densenet121, {"pretrained": True, "progress": True, "spatial_dims": 2, "in_channels": 2, "out_channels": 3}, - (2, 2, 32, 64), - (2, 3), + (1, 2, 32, 64), + (1, 3), ] TEST_PRETRAINED_2D_CASE_2 = [ # 4-channel 2D, batch 2 densenet121, - {"pretrained": True, "progress": False, "spatial_dims": 2, "in_channels": 2, "out_channels": 3}, - (2, 2, 32, 64), - (2, 3), + {"pretrained": True, "progress": False, "spatial_dims": 2, "in_channels": 2, "out_channels": 1}, + (1, 2, 32, 64), + (1, 1), +] + +TEST_PRETRAINED_2D_CASE_3 = [ + densenet121, + {"pretrained": True, "progress": False, "spatial_dims": 2, "in_channels": 3, "out_channels": 1}, + (1, 3, 32, 32), ] class TestPretrainedDENSENET(unittest.TestCase): @parameterized.expand([TEST_PRETRAINED_2D_CASE_1, TEST_PRETRAINED_2D_CASE_2]) @skip_if_quick - def test_121_3d_shape_pretrain(self, model, input_param, input_shape, expected_shape): + def test_121_2d_shape_pretrain(self, model, input_param, input_shape, expected_shape): net = test_pretrained_networks(model, input_param, device) with eval_mode(net): result = net.forward(torch.randn(input_shape).to(device)) self.assertEqual(result.shape, expected_shape) + @parameterized.expand([TEST_PRETRAINED_2D_CASE_3]) + @skipUnless(has_torchvision, "Requires `torchvision` package.") + def test_pretrain_consistency(self, model, input_param, input_shape): + example = torch.randn(input_shape).to(device) + net = test_pretrained_networks(model, input_param, device) + with eval_mode(net): + result = net.features.forward(example) + torchvision_net = torchvision.models.densenet121(pretrained=True).to(device) + with eval_mode(torchvision_net): + expected_result = torchvision_net.features.forward(example) + self.assertTrue(torch.all(result == expected_result)) + class TestDENSENET(unittest.TestCase): @parameterized.expand(TEST_CASES) From 8cc200fcd3650cee92740c4d6cd16afddfb71ded Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Wed, 3 Mar 2021 21:08:48 +0000 Subject: [PATCH 023/457] revise the dtype according to the discussion (#1671) Signed-off-by: Wenqi Li --- monai/transforms/utility/array.py | 2 +- tests/test_convert_to_multi_channel.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 62daf9309c..8776238711 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -655,7 +655,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray: result.append(np.logical_or(np.logical_or(img == 1, img == 4), img == 2)) # label 4 is ET result.append(img == 4) - return np.stack(result, axis=0).astype(np.float32) + return np.stack(result, axis=0) class AddExtremePointsChannel(RandomizableTransform): diff --git a/tests/test_convert_to_multi_channel.py b/tests/test_convert_to_multi_channel.py index ea27371ac7..03510ad38c 100644 --- 
a/tests/test_convert_to_multi_channel.py +++ b/tests/test_convert_to_multi_channel.py @@ -27,6 +27,7 @@ class TestConvertToMultiChannel(unittest.TestCase): def test_type_shape(self, data, expected_result): result = ConvertToMultiChannelBasedOnBratsClasses()(data) np.testing.assert_equal(result, expected_result) + self.assertEqual(f"{result.dtype}", "bool") if __name__ == "__main__": From ac9f953c9f69b106dbc09e9d7214dd13d8ecc822 Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Thu, 4 Mar 2021 20:35:54 +0800 Subject: [PATCH 024/457] Fix senet pretrained weights issue (#1689) Signed-off-by: Yiheng Wang --- monai/networks/nets/senet.py | 4 ++-- tests/test_senet.py | 32 +++++++++++++++++++++++++++----- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/monai/networks/nets/senet.py b/monai/networks/nets/senet.py index 655ff203c7..ef67f853d6 100644 --- a/monai/networks/nets/senet.py +++ b/monai/networks/nets/senet.py @@ -275,7 +275,7 @@ def _load_state_dict(model, model_url, progress): if pattern_conv.match(key): new_key = re.sub(pattern_conv, r"\1conv.\2", key) elif pattern_bn.match(key): - new_key = re.sub(pattern_bn, r"\1conv\2norm.\3", key) + new_key = re.sub(pattern_bn, r"\1conv\2adn.N.\3", key) elif pattern_se.match(key): state_dict[key] = state_dict[key].squeeze() new_key = re.sub(pattern_se, r"\1se_layer.fc.0.\2", key) @@ -285,7 +285,7 @@ def _load_state_dict(model, model_url, progress): elif pattern_down_conv.match(key): new_key = re.sub(pattern_down_conv, r"\1project.conv.\2", key) elif pattern_down_bn.match(key): - new_key = re.sub(pattern_down_bn, r"\1project.norm.\2", key) + new_key = re.sub(pattern_down_bn, r"\1project.adn.N.\2", key) if new_key: state_dict[new_key] = state_dict[key] del state_dict[key] diff --git a/tests/test_senet.py b/tests/test_senet.py index 883d75d62d..c1327ceb7d 100644 --- a/tests/test_senet.py +++ b/tests/test_senet.py @@ -10,6 +10,8 @@ # limitations under the License. 
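# A sketch of why the substitutions above now target "adn.N": MONAI's
# Convolution block groups activation/dropout/normalization in an ADN sub-block
# named "adn", whose norm layer is registered as "N" (the constructor arguments
# below are illustrative assumptions, not taken from this patch):
from monai.networks.blocks import Convolution

conv = Convolution(dimensions=2, in_channels=3, out_channels=8, norm="batch")
module_names = [name for name, _ in conv.named_modules()]
# module_names contains entries such as "conv" and "adn.N", which is why the
# Cadene "bn" weights are renamed to "...adn.N..." before load_state_dict.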
import unittest +from typing import TYPE_CHECKING +from unittest import skipUnless import torch from parameterized import parameterized @@ -23,8 +25,17 @@ se_resnext101_32x4d, senet154, ) +from monai.utils import optional_import from tests.utils import test_pretrained_networks, test_script_save +if TYPE_CHECKING: + import pretrainedmodels + + has_cadene_pretrain = True +else: + pretrainedmodels, has_cadene_pretrain = optional_import("pretrainedmodels") + + device = "cuda" if torch.cuda.is_available() else "cpu" NET_ARGS = {"spatial_dims": 3, "in_channels": 2, "num_classes": 2} @@ -56,11 +67,7 @@ def test_script(self, net, net_args): class TestPretrainedSENET(unittest.TestCase): - @parameterized.expand( - [ - TEST_CASE_PRETRAINED, - ] - ) + @parameterized.expand([TEST_CASE_PRETRAINED]) def test_senet_shape(self, model, input_param): net = test_pretrained_networks(model, input_param, device) input_data = torch.randn(3, 3, 64, 64).to(device) @@ -70,6 +77,21 @@ def test_senet_shape(self, model, input_param): result = net(input_data) self.assertEqual(result.shape, expected_shape) + @parameterized.expand([TEST_CASE_PRETRAINED]) + @skipUnless(has_cadene_pretrain, "Requires `pretrainedmodels` package.") + def test_pretrain_consistency(self, model, input_param): + input_data = torch.randn(1, 3, 64, 64).to(device) + net = test_pretrained_networks(model, input_param, device) + with eval_mode(net): + result = net.features(input_data) + cadene_net = pretrainedmodels.se_resnet50().to(device) + with eval_mode(cadene_net): + expected_result = cadene_net.features(input_data) + # The difference between Cadene's senet and our version is that + # we use nn.Linear as the FC layer, but Cadene's version uses + # a conv layer with kernel size equals to 1. It may bring a little difference. + self.assertTrue(torch.allclose(result, expected_result, rtol=1e-5, atol=1e-5)) + if __name__ == "__main__": unittest.main() From 380f042dec9c55ba1a5ab241ae6b1a7b3e2b07fb Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Thu, 4 Mar 2021 21:54:36 +0800 Subject: [PATCH 025/457] 1680 Add squeeze SegmentationSaver handler (#1686) * [DLMED] add suqeeze to handler Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma --- monai/handlers/segmentation_saver.py | 7 +++++++ monai/transforms/io/array.py | 1 + monai/transforms/io/dictionary.py | 1 + 3 files changed, 9 insertions(+) diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py index a46918b893..56370fd41c 100644 --- a/monai/handlers/segmentation_saver.py +++ b/monai/handlers/segmentation_saver.py @@ -41,6 +41,7 @@ def __init__( scale: Optional[int] = None, dtype: DtypeLike = np.float64, output_dtype: DtypeLike = np.float32, + squeeze_end_dims: bool = True, batch_transform: Callable = lambda x: x, output_transform: Callable = lambda x: x, name: Optional[str] = None, @@ -77,6 +78,11 @@ def __init__( If None, use the data type of input data. It's used for Nifti format only. output_dtype: data type for saving data. Defaults to ``np.float32``, it's used for Nifti format only. + squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel + has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and + then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, + image will always be saved as (H,W,D,C). + it's used for NIfTI format only. 
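+                For example (a sketch): a (1, 64, 64, 1) prediction is moved to
+                (64, 64, 1, 1) and then written as a (64, 64) image.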
batch_transform: a callable that is used to transform the ignite.engine.batch into expected format to extract the meta_data dictionary. output_transform: a callable that is used to transform the @@ -96,6 +102,7 @@ def __init__( scale=scale, dtype=dtype, output_dtype=output_dtype, + squeeze_end_dims=squeeze_end_dims, save_batch=True, ) self.batch_transform = batch_transform diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 4ede04cf69..a256a16ec8 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -206,6 +206,7 @@ class SaveImage(Transform): has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, image will always be saved as (H,W,D,C). + it's used for NIfTI format only. """ diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index 8a428e1118..50ab8f9868 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -172,6 +172,7 @@ class SaveImaged(MapTransform): has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, image will always be saved as (H,W,D,C). + it's used for NIfTI format only. """ From 114faf079cd119de868c4419deeebd618b259c91 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 4 Mar 2021 16:07:41 +0000 Subject: [PATCH 026/457] 1285 adds module list (#1690) * fixes #1285 Signed-off-by: Wenqi Li * adds test Signed-off-by: Wenqi Li --- monai/__init__.py | 16 ++++++++++++++++ tests/test_module_list.py | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 tests/test_module_list.py diff --git a/monai/__init__.py b/monai/__init__.py index 910698ee14..b9b010f0fb 100644 --- a/monai/__init__.py +++ b/monai/__init__.py @@ -44,3 +44,19 @@ # load all modules, this will trigger all export decorations load_submodules(sys.modules[__name__], True, exclude_pattern=excludes) + +__all__ = [ + "apps", + "config", + "data", + "engines", + "handlers", + "inferers", + "losses", + "metrics", + "networks", + "optimizers", + "transforms", + "utils", + "visualize", +] diff --git a/tests/test_module_list.py b/tests/test_module_list.py new file mode 100644 index 0000000000..25e1dfbd6f --- /dev/null +++ b/tests/test_module_list.py @@ -0,0 +1,38 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
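+# A quick illustration of the invariant this test enforces (a sketch):
+#
+#     import monai
+#     print(sorted(monai.__all__))
+#     # -> ['apps', 'config', 'data', 'engines', 'handlers', 'inferers', 'losses',
+#     #     'metrics', 'networks', 'optimizers', 'transforms', 'utils', 'visualize']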
+ +import glob +import os +import unittest + +import monai + + +class TestAllImport(unittest.TestCase): + def test_public_api(self): + """ + This is to check "monai.__all__" should be consistent with + the top-level folders except for "__pycache__" and "csrc" (cpp/cuda src) + """ + base_folder = os.path.dirname(monai.__file__) + to_search = os.path.join(base_folder, "*", "") + subfolders = [os.path.basename(x[:-1]) for x in glob.glob(to_search)] + to_exclude = ("__pycache__", "csrc") + mod = [] + for code_folder in subfolders: + if code_folder in to_exclude: + continue + mod.append(code_folder) + self.assertEqual(sorted(monai.__all__), sorted(mod)) + + +if __name__ == "__main__": + unittest.main() From 0bd86b1424e9ef2d8584812ba52e13d88ebf2dde Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 4 Mar 2021 23:41:54 +0000 Subject: [PATCH 027/457] 1685-upgrade base image to 21.02 (#1687) * fixes #1685 Signed-off-by: Wenqi Li * add temp test Signed-off-by: Wenqi Li * adds docstring Signed-off-by: Wenqi Li * fixes dist sampler Signed-off-by: Wenqi Li * remove temp tests Signed-off-by: Wenqi Li * fixes type hint issue Signed-off-by: Wenqi Li --- .github/workflows/cron.yml | 2 +- Dockerfile | 5 +++-- monai/data/utils.py | 4 ---- monai/networks/utils.py | 6 +++--- tests/test_distributed_sampler.py | 2 ++ tests/utils.py | 7 +++++++ 6 files changed, 16 insertions(+), 10 deletions(-) diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index e568ba9e15..98834dbc8f 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -56,7 +56,7 @@ jobs: cron-pt-image: if: github.repository == 'Project-MONAI/MONAI' container: - image: nvcr.io/nvidia/pytorch:20.12-py3 # testing with the latest pytorch base image + image: nvcr.io/nvidia/pytorch:21.02-py3 # testing with the latest pytorch base image options: "--gpus all" runs-on: [self-hosted, linux, x64, common] steps: diff --git a/Dockerfile b/Dockerfile index 47976b97b1..c90558c970 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,8 +9,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:20.12-py3 - +# To build with a different base image +# please run `docker build` using the `--build-arg PYTORCH_IMAGE=...` flag. +ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:21.02-py3 FROM ${PYTORCH_IMAGE} LABEL maintainer="monai.contact@gmail.com" diff --git a/monai/data/utils.py b/monai/data/utils.py index 7717ddf3aa..60250af441 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -922,10 +922,6 @@ class DistributedSampler(_TorchDistributedSampler): """ def __init__(self, even_divisible: bool = True, *args, **kwargs): - self.total_size: int = 0 - self.rank: int = 0 - self.num_samples: int = 0 - self.num_replicas: int = 0 super().__init__(*args, **kwargs) if not even_divisible: diff --git a/monai/networks/utils.py b/monai/networks/utils.py index 48efe3934e..bd25e358f6 100644 --- a/monai/networks/utils.py +++ b/monai/networks/utils.py @@ -14,7 +14,7 @@ import warnings from contextlib import contextmanager -from typing import Any, Callable, Optional, Sequence, cast +from typing import Any, Callable, Optional, Sequence import torch import torch.nn as nn @@ -86,10 +86,10 @@ def predict_segmentation( threshold: thresholding the prediction values if multi-labels task. 
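
        Example (a sketch of the two modes, based on the code below):

            .. code-block:: python

                import torch
                from monai.networks.utils import predict_segmentation

                logits = torch.tensor([[[-1.0], [2.0]]])  # (batch=1, channels=2, spatial=1)
                predict_segmentation(logits, mutually_exclusive=False, threshold=0.0)
                # -> tensor([[[0], [1]]]): per-channel thresholding
                predict_segmentation(logits, mutually_exclusive=True)
                # -> tensor([[[1]]]): argmax over the channel dimension (keepdim)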
""" if not mutually_exclusive: - return (cast(torch.Tensor, logits >= threshold)).int() + return (logits >= threshold).int() if logits.shape[1] == 1: warnings.warn("single channel prediction, `mutually_exclusive=True` ignored, use threshold instead.") - return (cast(torch.Tensor, logits >= threshold)).int() + return (logits >= threshold).int() return logits.argmax(1, keepdim=True) diff --git a/tests/test_distributed_sampler.py b/tests/test_distributed_sampler.py index d0054885eb..0a439874bd 100644 --- a/tests/test_distributed_sampler.py +++ b/tests/test_distributed_sampler.py @@ -24,6 +24,7 @@ def test_even(self): data = [1, 2, 3, 4, 5] sampler = DistributedSampler(dataset=data, shuffle=False) samples = np.array([data[i] for i in list(sampler)]) + self.assertEqual(dist.get_rank(), sampler.rank) if dist.get_rank() == 0: np.testing.assert_allclose(samples, np.array([1, 3, 5])) @@ -35,6 +36,7 @@ def test_uneven(self): data = [1, 2, 3, 4, 5] sampler = DistributedSampler(dataset=data, shuffle=False, even_divisible=False) samples = np.array([data[i] for i in list(sampler)]) + self.assertEqual(dist.get_rank(), sampler.rank) if dist.get_rank() == 0: np.testing.assert_allclose(samples, np.array([1, 3, 5])) diff --git a/tests/utils.py b/tests/utils.py index 8b367158b2..3636cbe974 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -16,6 +16,7 @@ import queue import sys import tempfile +import time import traceback import unittest import warnings @@ -273,6 +274,7 @@ def run_process(self, func, local_rank, args, kwargs, results): os.environ["RANK"] = str(self.nproc_per_node * self.node_rank + local_rank) if torch.cuda.is_available(): + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" torch.cuda.set_device(int(local_rank)) dist.init_process_group( @@ -283,6 +285,11 @@ def run_process(self, func, local_rank, args, kwargs, results): rank=int(os.environ["RANK"]), ) func(*args, **kwargs) + # the primary node lives longer to + # avoid _store_based_barrier, RuntimeError: Broken pipe + # as the TCP store daemon is on the rank 0 + if int(os.environ["RANK"]) == 0: + time.sleep(0.1) results.put(True) except Exception as e: results.put(False) From 75b577224ab7843c31e2012b8da1aa275e34d4b4 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Fri, 5 Mar 2021 00:52:22 +0000 Subject: [PATCH 028/457] fixes data type in switching (#1694) * fixes data type in switching Signed-off-by: Wenqi Li * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot --- monai/transforms/io/array.py | 2 +- tests/test_nifti_endianness.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index a256a16ec8..de9a8800a2 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -42,7 +42,7 @@ def switch_endianness(data, old, new): if data.dtype.byteorder == old: data = data.newbyteorder(new) elif isinstance(data, tuple): - data = (switch_endianness(x, old, new) for x in data) + data = tuple(switch_endianness(x, old, new) for x in data) elif isinstance(data, list): data = [switch_endianness(x, old, new) for x in data] elif isinstance(data, dict): diff --git a/tests/test_nifti_endianness.py b/tests/test_nifti_endianness.py index 14317c0832..d8adb1efb2 100644 --- a/tests/test_nifti_endianness.py +++ b/tests/test_nifti_endianness.py @@ -8,6 +8,7 @@ from monai.data import DataLoader, Dataset, create_test_image_2d from monai.transforms import LoadImage, LoadImaged +from monai.transforms.io.array import switch_endianness from 
monai.utils.module import optional_import if TYPE_CHECKING: @@ -43,6 +44,11 @@ def test_endianness(self, endianness, use_array, image_only): check_loader = DataLoader(check_ds, batch_size=1) _ = next(iter(check_loader)) + def test_switch(self): # verify data types + for data in (np.zeros((2, 1)), ("test",), [24, 42], {"foo": "bar"}, True, 42): + output = switch_endianness(data, ">", "<") + self.assertEqual(type(data), type(output)) + if __name__ == "__main__": unittest.main() From 889c9f941270915b0594701c58020c7a92c3b057 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Fri, 5 Mar 2021 00:08:28 -0500 Subject: [PATCH 029/457] WSI reader (#1548) * Implement CuImageReader and OpenSlideReader Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add unittests for CuImageReader Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add unittests for OpenSlideReader Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Sort imports Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add correct boundaries Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add test cases for reading patches on a grid for CuImage Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add patch whole slide imaging dataset for pathology Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add test case for read patches for OpenSlide Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * flake8 and few minor changes Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * black Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * flake8 Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add kwargs to CuImageReader and OpenSlideReader's read method Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Change the type hint from np.dtype to DTypeLike Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix a bug Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Implement WSIReader and unittests Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Minor updates Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix few typing issues Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Revert datasets Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add shape property to openslide image object Reverse size to be compatible with output size (hxw) Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add untittest for loading the whole image Reverse the size accroding to the WSIReader Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update the whole image size Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Remove optional size Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Remove optional dtype Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Remove _get_spatial_shape return type Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Reverse the orders of dimensions of `location` to be compatible with image shape Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Change test cases to use smaller image and revese location's dimensions Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Replace the test TIFF and some upgrades Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update dependencies 
for OpenSlide Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update unittests for OpenSlide and CuImage Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix openslide dependency Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix doc dependencies Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Minor changes Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Few variable name changes Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add EnsureChannelFirst Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add metadata to WSIReader Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- docs/requirements.txt | 1 + docs/source/data.rst | 4 + monai/data/__init__.py | 2 +- monai/data/image_reader.py | 161 ++++++++++++++++++++++++++++++++- requirements-dev.txt | 1 + setup.cfg | 3 + tests/test_cuimage_reader.py | 103 +++++++++++++++++++++ tests/test_openslide_reader.py | 103 +++++++++++++++++++++ 8 files changed, 373 insertions(+), 5 deletions(-) create mode 100644 tests/test_cuimage_reader.py create mode 100644 tests/test_openslide_reader.py diff --git a/docs/requirements.txt b/docs/requirements.txt index d046bc53cf..cd06166359 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,6 +4,7 @@ pytorch-ignite==0.4.2 numpy>=1.17 itk>=5.0 nibabel +openslide-python==1.1.2 parameterized scikit-image>=0.14.2 tensorboard diff --git a/docs/source/data.rst b/docs/source/data.rst index 11609964c3..eed4b30ded 100644 --- a/docs/source/data.rst +++ b/docs/source/data.rst @@ -105,6 +105,10 @@ PILReader .. autoclass:: PILReader :members: +WSIReader +~~~~~~~~~ +.. autoclass:: WSIReader + :members: Nifti format handling --------------------- diff --git a/monai/data/__init__.py b/monai/data/__init__.py index 3dd0a980ef..54ee7908f4 100644 --- a/monai/data/__init__.py +++ b/monai/data/__init__.py @@ -24,7 +24,7 @@ from .decathlon_datalist import load_decathlon_datalist, load_decathlon_properties from .grid_dataset import GridPatchDataset, PatchDataset from .image_dataset import ImageDataset -from .image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader +from .image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader, WSIReader from .iterable_dataset import IterableDataset from .nifti_saver import NiftiSaver from .nifti_writer import write_nifti diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index dfbdaf5b41..76bf1817dc 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -19,26 +19,31 @@ from monai.config import DtypeLike, KeysCollection from monai.data.utils import correct_nifti_header_if_necessary +from monai.transforms.utility.array import EnsureChannelFirst from monai.utils import ensure_tuple, optional_import from .utils import is_supported_format if TYPE_CHECKING: + import cuimage import itk # type: ignore import nibabel as nib + import openslide from itk import Image # type: ignore from nibabel.nifti1 import Nifti1Image from PIL import Image as PILImage - has_itk = has_nib = has_pil = True + has_itk = has_nib = has_pil = has_cux = has_osl = True else: itk, has_itk = optional_import("itk", allow_namespace_pkg=True) Image, _ = optional_import("itk", allow_namespace_pkg=True, name="Image") nib, has_nib = optional_import("nibabel") Nifti1Image, _ = optional_import("nibabel.nifti1", name="Nifti1Image") PILImage, has_pil = optional_import("PIL.Image") + cuimage, has_cux = 
optional_import("cuimage") + openslide, has_osl = optional_import("openslide") -__all__ = ["ImageReader", "ITKReader", "NibabelReader", "NumpyReader", "PILReader"] +__all__ = ["ImageReader", "ITKReader", "NibabelReader", "NumpyReader", "PILReader", "WSIReader"] class ImageReader(ABC): @@ -264,10 +269,10 @@ def _get_affine(self, img) -> np.ndarray: origin = np.asarray(img.GetOrigin()) direction = np.asarray(direction) - affine = np.eye(direction.shape[0] + 1) + affine: np.ndarray = np.eye(direction.shape[0] + 1) affine[(slice(-1), slice(-1))] = direction @ np.diag(spacing) affine[(slice(-1), -1)] = origin - return np.asarray(affine) + return affine def _get_spatial_shape(self, img) -> np.ndarray: """ @@ -626,3 +631,151 @@ def _get_spatial_shape(self, img) -> np.ndarray: """ # the img data should have no channel dim or the last dim is channel return np.asarray((img.width, img.height)) + + +class WSIReader(ImageReader): + """ + Read whole slide imaging and extract patches + + """ + + def __init__(self, reader_lib: str = "cuClaraImage"): + super().__init__() + self.reader_lib = reader_lib.lower() + if self.reader_lib == "openslide": + self.wsi_reader = openslide.OpenSlide + print("> OpenSlide is being used.") + elif self.reader_lib == "cuclaraimage": + self.wsi_reader = cuimage.CuImage + print("> CuImage is being used.") + else: + raise ValueError('`reader_lib` should be either "cuClaraImage" or "OpenSlide"') + + def verify_suffix(self, filename: Union[Sequence[str], str]) -> bool: + """ + Verify whether the specified file or files format is supported by WSI reader. + + Args: + filename: file name or a list of file names to read. + if a list of files, verify all the suffixes. + """ + return is_supported_format(filename, ["tif", "tiff"]) + + def read(self, data: Union[Sequence[str], str, np.ndarray], **kwargs): + """ + Read image data from specified file or files. + Note that the returned object is CuImage or list of CuImage objects. + + Args: + data: file name or a list of file names to read. + + """ + img_: List = [] + + filenames: Sequence[str] = ensure_tuple(data) + for name in filenames: + img = self.wsi_reader(name) + if self.reader_lib == "openslide": + img.shape = (img.dimensions[1], img.dimensions[0], 3) + img_.append(img) + + return img_ if len(filenames) > 1 else img_[0] + + def get_data( + self, + img, + location: Tuple[int, int] = (0, 0), + size: Optional[Tuple[int, int]] = None, + level: int = 0, + dtype: DtypeLike = np.uint8, + grid_shape: Tuple[int, int] = (1, 1), + patch_size: Optional[int] = None, + ): + """ + Extract regions as numpy array from WSI image and return them. 
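+
+        A typical call (a sketch; the file name is illustrative and must point
+        to a TIFF file readable by the chosen backend):
+
+            .. code-block:: python
+
+                from monai.data import WSIReader
+
+                reader = WSIReader(reader_lib="OpenSlide")
+                img_obj = reader.read("CMU-1.tiff")
+                # a 256x256 region from the top-left corner at level 0
+                img, meta = reader.get_data(img_obj, location=(0, 0), size=(256, 256), level=0)
+                # img is channel-first: shape (3, 256, 256)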
+
+        Args:
+            img: a WSIReader image object loaded from a file, or list of CuImage objects
+            location: (x_min, y_min) tuple giving the top left pixel in the level 0 reference frame,
+                or list of tuples (default=(0, 0))
+            size: (height, width) tuple giving the region size, or list of tuples (defaults to the full image size)
+                This is the size of the image at the given level (`level`)
+            level: the level number, or list of level numbers (default=0)
+            dtype: the data type of output image
+            grid_shape: (rows, columns) tuple defining the grid on which patches are extracted
+            patch_size: (height, width) the size of extracted patches at the given level
+        """
+        if size is None:
+            if location == (0, 0):
+                # the maximum size is set to (H, W) at the given level
+                size = (img.shape[0] // (2 ** level), img.shape[1] // (2 ** level))
+                print(f"Reading the whole image at level={level} with shape={size}")
+            else:
+                raise ValueError("Size needs to be provided to extract the region!")
+
+        region = self._extract_region(img, location=location, size=size, level=level, dtype=dtype)
+
+        metadata: Dict = {}
+        metadata["spatial_shape"] = size
+        metadata["original_channel_dim"] = -1
+        region = EnsureChannelFirst()(region, metadata)
+
+        if patch_size is None:
+            patches = region
+        else:
+            patches = self._extract_patches(
+                region, patch_size=(patch_size, patch_size), grid_shape=grid_shape, dtype=dtype
+            )
+
+        return patches, metadata
+
+    def _extract_region(
+        self,
+        img_obj,
+        size: Tuple[int, int],
+        location: Tuple[int, int] = (0, 0),
+        level: int = 0,
+        dtype: DtypeLike = np.uint8,
+    ):
+        # reverse the order of dimensions for size and location to be compatible with image shape
+        size = size[::-1]
+        location = location[::-1]
+        region = img_obj.read_region(location=location, size=size, level=level)
+        if self.reader_lib == "openslide":
+            region = region.convert("RGB")
+        # convert to numpy
+        region = np.asarray(region, dtype=dtype)
+
+        return region
+
+    def _extract_patches(
+        self,
+        region: np.ndarray,
+        grid_shape: Tuple[int, int] = (1, 1),
+        patch_size: Optional[Tuple[int, int]] = None,
+        dtype: DtypeLike = np.uint8,
+    ):
+        if patch_size is None and grid_shape == (1, 1):
+            return region
+
+        n_patches = grid_shape[0] * grid_shape[1]
+        region_size = region.shape[1:]
+
+        if patch_size is None:
+            patch_size = (region_size[0] // grid_shape[0], region_size[1] // grid_shape[1])
+
+        # split the region into patches on the grid and center crop them to patch size
+        flat_patch_grid = np.zeros((n_patches, 3, patch_size[0], patch_size[1]), dtype=dtype)
+        start_points = [
+            np.round(region_size[i] * (0.5 + np.arange(grid_shape[i])) / grid_shape[i] - patch_size[i] / 2).astype(int)
+            for i in range(2)
+        ]
+        idx = 0
+        for y_start in start_points[1]:
+            for x_start in start_points[0]:
+                x_end = x_start + patch_size[0]
+                y_end = y_start + patch_size[1]
+                flat_patch_grid[idx] = region[:, x_start:x_end, y_start:y_end]
+                idx += 1
+
+        return flat_patch_grid
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 2a43e63d73..3eeab474b6 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -30,3 +30,4 @@ Sphinx==3.3.0
 recommonmark==0.6.0
 sphinx-autodoc-typehints==1.11.1
 sphinx-rtd-theme==0.5.0
+openslide-python==1.1.2
diff --git a/setup.cfg b/setup.cfg
index ea61eadd92..f18b4610fd 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -32,6 +32,7 @@ all =
     torchvision
     itk>=5.0
     tqdm>=4.47.0
+    openslide-python==1.1.2
 nibabel =
     nibabel
 skimage =
@@ -54,6 +55,8 @@ lmdb =
     lmdb
 psutil =
     psutil
+openslide =
+    openslide-python==1.1.2
 [flake8]
 select = B,C,E,F,N,P,T4,W,B9
diff --git 
a/tests/test_cuimage_reader.py b/tests/test_cuimage_reader.py new file mode 100644 index 0000000000..7cdf692a30 --- /dev/null +++ b/tests/test_cuimage_reader.py @@ -0,0 +1,103 @@ +import os +import unittest +from unittest import skipUnless +from urllib import request + +import numpy as np +from numpy.testing import assert_array_equal +from parameterized import parameterized + +from monai.data.image_reader import WSIReader +from monai.utils import optional_import + +_, has_cui = optional_import("cuimage") + + +FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" +HEIGHT = 32914 +WIDTH = 46000 + +TEST_CASE_0 = [FILE_URL, (3, HEIGHT, WIDTH)] + +TEST_CASE_1 = [ + FILE_URL, + {"location": (HEIGHT // 2, WIDTH // 2), "size": (2, 1), "level": 0}, + np.array([[[246], [246]], [[246], [246]], [[246], [246]]]), +] + +TEST_CASE_2 = [ + FILE_URL, + {"location": (0, 0), "size": (2, 1), "level": 2}, + np.array([[[239], [239]], [[239], [239]], [[239], [239]]]), +] + +TEST_CASE_3 = [ + FILE_URL, + { + "location": (0, 0), + "size": (8, 8), + "level": 2, + "grid_shape": (2, 1), + "patch_size": 2, + }, + np.array( + [ + [[[239, 239], [239, 239]], [[239, 239], [239, 239]], [[239, 239], [239, 239]]], + [[[242, 242], [242, 243]], [[242, 242], [242, 243]], [[242, 242], [242, 243]]], + ] + ), +] + +TEST_CASE_4 = [ + FILE_URL, + { + "location": (0, 0), + "size": (8, 8), + "level": 2, + "grid_shape": (2, 1), + "patch_size": 1, + }, + np.array([[[[239]], [[239]], [[239]]], [[[243]], [[243]], [[243]]]]), +] + + +class TestCuClaraImageReader(unittest.TestCase): + @parameterized.expand([TEST_CASE_0]) + @skipUnless(has_cui, "Requires CuClaraImage") + def test_read_whole_image(self, file_url, expected_shape): + filename = self.camelyon_data_download(file_url) + reader = WSIReader("CuClaraImage") + img_obj = reader.read(filename) + img = reader.get_data(img_obj)[0] + self.assertTupleEqual(img.shape, expected_shape) + + @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) + @skipUnless(has_cui, "Requires CuClaraImage") + def test_read_region(self, file_url, patch_info, expected_img): + filename = self.camelyon_data_download(file_url) + reader = WSIReader("CuClaraImage") + img_obj = reader.read(filename) + img = reader.get_data(img_obj, **patch_info)[0] + self.assertTupleEqual(img.shape, expected_img.shape) + self.assertIsNone(assert_array_equal(img, expected_img)) + + @parameterized.expand([TEST_CASE_3, TEST_CASE_4]) + @skipUnless(has_cui, "Requires CuClaraImage") + def test_read_patches(self, file_url, patch_info, expected_img): + filename = self.camelyon_data_download(file_url) + reader = WSIReader("CuClaraImage") + img_obj = reader.read(filename) + img = reader.get_data(img_obj, **patch_info)[0] + self.assertTupleEqual(img.shape, expected_img.shape) + self.assertIsNone(assert_array_equal(img, expected_img)) + + def camelyon_data_download(self, file_url): + filename = os.path.basename(file_url) + if not os.path.exists(filename): + print(f"Test image [{filename}] does not exist. 
Downloading...") + request.urlretrieve(file_url, filename) + return filename + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_openslide_reader.py b/tests/test_openslide_reader.py new file mode 100644 index 0000000000..e1f9187937 --- /dev/null +++ b/tests/test_openslide_reader.py @@ -0,0 +1,103 @@ +import os +import unittest +from unittest import skipUnless +from urllib import request + +import numpy as np +from numpy.testing import assert_array_equal +from parameterized import parameterized + +from monai.data.image_reader import WSIReader +from monai.utils import optional_import + +_, has_osl = optional_import("openslide") + + +FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" +HEIGHT = 32914 +WIDTH = 46000 + +TEST_CASE_0 = [FILE_URL, (3, HEIGHT, WIDTH)] + +TEST_CASE_1 = [ + FILE_URL, + {"location": (HEIGHT // 2, WIDTH // 2), "size": (2, 1), "level": 0}, + np.array([[[246], [246]], [[246], [246]], [[246], [246]]]), +] + +TEST_CASE_2 = [ + FILE_URL, + {"location": (0, 0), "size": (2, 1), "level": 2}, + np.array([[[239], [239]], [[239], [239]], [[239], [239]]]), +] + +TEST_CASE_3 = [ + FILE_URL, + { + "location": (0, 0), + "size": (8, 8), + "level": 2, + "grid_shape": (2, 1), + "patch_size": 2, + }, + np.array( + [ + [[[239, 239], [239, 239]], [[239, 239], [239, 239]], [[239, 239], [239, 239]]], + [[[242, 242], [242, 243]], [[242, 242], [242, 243]], [[242, 242], [242, 243]]], + ] + ), +] + +TEST_CASE_4 = [ + FILE_URL, + { + "location": (0, 0), + "size": (8, 8), + "level": 2, + "grid_shape": (2, 1), + "patch_size": 1, + }, + np.array([[[[239]], [[239]], [[239]]], [[[243]], [[243]], [[243]]]]), +] + + +class TestOpenSlideReader(unittest.TestCase): + @parameterized.expand([TEST_CASE_0]) + @skipUnless(has_osl, "Requires OpenSlide") + def test_read_whole_image(self, file_url, expected_shape): + filename = self.camelyon_data_download(file_url) + reader = WSIReader("OpenSlide") + img_obj = reader.read(filename) + img = reader.get_data(img_obj)[0] + self.assertTupleEqual(img.shape, expected_shape) + + @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) + @skipUnless(has_osl, "Requires OpenSlide") + def test_read_region(self, file_url, patch_info, expected_img): + filename = self.camelyon_data_download(file_url) + reader = WSIReader("OpenSlide") + img_obj = reader.read(filename) + img = reader.get_data(img_obj, **patch_info)[0] + self.assertTupleEqual(img.shape, expected_img.shape) + self.assertIsNone(assert_array_equal(img, expected_img)) + + @parameterized.expand([TEST_CASE_3, TEST_CASE_4]) + @skipUnless(has_osl, "Requires OpenSlide") + def test_read_patches(self, file_url, patch_info, expected_img): + filename = self.camelyon_data_download(file_url) + reader = WSIReader("OpenSlide") + img_obj = reader.read(filename) + img = reader.get_data(img_obj, **patch_info)[0] + self.assertTupleEqual(img.shape, expected_img.shape) + self.assertIsNone(assert_array_equal(img, expected_img)) + + def camelyon_data_download(self, file_url): + filename = os.path.basename(file_url) + if not os.path.exists(filename): + print(f"Test image [{filename}] does not exist. 
Downloading...") + request.urlretrieve(file_url, filename) + return filename + + +if __name__ == "__main__": + unittest.main() From 232e8a524c7074ceae4c45b756bfddc67772c05f Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Fri, 5 Mar 2021 21:47:56 +0800 Subject: [PATCH 030/457] [DLMED] add WSIReader to LoadImage (#1695) Signed-off-by: Nic Ma --- monai/data/image_reader.py | 10 ++++++---- monai/transforms/io/array.py | 7 ++++--- monai/transforms/io/dictionary.py | 2 +- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index 76bf1817dc..16e8514f48 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -643,11 +643,13 @@ def __init__(self, reader_lib: str = "cuClaraImage"): super().__init__() self.reader_lib = reader_lib.lower() if self.reader_lib == "openslide": - self.wsi_reader = openslide.OpenSlide - print("> OpenSlide is being used.") + if has_osl: + self.wsi_reader = openslide.OpenSlide + print("> OpenSlide is being used.") elif self.reader_lib == "cuclaraimage": - self.wsi_reader = cuimage.CuImage - print("> CuImage is being used.") + if has_cux: + self.wsi_reader = cuimage.CuImage + print("> CuImage is being used.") else: raise ValueError('`reader_lib` should be either "cuClaraImage" or "OpenSlide"') diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index de9a8800a2..002bfd8242 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -19,7 +19,7 @@ import torch from monai.config import DtypeLike -from monai.data.image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader +from monai.data.image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader, WSIReader from monai.data.nifti_saver import NiftiSaver from monai.data.png_saver import PNGSaver from monai.transforms.transform import Transform @@ -78,7 +78,7 @@ def __init__( reader: register reader to load image file and meta data, if None, still can register readers at runtime or use the default readers. If a string of reader name provided, will construct a reader object with the `*args` and `**kwargs` parameters, supported reader name: "NibabelReader", - "PILReader", "ITKReader", "NumpyReader" + "PILReader", "ITKReader", "NumpyReader", "WSIReader". image_only: if True return only the image volume, otherwise return image data array and header dict. dtype: if not None convert the loaded image to this data type. args: additional parameters for reader if providing a reader name. @@ -90,7 +90,7 @@ def __init__( """ # set predefined readers as default - self.readers: List[ImageReader] = [ITKReader(), NumpyReader(), PILReader(), NibabelReader()] + self.readers: List[ImageReader] = [ITKReader(), NumpyReader(), PILReader(), NibabelReader(), WSIReader()] if reader is not None: if isinstance(reader, str): supported_readers = { @@ -98,6 +98,7 @@ def __init__( "pilreader": PILReader, "itkreader": ITKReader, "numpyreader": NumpyReader, + "wsireader": WSIReader, } reader = reader.lower() if reader not in supported_readers: diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index 50ab8f9868..7f663ea303 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -71,7 +71,7 @@ def __init__( reader: register reader to load image file and meta data, if None, still can register readers at runtime or use the default readers. 
If a string of reader name provided, will construct a reader object with the `*args` and `**kwargs` parameters, supported reader name: "NibabelReader", - "PILReader", "ITKReader", "NumpyReader" + "PILReader", "ITKReader", "NumpyReader", "WSIReader". dtype: if not None convert the loaded image data to this data type. meta_key_postfix: use `key_{postfix}` to store the metadata of the nifti image, default is `meta_dict`. The meta data is a dictionary object. From 66811fee151224996ac1500ef7cc91e389d7ef70 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Sun, 7 Mar 2021 10:19:12 -0500 Subject: [PATCH 031/457] Raise import error for openslide and cuimage (#1699) Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/data/image_reader.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index 16e8514f48..39fc1f46b4 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -639,7 +639,7 @@ class WSIReader(ImageReader): """ - def __init__(self, reader_lib: str = "cuClaraImage"): + def __init__(self, reader_lib: str = "OpenSlide"): super().__init__() self.reader_lib = reader_lib.lower() if self.reader_lib == "openslide": @@ -672,6 +672,11 @@ def read(self, data: Union[Sequence[str], str, np.ndarray], **kwargs): data: file name or a list of file names to read. """ + if (self.reader_lib == "openslide") and (not has_osl): + raise ImportError("No module named 'openslide'") + elif (self.reader_lib == "cuclaraimage") and (not has_cux): + raise ImportError("No module named 'cuimage'") + img_: List = [] filenames: Sequence[str] = ensure_tuple(data) From b25b3d3c924d9286af2dec941b8e99b433413b53 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 8 Mar 2021 06:15:16 +0000 Subject: [PATCH 032/457] 1604 Revise docstring cam (#1705) * update docstring Signed-off-by: Wenqi Li * revise docstring Signed-off-by: Wenqi Li --- monai/visualize/class_activation_maps.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/monai/visualize/class_activation_maps.py b/monai/visualize/class_activation_maps.py index 624d054500..c6051caa61 100644 --- a/monai/visualize/class_activation_maps.py +++ b/monai/visualize/class_activation_maps.py @@ -173,6 +173,17 @@ def feature_map_size(self, input_size, device="cpu", layer_idx=-1): return self.compute_map(torch.zeros(*input_size, device=device), layer_idx=layer_idx).shape def compute_map(self, x, class_idx=None, layer_idx=-1): + """ + Compute the actual feature map with input tensor `x`. + + Args: + x: input to `nn_module`. + class_idx: index of the class to be visualized. Default to `None` (computing `class_idx` from `argmax`) + layer_idx: index of the target layer if there are multiple target layers. Defaults to -1. + + Returns: + activation maps (raw outputs without upsampling/post-processing.) + """ raise NotImplementedError() def _upsample_and_post_process(self, acti_map, x): @@ -191,6 +202,10 @@ def __call__(self): class CAM(CAMBase): """ Compute class activation map from the last fully-connected layers before the spatial pooling. + This implementation is based on: + + Zhou et al., Learning Deep Features for Discriminative Localization. CVPR '16, + https://arxiv.org/abs/1512.04150 Examples @@ -255,9 +270,6 @@ def __init__( self.fc_layers = fc_layers def compute_map(self, x, class_idx=None, layer_idx=-1): - """ - Compute the actual feature map with input tensor `x`. 
- """ logits, acti, _ = self.nn_module(x) acti = acti[layer_idx] if class_idx is None: @@ -326,9 +338,6 @@ class GradCAM(CAMBase): """ def compute_map(self, x, class_idx=None, retain_graph=False, layer_idx=-1): - """ - Compute the actual feature map with input tensor `x`. - """ _, acti, grad = self.nn_module(x, class_idx=class_idx, retain_graph=retain_graph) acti, grad = acti[layer_idx], grad[layer_idx] b, c, *spatial = grad.shape @@ -368,9 +377,6 @@ class GradCAMpp(GradCAM): """ def compute_map(self, x, class_idx=None, retain_graph=False, layer_idx=-1): - """ - Compute the actual feature map with input tensor `x`. - """ _, acti, grad = self.nn_module(x, class_idx=class_idx, retain_graph=retain_graph) acti, grad = acti[layer_idx], grad[layer_idx] b, c, *spatial = grad.shape From ce6628a202fd04f8029db6345217ebda9edb8151 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Mon, 8 Mar 2021 16:57:52 +0800 Subject: [PATCH 033/457] 1704 Add Affined transform (#1709) * [DLMED] add Affined transform Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot --- docs/source/transforms.rst | 6 ++ monai/transforms/__init__.py | 3 + monai/transforms/spatial/dictionary.py | 78 +++++++++++++++++++ tests/test_affined.py | 104 +++++++++++++++++++++++++ 4 files changed, 191 insertions(+) create mode 100644 tests/test_affined.py diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index dd10176de9..3bc8d0899a 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -851,6 +851,12 @@ Spatial (Dict) :members: :special-members: __call__ +`Affined` +""""""""" +.. autoclass:: Affined + :members: + :special-members: __call__ + `RandAffined` """"""""""""" .. autoclass:: RandAffined diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index a8d647b657..14fe71728b 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -195,6 +195,9 @@ Zoom, ) from .spatial.dictionary import ( + Affined, + AffineD, + AffineDict, Flipd, FlipD, FlipDict, diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index a81aeb432b..d9d38242fb 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -24,6 +24,7 @@ from monai.networks.layers.simplelayers import GaussianFilter from monai.transforms.croppad.array import CenterSpatialCrop from monai.transforms.spatial.array import ( + Affine, Flip, Orientation, Rand2DElastic, @@ -53,6 +54,7 @@ "Rotate90d", "RandRotate90d", "Resized", + "Affined", "RandAffined", "Rand2DElasticd", "Rand3DElasticd", @@ -73,6 +75,8 @@ "RandRotate90Dict", "ResizeD", "ResizeDict", + "AffineD", + "AffineDict", "RandAffineD", "RandAffineDict", "Rand2DElasticD", @@ -378,6 +382,79 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +class Affined(RandomizableTransform, MapTransform): + """ + Dictionary-based wrapper of :py:class:`monai.transforms.Affine`. 
+ """ + + def __init__( + self, + keys: KeysCollection, + rotate_params: Optional[Union[Sequence[float], float]] = None, + shear_params: Optional[Union[Sequence[float], float]] = None, + translate_params: Optional[Union[Sequence[float], float]] = None, + scale_params: Optional[Union[Sequence[float], float]] = None, + spatial_size: Optional[Union[Sequence[int], int]] = None, + mode: GridSampleModeSequence = GridSampleMode.BILINEAR, + padding_mode: GridSamplePadModeSequence = GridSamplePadMode.REFLECTION, + as_tensor_output: bool = False, + device: Optional[torch.device] = None, + allow_missing_keys: bool = False, + ) -> None: + """ + Args: + keys: keys of the corresponding items to be transformed. + rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D. + Defaults to no rotation. + shear_params: a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing. + translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in + pixel/voxel relative to the center of the input image. Defaults to no translation. + scale_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Defaults to no scaling. + spatial_size: output image spatial size. + if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1, + the transform will use the spatial size of `img`. + if the components of the `spatial_size` are non-positive values, the transform will use the + corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted + to `(32, 64)` if the second spatial dimension size of img is `64`. + mode: {``"bilinear"``, ``"nearest"``} + Interpolation mode to calculate output values. Defaults to ``"bilinear"``. + See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample + It also can be a sequence of string, each element corresponds to a key in ``keys``. + padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} + Padding mode for outside grid values. Defaults to ``"reflection"``. + See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample + It also can be a sequence of string, each element corresponds to a key in ``keys``. + as_tensor_output: the computation is implemented using pytorch tensors, this option specifies + whether to convert it back to numpy arrays. + device: device on which the tensor will be allocated. + allow_missing_keys: don't raise exception if key is missing. + + See also: + - :py:class:`monai.transforms.compose.MapTransform` + - :py:class:`RandAffineGrid` for the random affine parameters configurations. + """ + MapTransform.__init__(self, keys, allow_missing_keys) + self.affine = Affine( + rotate_params=rotate_params, + shear_params=shear_params, + translate_params=translate_params, + scale_params=scale_params, + spatial_size=spatial_size, + as_tensor_output=as_tensor_output, + device=device, + ) + self.mode = ensure_tuple_rep(mode, len(self.keys)) + self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys)) + + def __call__( + self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]] + ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: + d = dict(data) + for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + d[key] = self.affine(d[key], mode=mode, padding_mode=padding_mode) + return d + + class RandAffined(RandomizableTransform, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.RandAffine`. 
@@ -1122,6 +1199,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda Rotate90D = Rotate90Dict = Rotate90d RandRotate90D = RandRotate90Dict = RandRotate90d ResizeD = ResizeDict = Resized +AffineD = AffineDict = Affined RandAffineD = RandAffineDict = RandAffined Rand2DElasticD = Rand2DElasticDict = Rand2DElasticd Rand3DElasticD = Rand3DElasticDict = Rand3DElasticd diff --git a/tests/test_affined.py b/tests/test_affined.py new file mode 100644 index 0000000000..96e6d72fe5 --- /dev/null +++ b/tests/test_affined.py @@ -0,0 +1,104 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.transforms import Affined + +TEST_CASES = [ + [ + dict(keys="img", padding_mode="zeros", as_tensor_output=False, spatial_size=(-1, 0), device=None), + {"img": np.arange(9).reshape((1, 3, 3))}, + np.arange(9).reshape(1, 3, 3), + ], + [ + dict(keys="img", padding_mode="zeros", as_tensor_output=False, device=None), + {"img": np.arange(4).reshape((1, 2, 2))}, + np.arange(4).reshape(1, 2, 2), + ], + [ + dict(keys="img", padding_mode="zeros", spatial_size=(4, 4), as_tensor_output=False, device=None), + {"img": np.arange(4).reshape((1, 2, 2))}, + np.array([[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 2.0, 3.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]), + ], + [ + dict( + keys="img", + rotate_params=[np.pi / 2], + padding_mode="zeros", + spatial_size=(4, 4), + as_tensor_output=False, + device=None, + ), + {"img": np.arange(4).reshape((1, 2, 2))}, + np.array([[[0.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 3.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]), + ], + [ + dict(keys="img", padding_mode="zeros", spatial_size=(-1, 0, 0), as_tensor_output=False, device=None), + {"img": np.arange(27).reshape((1, 3, 3, 3))}, + np.arange(27).reshape(1, 3, 3, 3), + ], + [ + dict(keys="img", padding_mode="zeros", spatial_size=(4, 4, 4), as_tensor_output=False, device=None), + {"img": np.arange(8).reshape((1, 2, 2, 2))}, + np.array( + [ + [ + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 2.0, 3.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 4.0, 5.0, 0.0], [0.0, 6.0, 7.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + ] + ] + ), + ], + [ + dict( + keys="img", + rotate_params=[np.pi / 2], + padding_mode="zeros", + spatial_size=(4, 4, 4), + as_tensor_output=False, + device=None, + ), + {"img": np.arange(8).reshape((1, 2, 2, 2))}, + np.array( + [ + [ + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 3.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 6.0, 4.0, 0.0], [0.0, 7.0, 5.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 
0.0, 0.0]], + ] + ] + ), + ], +] + + +class TestAffined(unittest.TestCase): + @parameterized.expand(TEST_CASES) + def test_affine(self, input_param, input_data, expected_val): + g = Affined(**input_param) + result = g(input_data)["img"] + self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor)) + if isinstance(result, torch.Tensor): + np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4) + else: + np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) + + +if __name__ == "__main__": + unittest.main() From 133f42aca81d7781e03140af072a4ec0fc799917 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 8 Mar 2021 10:12:44 +0000 Subject: [PATCH 034/457] 1693 update for pytorch 1.8 (#1700) * update for torch 1.8.0 Signed-off-by: Wenqi Li * mute codecov upload error Signed-off-by: Wenqi Li --- .github/workflows/cron.yml | 13 +++++------ .github/workflows/integration.yml | 6 +++--- .github/workflows/pythonapp.yml | 36 +++++++++++++++++++------------ .github/workflows/setupapp.yml | 6 +++--- tests/utils.py | 4 ++-- 5 files changed, 35 insertions(+), 30 deletions(-) diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index 98834dbc8f..96368ba6d4 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -13,7 +13,7 @@ jobs: runs-on: [self-hosted, linux, x64, common] strategy: matrix: - pytorch-version: [1.5.0, 1.5.1, 1.6.0, latest] + pytorch-version: [1.5.1, 1.6.0, 1.7.1, latest] steps: - uses: actions/checkout@v2 - name: Install the dependencies @@ -23,15 +23,12 @@ jobs: python -m pip uninstall -y torch torchvision if [ ${{ matrix.pytorch-version }} == "latest" ]; then python -m pip install torch torchvision - elif [ ${{ matrix.pytorch-version }} == "1.5.0" ]; then - python -m pip install torch==1.5.0 - python -m pip install torchvision==0.6.0 elif [ ${{ matrix.pytorch-version }} == "1.5.1" ]; then - python -m pip install torch==1.5.1 - python -m pip install torchvision==0.6.1 + python -m pip install torch==1.5.1 torchvision==0.6.1 elif [ ${{ matrix.pytorch-version }} == "1.6.0" ]; then - python -m pip install torch==1.6.0 - python -m pip install torchvision==0.7.0 + python -m pip install torch==1.6.0 torchvision==0.7.0 + elif [ ${{ matrix.pytorch-version }} == "1.7.1" ]; then + python -m pip install torch==1.7.1 torchvision==0.8.2 fi python -m pip install -r requirements-dev.txt python -m pip list diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 003a746de4..66f6c2956d 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -7,7 +7,7 @@ on: jobs: integration-py3: container: - image: nvcr.io/nvidia/pytorch:20.03-py3 # CUDA 10.2 + image: nvcr.io/nvidia/pytorch:20.12-py3 # CUDA 11.1, default for PT 1.8.0 options: --gpus all runs-on: [self-hosted, linux, x64, common] steps: @@ -28,13 +28,13 @@ jobs: path: | ~/.cache/pip ~/.cache/torch - key: docker-20-03-py3-pip-${{ steps.pip-cache.outputs.datew }} + key: docker-py3-pip-${{ steps.pip-cache.outputs.datew }} - name: Install the dependencies run: | which python python -m pip install --upgrade pip wheel python -m pip uninstall -y torch torchvision - python -m pip install torch==1.7.1 torchvision==0.8.2 + python -m pip install torch==1.8.0 torchvision==0.9.0 python -m pip install -r requirements-dev.txt - name: Run integration tests run: | diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 8e92ea0ed7..227d55a082 100644 --- 
a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -9,7 +9,7 @@ on: jobs: # caching of these jobs: - # - docker-20-03-py3-pip- (shared) + # - docker-py3-pip- (shared) # - ubuntu py37 pip- # - os-latest-pip- (shared) flake8-py3: @@ -80,13 +80,12 @@ jobs: - if: runner.os == 'windows' name: Install torch cpu from pytorch.org (Windows only) run: | - python -m pip install torch==1.7.1+cpu torchvision==0.8.2+cpu -f https://download.pytorch.org/whl/torch_stable.html + python -m pip install torch==1.8.0+cpu torchvision==0.9.0+cpu -f https://download.pytorch.org/whl/torch_stable.html # min. requirements for windows instances python -c "f=open('requirements-dev.txt', 'r'); txt=f.readlines(); f.close(); print(txt); f=open('requirements-dev.txt', 'w'); f.writelines(txt[1:12]); f.close()" - name: Install the dependencies run: | - python -m pip install torch==1.7.1 - python -m pip install torchvision==0.8.2 + python -m pip install torch==1.8.0 torchvision==0.9.0 cat "requirements-dev.txt" python -m pip install -r requirements-dev.txt python -m pip list @@ -134,11 +133,11 @@ jobs: - if: runner.os == 'windows' name: Install torch cpu from pytorch.org (Windows only) run: | - python -m pip install torch==1.7.1+cpu -f https://download.pytorch.org/whl/torch_stable.html + python -m pip install torch==1.8.0+cpu -f https://download.pytorch.org/whl/torch_stable.html - name: Install the dependencies run: | # min. requirements - python -m pip install torch==1.7.1 + python -m pip install torch==1.8.0 python -m pip install -r requirements-min.txt python -m pip list BUILD_MONAI=0 python setup.py develop # no compile of extensions @@ -156,15 +155,13 @@ jobs: strategy: matrix: environment: - - "PT15+CUDA101" - "PT16+CUDA102" - "PT16+CUDA110" - "PT17+CUDA102" - "PT17+CUDA110" + - "PT18+CUDA102" + - "PT18+CUDA112" include: - - environment: PT15+CUDA101 - pytorch: "torch==1.5.0+cu101 torchvision==0.6.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html" - base: "nvcr.io/nvidia/cuda:10.1-devel-ubuntu18.04" - environment: PT16+CUDA102 pytorch: "torch==1.6.0 torchvision==0.7.0" base: "nvcr.io/nvidia/cuda:10.2-devel-ubuntu18.04" @@ -179,6 +176,13 @@ jobs: # we explicitly set pytorch to -h to avoid pip install error pytorch: "-h" base: "nvcr.io/nvidia/pytorch:20.09-py3" + - environment: PT18+CUDA102 + pytorch: "torch==1.8.0 torchvision==0.9.0" + base: "nvcr.io/nvidia/cuda:10.2-devel-ubuntu18.04" + - environment: PT18+CUDA112 + # we explicitly set pytorch to -h to avoid pip install error + pytorch: "-h" + base: "nvcr.io/nvidia/pytorch:21.02-py3" container: image: ${{ matrix.base }} options: --gpus all @@ -187,7 +191,10 @@ jobs: - uses: actions/checkout@v2 - name: apt install run: | - if [ ${{ matrix.environment }} != "PT16+CUDA110" ]; then \ + if [ ${{ matrix.environment }} = "PT16+CUDA102" ] || \ + [ ${{ matrix.environment }} = "PT17+CUDA102" ] || \ + [ ${{ matrix.environment }} = "PT18+CUDA102" ] + then PYVER=3.6 PYSFX=3 DISTUTILS=python3-distutils && \ apt-get update && apt-get install -y --no-install-recommends \ curl \ @@ -217,7 +224,8 @@ jobs: ln -s /usr/bin/python$PYVER /usr/bin/python`echo $PYVER | cut -c1-1` && curl -O https://bootstrap.pypa.io/get-pip.py && \ python get-pip.py && \ - rm get-pip.py ; fi + rm get-pip.py; + fi - name: Install dependencies run: | which python @@ -231,10 +239,10 @@ jobs: export CUDA_VISIBLE_DEVICES=$(coverage run -m tests.utils) echo $CUDA_VISIBLE_DEVICES python -c "import torch; print(torch.__version__); print('{} of GPUs 
available'.format(torch.cuda.device_count()))" - python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))' + python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))' python -c "import monai; monai.config.print_config()" BUILD_MONAI=1 ./runtests.sh --quick - if [ ${{ matrix.environment }} == "PT16+CUDA110" ]; then + if [ ${{ matrix.environment }} == "PT18+CUDA112" ]; then # test the clang-format tool downloading once coverage run -m tests.clang_format_utils fi diff --git a/.github/workflows/setupapp.yml b/.github/workflows/setupapp.yml index 7656eb4828..e40660c213 100644 --- a/.github/workflows/setupapp.yml +++ b/.github/workflows/setupapp.yml @@ -8,7 +8,7 @@ on: jobs: # caching of these jobs: - # - docker-20-03-py3-pip- (shared) + # - docker-py3-pip- (shared) # - ubuntu py36 37 38-pip- # - os-latest-pip (shared) coverage-py3: @@ -30,7 +30,7 @@ jobs: path: | ~/.cache/pip ~/.cache/torch - key: docker-20-03-py3-pip-${{ steps.pip-cache.outputs.datew }} + key: docker-py3-pip-${{ steps.pip-cache.outputs.datew }} - name: Install the dependencies run: | which python @@ -51,7 +51,7 @@ jobs: - name: Upload coverage uses: codecov/codecov-action@v1 with: - fail_ci_if_error: true + fail_ci_if_error: false file: ./coverage.xml test-py3x: diff --git a/tests/utils.py b/tests/utils.py index 3636cbe974..20f94cd1eb 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -569,13 +569,13 @@ def query_memory(n=2): """ Find best n idle devices and return a string of device ids. """ - bash_string = "nvidia-smi --query-gpu=utilization.gpu,temperature.gpu,memory.used --format=csv,noheader,nounits" + bash_string = "nvidia-smi --query-gpu=utilization.gpu,power.draw,memory.used --format=csv,noheader,nounits" try: p1 = Popen(bash_string.split(), stdout=PIPE) output, error = p1.communicate() free_memory = [x.split(",") for x in output.decode("utf-8").split("\n")[:-1]] - free_memory = np.asarray(free_memory, dtype=np.float).T + free_memory = np.asarray(free_memory, dtype=float).T ids = np.lexsort(free_memory)[:n] except (FileNotFoundError, TypeError, IndexError): ids = range(n) if isinstance(n, int) else [] From 634b32d5c02af3db08c2b5fbe55ed773d9eb31c1 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Mon, 8 Mar 2021 20:10:54 +0800 Subject: [PATCH 035/457] [DLMED] update datasets (#1712) Signed-off-by: Nic Ma --- monai/data/dataset.py | 36 ++++++++++++++++----------------- tests/test_smartcachedataset.py | 8 +++++--- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/monai/data/dataset.py b/monai/data/dataset.py index bb5a98ba1e..c032e65af6 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -52,16 +52,15 @@ class Dataset(_TorchDataset): }, }, }] """ - def __init__(self, data: Sequence, transform: Optional[Callable] = None, progress: bool = True) -> None: + def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None: """ Args: data: input data to load and transform to generate dataset for model. transform: a callable data transform on input data. - progress: whether to display a progress bar. + """ self.data = data self.transform = transform - self.progress = progress def __len__(self) -> int: return len(self.data) @@ -118,7 +117,6 @@ def __init__( transform: Union[Sequence[Callable], Callable], cache_dir: Optional[Union[Path, str]] = None, hash_func: Callable[..., bytes] = pickle_hashing, - progress: bool = True, ) -> None: """ Args: @@ -133,11 +131,11 @@ def __init__( If the cache_dir doesn't exist, will automatically create it. 
hash_func: a callable to compute hash from data items to be cached. defaults to `monai.data.utils.pickle_hashing`. - progress: whether to display a progress bar. + """ if not isinstance(transform, Compose): transform = Compose(transform) - super().__init__(data=data, transform=transform, progress=progress) + super().__init__(data=data, transform=transform) self.cache_dir = Path(cache_dir) if cache_dir is not None else None self.hash_func = hash_func if self.cache_dir is not None: @@ -350,7 +348,8 @@ def __init__( lmdb_kwargs: additional keyword arguments to the lmdb environment. for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class """ - super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func, progress=progress) + super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func) + self.progress = progress if not self.cache_dir: raise ValueError("cache_dir must be specified.") self.db_file = self.cache_dir / f"{db_name}.lmdb" @@ -490,7 +489,8 @@ def __init__( """ if not isinstance(transform, Compose): transform = Compose(transform) - super().__init__(data=data, transform=transform, progress=progress) + super().__init__(data=data, transform=transform) + self.progress = progress self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data)) self.num_workers = num_workers if self.num_workers is not None: @@ -591,7 +591,7 @@ def __init__( cache_num: int = sys.maxsize, cache_rate: float = 1.0, num_init_workers: Optional[int] = None, - num_replace_workers: int = 0, + num_replace_workers: Optional[int] = None, progress: bool = True, ) -> None: """ @@ -606,8 +606,8 @@ def __init__( num_init_workers: the number of worker threads to initialize the cache for first epoch. If num_init_workers is None then the number returned by os.cpu_count() is used. num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch. - if 0, run in main thread, no separate thread will open. - progress: whether to display a progress bar. + If num_replace_workers is None then the number returned by os.cpu_count() is used. + progress: whether to display a progress bar when caching for the first epoch. """ super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress) @@ -617,7 +617,10 @@ def __init__( warnings.warn("cache_num is greater or equal than dataset length, fall back to regular CacheDataset.") if replace_rate <= 0: raise ValueError("replace_rate must be greater than 0, otherwise, please use CacheDataset.") - self.num_replace_workers: int = num_replace_workers + + self.num_replace_workers: Optional[int] = num_replace_workers + if self.num_replace_workers is not None: + self.num_replace_workers = max(int(self.num_replace_workers), 1) self._total_num: int = len(data) self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num) @@ -747,12 +750,9 @@ def _compute_replacements(self): It can support multi-threads to accelerate the computation progress. 
""" - if self.num_replace_workers > 0: - with ThreadPool(self.num_replace_workers) as p: - p.map(self._replace_cache_thread, list(range(self._replace_num))) - else: - for i in range(self._replace_num): - self._replace_cache_thread(i) + with ThreadPool(self.num_replace_workers) as p: + p.map(self._replace_cache_thread, list(range(self._replace_num))) + self._replace_done = True def _try_manage_replacement(self, check_round): diff --git a/tests/test_smartcachedataset.py b/tests/test_smartcachedataset.py index 3d1a051a83..7ebb2858d2 100644 --- a/tests/test_smartcachedataset.py +++ b/tests/test_smartcachedataset.py @@ -24,13 +24,15 @@ TEST_CASE_2 = [0.1, 4, Compose([LoadImaged(keys=["image", "label", "extra"])])] -TEST_CASE_3 = [0.1, 4, None] +TEST_CASE_3 = [0.1, None, Compose([LoadImaged(keys=["image", "label", "extra"])])] -TEST_CASE_4 = [0.5, 2, Compose([LoadImaged(keys=["image", "label", "extra"])])] +TEST_CASE_4 = [0.1, 4, None] + +TEST_CASE_5 = [0.5, 2, Compose([LoadImaged(keys=["image", "label", "extra"])])] class TestSmartCacheDataset(unittest.TestCase): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4]) + @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5]) def test_shape(self, replace_rate, num_replace_workers, transform): test_image = nib.Nifti1Image(np.random.randint(0, 2, size=[8, 8, 8]), np.eye(4)) with tempfile.TemporaryDirectory() as tempdir: From 08af0ccfd2badda1eb09fa64e5c72ba2d8151132 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Mon, 8 Mar 2021 13:48:17 +0000 Subject: [PATCH 036/457] with allow_missing_keys_mode (#1714) with allow_missing_keys_mode --- monai/transforms/__init__.py | 4 +- monai/transforms/compose.py | 9 +++- monai/transforms/transform.py | 29 +++++++++- monai/transforms/utils.py | 78 ++++++++++++++++++--------- tests/test_with_allow_missing_keys.py | 73 +++++++++++++++++++++++++ 5 files changed, 161 insertions(+), 32 deletions(-) create mode 100644 tests/test_with_allow_missing_keys.py diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 14fe71728b..796804df24 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -244,7 +244,7 @@ ZoomD, ZoomDict, ) -from .transform import MapTransform, Randomizable, RandomizableTransform, Transform +from .transform import MapTransform, Randomizable, RandomizableTransform, Transform, apply_transform from .utility.array import ( AddChannel, AddExtremePointsChannel, @@ -348,7 +348,7 @@ ToTensorDict, ) from .utils import ( - apply_transform, + allow_missing_keys_mode, copypaste_arrays, create_control_grid, create_grid, diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py index a9f66b12a0..21e7da068c 100644 --- a/monai/transforms/compose.py +++ b/monai/transforms/compose.py @@ -18,8 +18,13 @@ import numpy as np # For backwards compatiblity (so this still works: from monai.transforms.compose import MapTransform) -from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform, Transform # noqa: F401 -from monai.transforms.utils import apply_transform +from monai.transforms.transform import ( # noqa: F401 + MapTransform, + Randomizable, + RandomizableTransform, + Transform, + apply_transform, +) from monai.utils import MAX_SEED, ensure_tuple, get_seed __all__ = ["Compose"] diff --git a/monai/transforms/transform.py b/monai/transforms/transform.py index 7a09efa6d5..2a79b2edf2 100644 --- a/monai/transforms/transform.py +++ 
b/monai/transforms/transform.py
@@ -13,14 +13,39 @@
 """
 
 from abc import ABC, abstractmethod
-from typing import Any, Dict, Generator, Hashable, Iterable, List, Optional, Tuple
+from typing import Any, Callable, Dict, Generator, Hashable, Iterable, List, Optional, Tuple
 
 import numpy as np
 
 from monai.config import KeysCollection
 from monai.utils import MAX_SEED, ensure_tuple
 
-__all__ = ["Randomizable", "RandomizableTransform", "Transform", "MapTransform"]
+__all__ = ["apply_transform", "Randomizable", "RandomizableTransform", "Transform", "MapTransform"]
+
+
+def apply_transform(transform: Callable, data, map_items: bool = True):
+    """
+    Transform `data` with `transform`.
+    If `data` is a list or tuple and `map_items` is True, each item of `data` will be transformed
+    and this method returns a list of outcomes.
+    Otherwise, transform will be applied once with `data` as the argument.
+
+    Args:
+        transform: a callable to be used to transform `data`
+        data: an object to be transformed.
+        map_items: whether to apply transform to each item in `data`,
+            if `data` is a list or tuple. Defaults to True.
+
+    Raises:
+        Exception: When ``transform`` raises an exception.
+
+    """
+    try:
+        if isinstance(data, (list, tuple)) and map_items:
+            return [transform(item) for item in data]
+        return transform(data)
+    except Exception as e:
+        raise RuntimeError(f"applying transform {transform}") from e
 
 
 class Randomizable(ABC):
diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py
index 9a84eb00d9..eb1b194c96 100644
--- a/monai/transforms/utils.py
+++ b/monai/transforms/utils.py
@@ -12,6 +12,7 @@
 import itertools
 import random
 import warnings
+from contextlib import contextmanager
 from typing import Callable, List, Optional, Sequence, Tuple, Union
 
 import numpy as np
@@ -19,7 +20,10 @@
 
 from monai.config import DtypeLike, IndexSelection
 from monai.networks.layers import GaussianFilter
+from monai.transforms.compose import Compose
+from monai.transforms.transform import MapTransform
 from monai.utils import ensure_tuple, ensure_tuple_rep, ensure_tuple_size, fall_back_tuple, min_version, optional_import
+from monai.utils.misc import issequenceiterable
 
 measure, _ = optional_import("skimage.measure", "0.14.2", min_version)
 
@@ -37,7 +41,6 @@
     "map_binary_to_indices",
     "weighted_patch_samples",
     "generate_pos_neg_label_crop_centers",
-    "apply_transform",
     "create_grid",
     "create_control_grid",
     "create_rotate",
@@ -49,6 +52,7 @@
     "get_extreme_points",
     "extreme_points_to_image",
     "map_spatial_axes",
+    "allow_missing_keys_mode",
 ]
 
 
@@ -363,31 +367,6 @@ def _correct_centers(
     return centers
 
 
-def apply_transform(transform: Callable, data, map_items: bool = True):
-    """
-    Transform `data` with `transform`.
-    If `data` is a list or tuple and `map_data` is True, each item of `data` will be transformed
-    and this method returns a list of outcomes.
-    otherwise transform will be applied once with `data` as the argument.
-
-    Args:
-        transform: a callable to be used to transform `data`
-        data: an object to be transformed.
-        map_items: whether to apply transform to each item in `data`,
-            if `data` is a list or tuple. Defaults to True.
-
-    Raises:
-        Exception: When ``transform`` raises an exception.
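The relocated `apply_transform` keeps the dispatch behaviour it had in `utils.py`; a minimal sketch of both code paths, using only the function as defined above:

    from monai.transforms import apply_transform

    def double(x):
        return x * 2

    print(apply_transform(double, [1, 2, 3]))  # [2, 4, 6] -- mapped over each list item
    print(apply_transform(double, 5))  # 10 -- applied once to the whole input
    print(apply_transform(double, [1, 2], map_items=False))  # [1, 2, 1, 2] -- the list is passed as one argument
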
- - """ - try: - if isinstance(data, (list, tuple)) and map_items: - return [transform(item) for item in data] - return transform(data) - except Exception as e: - raise RuntimeError(f"applying transform {transform}") from e - - def create_grid( spatial_size: Sequence[int], spacing: Optional[Sequence[float]] = None, @@ -730,3 +709,50 @@ def map_spatial_axes( spatial_axes_.append(a - 1 if a < 0 else a) return spatial_axes_ + + +@contextmanager +def allow_missing_keys_mode(transform: Union[MapTransform, Compose, Tuple[MapTransform], Tuple[Compose]]): + """Temporarily set all MapTransforms to not throw an error if keys are missing. After, revert to original states. + + Args: + transform: either MapTransform or a Compose + + Example: + + .. code-block:: python + + data = {"image": np.arange(16, dtype=float).reshape(1, 4, 4)} + t = SpatialPadd(["image", "label"], 10, allow_missing_keys=False) + _ = t(data) # would raise exception + with allow_missing_keys_mode(t): + _ = t(data) # OK! + """ + # If given a sequence of transforms, Compose them to get a single list + if issequenceiterable(transform): + transform = Compose(transform) + + # Get list of MapTransforms + transforms = [] + if isinstance(transform, MapTransform): + transforms = [transform] + elif isinstance(transform, Compose): + # Only keep contained MapTransforms + transforms = [t for t in transform.flatten().transforms if isinstance(t, MapTransform)] + if len(transforms) == 0: + raise TypeError( + "allow_missing_keys_mode expects either MapTransform(s) or Compose(s) containing MapTransform(s)" + ) + + # Get the state of each `allow_missing_keys` + orig_states = [t.allow_missing_keys for t in transforms] + + try: + # Set all to True + for t in transforms: + t.allow_missing_keys = True + yield + finally: + # Revert + for t, o_s in zip(transforms, orig_states): + t.allow_missing_keys = o_s diff --git a/tests/test_with_allow_missing_keys.py b/tests/test_with_allow_missing_keys.py new file mode 100644 index 0000000000..68c5ad30c4 --- /dev/null +++ b/tests/test_with_allow_missing_keys.py @@ -0,0 +1,73 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np + +from monai.transforms import Compose, SpatialPad, SpatialPadd, allow_missing_keys_mode + + +class TestWithAllowMissingKeysMode(unittest.TestCase): + def setUp(self): + self.data = {"image": np.arange(16, dtype=float).reshape(1, 4, 4)} + + def test_map_transform(self): + for amk in [True, False]: + t = SpatialPadd(["image", "label"], 10, allow_missing_keys=amk) + with allow_missing_keys_mode(t): + # check state is True + self.assertTrue(t.allow_missing_keys) + # and that transform works even though key is missing + _ = t(self.data) + # check it has returned to original state + self.assertEqual(t.allow_missing_keys, amk) + if not amk: + # should fail because amks==False and key is missing + with self.assertRaises(KeyError): + _ = t(self.data) + + def test_compose(self): + amks = [True, False, True] + t = Compose([SpatialPadd(["image", "label"], 10, allow_missing_keys=amk) for amk in amks]) + with allow_missing_keys_mode(t): + # check states are all True + for _t in t.transforms: + self.assertTrue(_t.allow_missing_keys) + # and that transform works even though key is missing + _ = t(self.data) + # check they've returned to original state + for _t, amk in zip(t.transforms, amks): + self.assertEqual(_t.allow_missing_keys, amk) + # should fail because not all amks==True and key is missing + with self.assertRaises((KeyError, RuntimeError)): + _ = t(self.data) + + def test_array_transform(self): + for t in [SpatialPad(10), Compose([SpatialPad(10)])]: + with self.assertRaises(TypeError): + with allow_missing_keys_mode(t): + pass + + def test_multiple(self): + orig_states = [True, False] + ts = [SpatialPadd(["image", "label"], 10, allow_missing_keys=i) for i in orig_states] + with allow_missing_keys_mode(ts): + for t in ts: + self.assertTrue(t.allow_missing_keys) + # and that transform works even though key is missing + _ = t(self.data) + for t, o_s in zip(ts, orig_states): + self.assertEqual(t.allow_missing_keys, o_s) + + +if __name__ == "__main__": + unittest.main() From f4560a183a09336bf6285d4d9f5d882715cde3e2 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 8 Mar 2021 15:20:51 +0000 Subject: [PATCH 037/457] update links (#1717) Signed-off-by: Wenqi Li --- README.md | 3 +++ docs/source/index.rst | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f06a2d146f..7a4a69e1e9 100644 --- a/README.md +++ b/README.md @@ -62,3 +62,6 @@ Ask and answer questions over on [MONAI's GitHub Discussions tab](https://github - Issue tracker: https://github.com/Project-MONAI/MONAI/issues - Wiki: https://github.com/Project-MONAI/MONAI/wiki - Test status: https://github.com/Project-MONAI/MONAI/actions +- PyPI package: https://pypi.org/project/monai/ +- Weekly previews: https://pypi.org/project/monai-weekly/ +- Docker Hub: https://hub.docker.com/r/projectmonai/monai diff --git a/docs/source/index.rst b/docs/source/index.rst index ea21428e6e..23146ae69e 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -91,9 +91,8 @@ Links - FAQ: https://github.com/Project-MONAI/MONAI/wiki/Frequently-asked-questions-and-answers - Test status: https://github.com/Project-MONAI/MONAI/actions - PyPI package: https://pypi.org/project/monai/ +- Weekly previews: https://pypi.org/project/monai-weekly/ - Docker Hub: https://hub.docker.com/r/projectmonai/monai -- Google Group: https://groups.google.com/forum/#!forum/project-monai -- Reddit: https://www.reddit.com/r/projectmonai/ Indices and tables From cbddce9f3e6a666e9099d835db31bcf2fd94cba2 
Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 8 Mar 2021 17:39:12 +0000 Subject: [PATCH 038/457] 1500-update grid dataset to support patch level transforms (#1679) * fixes #1500 update grid dataset to support patch level transforms Signed-off-by: Wenqi Li --- docs/source/data.rst | 5 ++ monai/data/__init__.py | 2 +- monai/data/grid_dataset.py | 137 ++++++++++++++++++++++++++++++------- monai/data/utils.py | 17 ++++- tests/test_grid_dataset.py | 83 ++++++++++++++++++++++ 5 files changed, 215 insertions(+), 29 deletions(-) create mode 100644 tests/test_grid_dataset.py diff --git a/docs/source/data.rst b/docs/source/data.rst index eed4b30ded..3dffeb8977 100644 --- a/docs/source/data.rst +++ b/docs/source/data.rst @@ -77,6 +77,11 @@ Patch-based dataset .. autoclass:: GridPatchDataset :members: +`PatchIter` +~~~~~~~~~~~ +.. autoclass:: PatchIter + :members: + `PatchDataset` ~~~~~~~~~~~~~~ .. autoclass:: PatchDataset diff --git a/monai/data/__init__.py b/monai/data/__init__.py index 54ee7908f4..9fa5c935e2 100644 --- a/monai/data/__init__.py +++ b/monai/data/__init__.py @@ -22,7 +22,7 @@ ZipDataset, ) from .decathlon_datalist import load_decathlon_datalist, load_decathlon_properties -from .grid_dataset import GridPatchDataset, PatchDataset +from .grid_dataset import GridPatchDataset, PatchDataset, PatchIter from .image_dataset import ImageDataset from .image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader, WSIReader from .iterable_dataset import IterableDataset diff --git a/monai/data/grid_dataset.py b/monai/data/grid_dataset.py index f85569d88a..3f373491ed 100644 --- a/monai/data/grid_dataset.py +++ b/monai/data/grid_dataset.py @@ -9,9 +9,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import math from typing import Callable, Dict, Optional, Sequence, Union +import numpy as np import torch from torch.utils.data import IterableDataset @@ -20,31 +20,25 @@ from monai.transforms import apply_transform from monai.utils import NumpyPadMode, ensure_tuple -__all__ = ["PatchDataset", "GridPatchDataset"] +__all__ = ["PatchDataset", "GridPatchDataset", "PatchIter"] -class GridPatchDataset(IterableDataset): +class PatchIter: """ - Yields patches from arrays read from an input dataset. The patches are chosen in a contiguous grid sampling scheme. + A class to return a patch generator with predefined properties such as `patch_size`. + Typically used with :py:class:`monai.data.GridPatchDataset`. """ def __init__( self, - dataset: Sequence, patch_size: Sequence[int], start_pos: Sequence[int] = (), mode: Union[NumpyPadMode, str] = NumpyPadMode.WRAP, **pad_opts: Dict, - ) -> None: + ): """ - Initializes this dataset in terms of the input dataset and patch size. The `patch_size` is the size of the - patch to sample from the input arrays. It is assumed the arrays first dimension is the channel dimension which - will be yielded in its entirety so this should not be specified in `patch_size`. For example, for an input 3D - array with 1 channel of size (1, 20, 20, 20) a regular grid sampling of eight patches (1, 10, 10, 10) would be - specified by a `patch_size` of (10, 10, 10). 
        Args:
-            dataset: the dataset to read array data from
             patch_size: size of patches to generate slices for, 0/None selects whole dimension
             start_pos: starting position in the array, default is 0 for each dimension
             mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
@@ -52,32 +46,123 @@ def __init__(
                 One of the listed string values or a user supplied function. Defaults to ``"wrap"``.
                 See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
             pad_opts: padding options, see numpy.pad
-        """
-        self.dataset = dataset
 
+        Note:
+            The `patch_size` is the size of the
+            patch to sample from the input arrays. It is assumed the array's first dimension is the channel dimension which
+            will be yielded in its entirety so this should not be specified in `patch_size`. For example, for an input 3D
+            array with 1 channel of size (1, 20, 20, 20) a regular grid sampling of eight patches (1, 10, 10, 10) would be
+            specified by a `patch_size` of (10, 10, 10).
+
+        """
         self.patch_size = (None,) + tuple(patch_size)
         self.start_pos = ensure_tuple(start_pos)
         self.mode: NumpyPadMode = NumpyPadMode(mode)
         self.pad_opts = pad_opts
 
+    def __call__(self, array):
+        """
+        Args:
+            array: the image to generate patches from.
+        """
+        yield from iter_patch(
+            array,
+            patch_size=self.patch_size,  # expand to have the channel dim
+            start_pos=self.start_pos,
+            copy_back=False,
+            mode=self.mode,
+            **self.pad_opts,
+        )
+
+
+class GridPatchDataset(IterableDataset):
+    """
+    Yields patches from images read from an image dataset.
+    Typically used with `PatchIter` so that the patches are chosen in a contiguous grid sampling scheme.
+
+    .. code-block:: python
+
+        import numpy as np
+
+        from monai.data import GridPatchDataset, DataLoader, PatchIter
+        from monai.transforms import RandShiftIntensity
+
+        # image-level dataset
+        images = [np.arange(16, dtype=float).reshape(1, 4, 4),
+                  np.arange(16, dtype=float).reshape(1, 4, 4)]
+        # image-level patch generator, "grid sampling"
+        patch_iter = PatchIter(patch_size=(2, 2), start_pos=(0, 0))
+        # patch-level intensity shifts
+        patch_intensity = RandShiftIntensity(offsets=1.0, prob=1.0)
+
+        # construct the dataset
+        ds = GridPatchDataset(dataset=images,
+                              patch_iter=patch_iter,
+                              transform=patch_intensity)
+        # use the grid patch dataset
+        for item in DataLoader(ds, batch_size=2, num_workers=2):
+            print("patch size:", item[0].shape)
+            print("coordinates:", item[1])
+
+        # >>> patch size: torch.Size([2, 1, 2, 2])
+        #     coordinates: tensor([[[0, 1], [0, 2], [0, 2]],
+        #                          [[0, 1], [2, 4], [0, 2]]])
+
+    """
+
+    def __init__(
+        self,
+        dataset: Sequence,
+        patch_iter: Callable,
+        transform: Optional[Callable] = None,
+        with_coordinates: bool = True,
+    ) -> None:
+        """
+        Initializes this dataset in terms of the image dataset, patch generator, and an optional transform.
+
+        Args:
+            dataset: the dataset to read image data from.
+            patch_iter: converts an input image (item from dataset) into an iterable of image patches.
+                `patch_iter(dataset[idx])` must yield a tuple: (patches, coordinates).
+                see also: :py:class:`monai.data.PatchIter`.
+            transform: a callable data transform that operates on the patches.
+            with_coordinates: whether to yield the coordinates of each patch, defaults to `True`.
+ + """ + + self.dataset = dataset + self.patch_iter = patch_iter + self.transform = transform + self.with_coordinates = with_coordinates + def __iter__(self): worker_info = torch.utils.data.get_worker_info() - iter_start = 0 - iter_end = len(self.dataset) + iter_start, iter_end = 0, 1 + try: + iter_end = len(self.dataset) # TODO: support iterable self.dataset + except TypeError: + raise NotImplementedError("image dataset must implement `len()`.") if worker_info is not None: # split workload - per_worker = int(math.ceil((iter_end - iter_start) / float(worker_info.num_workers))) - worker_id = worker_info.id - iter_start = iter_start + worker_id * per_worker + per_worker = int(np.ceil((iter_end - iter_start) / float(worker_info.num_workers))) + iter_start = iter_start + worker_info.id * per_worker iter_end = min(iter_start + per_worker, iter_end) for index in range(iter_start, iter_end): - arrays = self.dataset[index] - - iters = [iter_patch(a, self.patch_size, self.start_pos, False, self.mode, **self.pad_opts) for a in arrays] - - yield from zip(*iters) + image = self.dataset[index] + if not self.with_coordinates: + for patch, *_ in self.patch_iter(image): # patch_iter to yield at least 1 item: patch + out_patch = ( + patch if self.transform is None else apply_transform(self.transform, patch, map_items=False) + ) + yield out_patch + else: + for patch, slices, *_ in self.patch_iter(image): # patch_iter to yield at least 2 items: patch, coords + out_patch = ( + patch if self.transform is None else apply_transform(self.transform, patch, map_items=False) + ) + yield out_patch, slices class PatchDataset(Dataset): @@ -95,8 +180,8 @@ class PatchDataset(Dataset): from monai.transforms import RandSpatialCropSamples, RandShiftIntensity # image dataset - images = [np.arange(16, dtype=np.float).reshape(1, 4, 4), - np.arange(16, dtype=np.float).reshape(1, 4, 4)] + images = [np.arange(16, dtype=float).reshape(1, 4, 4), + np.arange(16, dtype=float).reshape(1, 4, 4)] # image patch sampler n_samples = 5 sampler = RandSpatialCropSamples(roi_size=(3, 3), num_samples=n_samples, diff --git a/monai/data/utils.py b/monai/data/utils.py index 60250af441..2e2f8c00cb 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -174,7 +174,7 @@ def iter_patch( copy_back: bool = True, mode: Union[NumpyPadMode, str] = NumpyPadMode.WRAP, **pad_opts: Dict, -) -> Generator[np.ndarray, None, None]: +): """ Yield successive patches from `arr` of size `patch_size`. The iteration can start from position `start_pos` in `arr` but drawing from a padded array extended by the `patch_size` in each dimension (so these coordinates can be negative @@ -194,6 +194,15 @@ def iter_patch( Yields: Patches of array data from `arr` which are views into a padded array which can be modified, if `copy_back` is True these changes will be reflected in `arr` once the iteration completes. 
+
+    Note:
+        coordinate format is:
+
+            [1st_dim_start, 1st_dim_end,
+             2nd_dim_start, 2nd_dim_end,
+             ...,
+             Nth_dim_start, Nth_dim_end]
+
     """
     # ensure patchSize and startPos are the right length
     patch_size_ = get_valid_patch_size(arr.shape, patch_size)
@@ -210,7 +219,9 @@
     iter_size = tuple(s + p for s, p in zip(arr.shape, patch_size_))
 
     for slices in iter_patch_slices(iter_size, patch_size_, start_pos_padded):
-        yield arrpad[slices]
+        # compensate original image padding
+        coords_no_pad = tuple((coord.start - p, coord.stop - p) for coord, p in zip(slices, patch_size_))
+        yield arrpad[slices], np.asarray(coords_no_pad)  # data and coords (in numpy; works with torch loader)
 
     # copy back data from the padded image if required
     if copy_back:
@@ -411,6 +422,8 @@ def set_rnd(obj, seed: int) -> int:
         obj.set_random_state(seed=seed % MAX_SEED)
         return seed + 1  # a different seed for the next component
     for key in obj.__dict__:
+        if key.startswith("__"):  # skip the private methods
+            continue
         seed = set_rnd(obj.__dict__[key], seed=seed)
     return seed
 
diff --git a/tests/test_grid_dataset.py b/tests/test_grid_dataset.py
new file mode 100644
index 0000000000..6e0aa4023e
--- /dev/null
+++ b/tests/test_grid_dataset.py
@@ -0,0 +1,83 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
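The (patch, coordinates) pairs yielded by the updated `iter_patch` can be checked with a minimal sketch; the import from `monai.data.utils` is an assumption, adjust to wherever `iter_patch` is exposed in your version:

    import numpy as np
    from monai.data.utils import iter_patch

    arr = np.arange(16, dtype=float).reshape(1, 4, 4)
    # patch_size entries of None/0 keep that dimension whole (here: the channel dim)
    for patch, coords in iter_patch(arr, patch_size=(None, 2, 2), copy_back=False):
        print(patch.shape, coords.tolist())
        # e.g. (1, 2, 2) [[0, 1], [0, 2], [0, 2]] -- start/end pairs per dimension
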
+ +import sys +import unittest + +import numpy as np + +from monai.data import DataLoader, GridPatchDataset, PatchIter +from monai.transforms import RandShiftIntensity +from monai.utils import set_determinism + + +def identity_generator(x): + # simple transform that returns the input itself + for idx, item in enumerate(x): + yield item, idx + + +class TestGridPatchDataset(unittest.TestCase): + def setUp(self): + set_determinism(seed=1234) + + def tearDown(self): + set_determinism(None) + + def test_shape(self): + test_dataset = ["vwxyz", "helloworld", "worldfoobar"] + result = GridPatchDataset(dataset=test_dataset, patch_iter=identity_generator, with_coordinates=False) + output = [] + n_workers = 0 if sys.platform == "win32" else 2 + for item in DataLoader(result, batch_size=3, num_workers=n_workers): + output.append("".join(item)) + expected = ["vwx", "wor", "yzh", "ldf", "ell", "oob", "owo", "ar", "rld"] + self.assertEqual(sorted(output), sorted(expected)) + self.assertEqual(len("".join(expected)), len("".join(test_dataset))) + + def test_loading_array(self): + set_determinism(seed=1234) + # image dataset + images = [np.arange(16, dtype=float).reshape(1, 4, 4), np.arange(16, dtype=float).reshape(1, 4, 4)] + # image level + patch_intensity = RandShiftIntensity(offsets=1.0, prob=1.0) + patch_iter = PatchIter(patch_size=(2, 2), start_pos=(0, 0)) + ds = GridPatchDataset(dataset=images, patch_iter=patch_iter, transform=patch_intensity) + # use the grid patch dataset + for item in DataLoader(ds, batch_size=2, shuffle=False, num_workers=0): + np.testing.assert_equal(tuple(item[0].shape), (2, 1, 2, 2)) + np.testing.assert_allclose( + item[0], + np.array([[[[1.7413, 2.7413], [5.7413, 6.7413]]], [[[9.1419, 10.1419], [13.1419, 14.1419]]]]), + rtol=1e-5, + ) + np.testing.assert_allclose( + item[1], + np.array([[[0, 1], [0, 2], [2, 4]], [[0, 1], [2, 4], [2, 4]]]), + rtol=1e-5, + ) + if sys.platform != "win32": + for item in DataLoader(ds, batch_size=2, shuffle=False, num_workers=2): + np.testing.assert_equal(tuple(item[0].shape), (2, 1, 2, 2)) + np.testing.assert_allclose( + item[0], + np.array([[[[2.3944, 3.3944], [6.3944, 7.3944]]], [[[10.6551, 11.6551], [14.6551, 15.6551]]]]), + rtol=1e-3, + ) + np.testing.assert_allclose( + item[1], + np.array([[[0, 1], [0, 2], [2, 4]], [[0, 1], [2, 4], [2, 4]]]), + rtol=1e-5, + ) + + +if __name__ == "__main__": + unittest.main() From 0751ca053dc1e2d3a4f12ba28c0240ad69684300 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 9 Mar 2021 20:41:35 +0800 Subject: [PATCH 039/457] Update DataLoader arg type of workflow (#1719) * [DLMED] update type of arg in workflows Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot --- monai/engines/evaluator.py | 14 +++++++------- monai/engines/trainer.py | 11 +++++++---- monai/engines/workflow.py | 28 +++++++++++++++++----------- 3 files changed, 31 insertions(+), 22 deletions(-) diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py index 0b7167fb3a..b8977a3652 100644 --- a/monai/engines/evaluator.py +++ b/monai/engines/evaluator.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence, Tuple +from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union import torch from torch.utils.data import DataLoader @@ -38,7 +38,7 @@ class Evaluator(Workflow): Args: device: an object representing the device on which to run. - val_data_loader: Ignite engine use data_loader to run, must be torch.DataLoader. + val_data_loader: Ignite engine use data_loader to run, must be Iterable or torch.DataLoader. epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`. non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect. @@ -60,7 +60,7 @@ class Evaluator(Workflow): def __init__( self, device: torch.device, - val_data_loader: DataLoader, + val_data_loader: Union[Iterable, DataLoader], epoch_length: Optional[int] = None, non_blocking: bool = False, prepare_batch: Callable = default_prepare_batch, @@ -110,7 +110,7 @@ class SupervisedEvaluator(Evaluator): Args: device: an object representing the device on which to run. - val_data_loader: Ignite engine use data_loader to run, must be torch.DataLoader. + val_data_loader: Ignite engine use data_loader to run, must be Iterable, typically be torch.DataLoader. network: use the network to run model forward. epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`. non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously @@ -134,7 +134,7 @@ class SupervisedEvaluator(Evaluator): def __init__( self, device: torch.device, - val_data_loader: DataLoader, + val_data_loader: Union[Iterable, DataLoader], network: torch.nn.Module, epoch_length: Optional[int] = None, non_blocking: bool = False, @@ -215,7 +215,7 @@ class EnsembleEvaluator(Evaluator): Args: device: an object representing the device on which to run. - val_data_loader: Ignite engine use data_loader to run, must be torch.DataLoader. + val_data_loader: Ignite engine use data_loader to run, must be Iterable, typically be torch.DataLoader. epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`. networks: use the networks to run model forward in order. pred_keys: the keys to store every prediction data. @@ -241,7 +241,7 @@ class EnsembleEvaluator(Evaluator): def __init__( self, device: torch.device, - val_data_loader: DataLoader, + val_data_loader: Union[Iterable, DataLoader], networks: Sequence[torch.nn.Module], pred_keys: Sequence[str], epoch_length: Optional[int] = None, diff --git a/monai/engines/trainer.py b/monai/engines/trainer.py index efb2ab12fa..c3d471e261 100644 --- a/monai/engines/trainer.py +++ b/monai/engines/trainer.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence, Tuple, Union +from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union import torch from torch.optim.optimizer import Optimizer @@ -58,7 +58,7 @@ class SupervisedTrainer(Trainer): Args: device: an object representing the device on which to run. max_epochs: the total epoch number for trainer to run. - train_data_loader: Ignite engine use data_loader to run, must be torch.DataLoader. + train_data_loader: Ignite engine use data_loader to run, must be Iterable or torch.DataLoader. network: to train with this network. 
optimizer: the optimizer associated to the network. loss_function: the loss function associated to the optimizer. @@ -85,7 +85,7 @@ def __init__( self, device: torch.device, max_epochs: int, - train_data_loader: DataLoader, + train_data_loader: Union[Iterable, DataLoader], network: torch.nn.Module, optimizer: Optimizer, loss_function: Callable, @@ -251,6 +251,9 @@ def __init__( additional_metrics: Optional[Dict[str, Metric]] = None, train_handlers: Optional[Sequence] = None, ): + if not isinstance(train_data_loader, DataLoader): + raise ValueError("train_data_loader must be PyTorch DataLoader.") + # set up Ignite engine and environments super().__init__( device=device, @@ -296,7 +299,7 @@ def _iteration( raise ValueError("must provide batch data for current iteration.") d_input = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking) - batch_size = self.data_loader.batch_size + batch_size = self.data_loader.batch_size # type: ignore g_input = self.g_prepare_batch(batch_size, self.latent_shape, engine.state.device, engine.non_blocking) g_output = self.g_inferer(g_input, self.g_network) diff --git a/monai/engines/workflow.py b/monai/engines/workflow.py index d6415c1966..b50d58f1a2 100644 --- a/monai/engines/workflow.py +++ b/monai/engines/workflow.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence +from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional, Sequence, Union import torch import torch.distributed as dist @@ -44,7 +44,7 @@ class Workflow(IgniteEngine): # type: ignore[valid-type, misc] # due to optiona Args: device: an object representing the device on which to run. max_epochs: the total epoch number for engine to run, validator and evaluator have only 1 epoch. - data_loader: Ignite engine use data_loader to run, must be torch.DataLoader. + data_loader: Ignite engine use data_loader to run, must be Iterable or torch.DataLoader. epoch_length: number of iterations for one epoch, default to `len(data_loader)`. non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect. 
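In practice the relaxed typing means a plain generator can now drive an engine, provided `epoch_length` is given explicitly; a minimal sketch, assuming `net` is an existing `torch.nn.Module` and the batches match the dict keys expected by `default_prepare_batch`:

    import torch
    from monai.engines import SupervisedEvaluator

    def batch_source():
        for _ in range(4):  # any plain Iterable is accepted in place of a DataLoader
            yield {"image": torch.rand(2, 1, 32, 32), "label": torch.randint(0, 2, (2, 1, 32, 32))}

    evaluator = SupervisedEvaluator(
        device=torch.device("cpu"),
        val_data_loader=batch_source(),
        network=net,  # assumed: a suitable torch.nn.Module defined elsewhere
        epoch_length=4,  # required here, since a generator has no len()
    )
    evaluator.run()
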
@@ -73,7 +73,7 @@ def __init__( self, device: torch.device, max_epochs: int, - data_loader: DataLoader, + data_loader: Union[Iterable, DataLoader], epoch_length: Optional[int] = None, non_blocking: bool = False, prepare_batch: Callable = default_prepare_batch, @@ -90,14 +90,20 @@ def __init__( super().__init__(self._iteration) if not isinstance(device, torch.device): raise TypeError(f"device must be a torch.device but is {type(device).__name__}.") - if not isinstance(data_loader, DataLoader): - raise TypeError(f"data_loader must be a torch.utils.data.DataLoader but is {type(data_loader).__name__}.") - sampler = data_loader.__dict__["sampler"] - if isinstance(sampler, DistributedSampler): - @self.on(Events.EPOCH_STARTED) - def set_sampler_epoch(engine: Engine): - sampler.set_epoch(engine.state.epoch) + if isinstance(data_loader, DataLoader): + sampler = data_loader.__dict__["sampler"] + if isinstance(sampler, DistributedSampler): + + @self.on(Events.EPOCH_STARTED) + def set_sampler_epoch(engine: Engine): + sampler.set_epoch(engine.state.epoch) + + if epoch_length is None: + epoch_length = len(data_loader) + else: + if epoch_length is None: + raise ValueError("if data_loader is not PyTorch DataLoader, must specify the epoch_length.") # set all sharable data for the workflow based on Ignite engine.state self.state = State( @@ -106,7 +112,7 @@ def set_sampler_epoch(engine: Engine): iteration=0, epoch=0, max_epochs=max_epochs, - epoch_length=len(data_loader) if epoch_length is None else epoch_length, + epoch_length=epoch_length, output=None, batch=None, metrics={}, From 78ec66f3a275d596ee0e310c10d23c673fa31a04 Mon Sep 17 00:00:00 2001 From: Yiwen Li <44606435+kate-sann5100@users.noreply.github.com> Date: Tue, 9 Mar 2021 15:31:38 +0000 Subject: [PATCH 040/457] 1651 implement RegUNet (#1658) * 1651 implement RegUNet Signed-off-by: kate-sann5100 --- docs/source/networks.rst | 20 ++ monai/networks/blocks/__init__.py | 1 + monai/networks/blocks/regunet_block.py | 270 +++++++++++++++++++++++++ monai/networks/nets/__init__.py | 1 + monai/networks/nets/regunet.py | 249 +++++++++++++++++++++++ tests/test_regunet.py | 87 ++++++++ tests/test_regunet_block.py | 97 +++++++++ 7 files changed, 725 insertions(+) create mode 100644 monai/networks/blocks/regunet_block.py create mode 100644 monai/networks/nets/regunet.py create mode 100644 tests/test_regunet.py create mode 100644 tests/test_regunet_block.py diff --git a/docs/source/networks.rst b/docs/source/networks.rst index e0ac0f2d75..5688f4b143 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -119,6 +119,21 @@ Blocks .. autoclass:: Subpixelupsample .. autoclass:: SubpixelUpSample +`Registration Residual Conv Block` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: RegistrationResidualConvBlock + :members: + +`Registration Down Sample Block` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: RegistrationDownSampleBlock + :members: + +`Registration Extraction Block` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: RegistrationExtractionBlock + :members: + `LocalNet DownSample Block` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: LocalNetDownSampleBlock @@ -330,6 +345,11 @@ Nets .. autoclass:: VNet :members: +`RegUNet` +~~~~~~~~~~ +.. autoclass:: RegUNet + :members: + `LocalNet` ~~~~~~~~~~~ .. 
autoclass:: LocalNet diff --git a/monai/networks/blocks/__init__.py b/monai/networks/blocks/__init__.py index 4a2e31928e..4639630c36 100644 --- a/monai/networks/blocks/__init__.py +++ b/monai/networks/blocks/__init__.py @@ -17,6 +17,7 @@ from .dynunet_block import UnetBasicBlock, UnetOutBlock, UnetResBlock, UnetUpBlock, get_output_padding, get_padding from .fcn import FCN, GCN, MCFCN, Refine from .localnet_block import LocalNetDownSampleBlock, LocalNetFeatureExtractorBlock, LocalNetUpSampleBlock +from .regunet_block import RegistrationDownSampleBlock, RegistrationExtractionBlock, RegistrationResidualConvBlock from .segresnet_block import ResBlock from .squeeze_and_excitation import ( ChannelSELayer, diff --git a/monai/networks/blocks/regunet_block.py b/monai/networks/blocks/regunet_block.py new file mode 100644 index 0000000000..f4c2c1f3a7 --- /dev/null +++ b/monai/networks/blocks/regunet_block.py @@ -0,0 +1,270 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Optional, Sequence, Tuple, Type, Union + +import torch +from torch import nn +from torch.nn import functional as F + +from monai.networks.blocks import Convolution +from monai.networks.layers import Conv, Norm, Pool, same_padding + + +def get_conv_block( + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int] = 3, + strides: int = 1, + padding: Optional[Union[Tuple[int, ...], int]] = None, + act: Optional[Union[Tuple, str]] = "RELU", + norm: Optional[Union[Tuple, str]] = "BATCH", + initializer: Optional[str] = "kaiming_uniform", +) -> nn.Module: + if padding is None: + padding = same_padding(kernel_size) + conv_block = Convolution( + spatial_dims, + in_channels, + out_channels, + kernel_size=kernel_size, + strides=strides, + act=act, + norm=norm, + bias=False, + conv_only=False, + padding=padding, + ) + conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims] + for m in conv_block.modules(): + if isinstance(m, conv_type): + if initializer == "kaiming_uniform": + nn.init.kaiming_normal_(torch.as_tensor(m.weight)) + elif initializer == "zeros": + nn.init.zeros_(torch.as_tensor(m.weight)) + else: + raise ValueError( + f"initializer {initializer} is not supported, " "currently supporting kaiming_uniform and zeros" + ) + return conv_block + + +def get_conv_layer( + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int] = 3, +) -> nn.Module: + padding = same_padding(kernel_size) + return Convolution( + spatial_dims, + in_channels, + out_channels, + kernel_size=kernel_size, + bias=False, + conv_only=True, + padding=padding, + ) + + +class RegistrationResidualConvBlock(nn.Module): + """ + A block with skip links and layer - norm - activation. + Only changes the number of channels, the spatial size is kept same. 
+    """
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        out_channels: int,
+        num_layers: int = 2,
+        kernel_size: int = 3,
+    ):
+        """
+
+        Args:
+            spatial_dims: number of spatial dimensions
+            in_channels: number of input channels
+            out_channels: number of output channels
+            num_layers: number of layers inside the block
+            kernel_size: convolution kernel size
+        """
+        super(RegistrationResidualConvBlock, self).__init__()
+        self.num_layers = num_layers
+        self.layers = nn.ModuleList(
+            [
+                get_conv_layer(
+                    spatial_dims=spatial_dims,
+                    in_channels=in_channels if i == 0 else out_channels,
+                    out_channels=out_channels,
+                    kernel_size=kernel_size,
+                )
+                for i in range(num_layers)
+            ]
+        )
+        self.norms = nn.ModuleList([Norm[Norm.BATCH, spatial_dims](out_channels) for _ in range(num_layers)])
+        self.acts = nn.ModuleList([nn.ReLU() for _ in range(num_layers)])
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """
+
+        Args:
+            x: Tensor in shape (batch, ``in_channels``, insize_1, insize_2, [insize_3])
+
+        Returns:
+            Tensor in shape (batch, ``out_channels``, insize_1, insize_2, [insize_3]),
+            with the same spatial size as ``x``
+        """
+        skip = x
+        for i, (conv, norm, act) in enumerate(zip(self.layers, self.norms, self.acts)):
+            x = conv(x)
+            x = norm(x)
+            if i == self.num_layers - 1:
+                # last block
+                x = x + skip
+            x = act(x)
+        return x
+
+
+class RegistrationDownSampleBlock(nn.Module):
+    """
+    A down-sample module used in RegUNet to halve the spatial size.
+    The number of channels is kept the same.
+
+    Adapted from:
+        DeepReg (https://github.com/DeepRegNet/DeepReg)
+    """
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        channels: int,
+        pooling: bool,
+    ) -> None:
+        """
+        Args:
+            spatial_dims: number of spatial dimensions.
+            channels: number of channels
+            pooling: use MaxPool if True, strided conv if False
+        """
+        super(RegistrationDownSampleBlock, self).__init__()
+        if pooling:
+            self.layer = Pool[Pool.MAX, spatial_dims](kernel_size=2)
+        else:
+            self.layer = get_conv_block(
+                spatial_dims=spatial_dims,
+                in_channels=channels,
+                out_channels=channels,
+                kernel_size=2,
+                strides=2,
+                padding=0,
+            )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """
+        Halves the spatial dimensions and keeps the number of channels.
+        The output is in shape (batch, ``channels``, insize_1 / 2, insize_2 / 2, [insize_3 / 2]).
+
+        Args:
+            x: Tensor in shape (batch, ``channels``, insize_1, insize_2, [insize_3])
+
+        Raises:
+            ValueError: when input spatial dimensions are not even.
+        """
+        for i in x.shape[2:]:
+            if i % 2 != 0:
+                raise ValueError("expecting x spatial dimensions to be even, " f"got x of shape {x.shape}")
+        out: torch.Tensor = self.layer(x)
+        return out
+
+
+def get_deconv_block(
+    spatial_dims: int,
+    in_channels: int,
+    out_channels: int,
+) -> nn.Module:
+    return Convolution(
+        dimensions=spatial_dims,
+        in_channels=in_channels,
+        out_channels=out_channels,
+        strides=2,
+        act="RELU",
+        norm="BATCH",
+        bias=False,
+        is_transposed=True,
+        padding=1,
+        output_padding=1,
+    )
+
+
+class RegistrationExtractionBlock(nn.Module):
+    """
+    The Extraction Block used in RegUNet.
+    Extracts features from each ``extract_levels`` and takes the average.
+    """
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        extract_levels: Tuple[int],
+        num_channels: Union[Tuple[int], List[int]],
+        out_channels: int,
+        kernel_initializer: Optional[str] = "kaiming_uniform",
+        activation: Optional[str] = None,
+    ):
+        """
+
+        Args:
+            spatial_dims: number of spatial dimensions
+            extract_levels: spatial levels to extract features from, 0 refers to the input scale
+            num_channels: number of channels at each scale level,
+                List or Tuple of length equal to `depth` of the RegNet
+            out_channels: number of output channels
+            kernel_initializer: kernel initializer
+            activation: kernel activation function
+        """
+        super(RegistrationExtractionBlock, self).__init__()
+        self.extract_levels = extract_levels
+        self.max_level = max(extract_levels)
+        self.layers = nn.ModuleList(
+            [
+                get_conv_block(
+                    spatial_dims=spatial_dims,
+                    in_channels=num_channels[d],
+                    out_channels=out_channels,
+                    norm=None,
+                    act=activation,
+                    initializer=kernel_initializer,
+                )
+                for d in extract_levels
+            ]
+        )
+
+    def forward(self, x: List[torch.Tensor], image_size: List[int]) -> torch.Tensor:
+        """
+
+        Args:
+            x: Decoded feature at different spatial levels, sorted from deep to shallow
+            image_size: output image size
+
+        Returns:
+            Tensor of shape (batch, `out_channels`, size1, size2, size3), where (size1, size2, size3) = ``image_size``
+        """
+        feature_list = [
+            F.interpolate(
+                layer(x[self.max_level - level]),
+                size=image_size,
+            )
+            for layer, level in zip(self.layers, self.extract_levels)
+        ]
+        out: torch.Tensor = torch.mean(torch.stack(feature_list, dim=0), dim=0)
+        return out
diff --git a/monai/networks/nets/__init__.py b/monai/networks/nets/__init__.py
index a9308de9d7..db4590cf40 100644
--- a/monai/networks/nets/__init__.py
+++ b/monai/networks/nets/__init__.py
@@ -20,6 +20,7 @@
 from .highresnet import HighResBlock, HighResNet
 from .localnet import LocalNet
 from .regressor import Regressor
+from .regunet import RegUNet
 from .segresnet import SegResNet, SegResNetVAE
 from .senet import SENet, se_resnet50, se_resnet101, se_resnet152, se_resnext50_32x4d, se_resnext101_32x4d, senet154
 from .unet import UNet, Unet, unet
diff --git a/monai/networks/nets/regunet.py b/monai/networks/nets/regunet.py
new file mode 100644
index 0000000000..9499fa06fa
--- /dev/null
+++ b/monai/networks/nets/regunet.py
@@ -0,0 +1,249 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from monai.networks.blocks.regunet_block import (
+    RegistrationDownSampleBlock,
+    RegistrationExtractionBlock,
+    RegistrationResidualConvBlock,
+    get_conv_block,
+    get_deconv_block,
+)
+
+
+class RegUNet(nn.Module):
+    """
+    Class that implements an adapted UNet. This class also serves as the parent class of LocalNet and GlobalNet
+
+    Reference:
+        O. Ronneberger, P. Fischer, and T. Brox,
+        “U-net: Convolutional networks for biomedical image segmentation,”,
+        Lecture Notes in Computer Science, 2015, vol. 9351, pp. 234–241.
+        https://arxiv.org/abs/1505.04597
+
+    Adapted from:
+        DeepReg (https://github.com/DeepRegNet/DeepReg)
+    """
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        num_channel_initial: int,
+        depth: int,
+        out_kernel_initializer: Optional[str] = "kaiming_uniform",
+        out_activation: Optional[str] = None,
+        out_channels: int = 3,
+        extract_levels: Optional[Tuple[int]] = None,
+        pooling: bool = True,
+        concat_skip: bool = False,
+        encode_kernel_sizes: Union[int, List[int]] = 3,
+    ):
+        """
+        Args:
+            spatial_dims: number of spatial dimensions
+            in_channels: number of input channels
+            num_channel_initial: number of initial channels
+            depth: input is at level 0, bottom is at level ``depth``
+            out_kernel_initializer: kernel initializer for the last layer
+            out_activation: activation at the last layer
+            out_channels: number of channels for the output
+            extract_levels: which levels of the network to extract features from; the maximum level must be equal to ``depth``
+            pooling: for down-sampling, use non-parameterized pooling if true, otherwise use a strided convolution
+            concat_skip: when up-sampling, concatenate the skipped tensor if true, otherwise use addition
+            encode_kernel_sizes: kernel size for each encoding block; an int is broadcast to all ``depth + 1`` levels
+        """
+        super(RegUNet, self).__init__()
+        if not extract_levels:
+            extract_levels = (depth,)
+        assert max(extract_levels) == depth
+
+        # save parameters
+        self.spatial_dims = spatial_dims
+        self.in_channels = in_channels
+        self.num_channel_initial = num_channel_initial
+        self.depth = depth
+        self.out_kernel_initializer = out_kernel_initializer
+        self.out_activation = out_activation
+        self.out_channels = out_channels
+        self.extract_levels = extract_levels
+        self.pooling = pooling
+        self.concat_skip = concat_skip
+
+        if isinstance(encode_kernel_sizes, int):
+            encode_kernel_sizes = [encode_kernel_sizes] * (self.depth + 1)
+        assert len(encode_kernel_sizes) == self.depth + 1
+        self.encode_kernel_sizes: List[int] = encode_kernel_sizes
+
+        self.num_channels = [self.num_channel_initial * (2 ** d) for d in range(self.depth + 1)]
+        self.min_extract_level = min(self.extract_levels)
+
+        # init layers
+        # all lists start with d = 0
+        self.encode_convs = None
+        self.encode_pools = None
+        self.bottom_block = None
+        self.decode_deconvs = None
+        self.decode_convs = None
+        self.output_block = None
+
+        # build layers
+        self.build_layers()
+
+    def build_layers(
+        self,
+    ):
+        self.build_encode_layers()
+        self.build_decode_layers()
+
+    def build_encode_layers(self):
+        # encoding / down-sampling
+        self.encode_convs = nn.ModuleList(
+            [
+                self.build_conv_block(
+                    in_channels=self.in_channels if d == 0 else self.num_channels[d - 1],
+                    out_channels=self.num_channels[d],
+                    kernel_size=self.encode_kernel_sizes[d],
+                )
+                for d in range(self.depth)
+            ]
+        )
+        self.encode_pools = nn.ModuleList(
+            [
+                self.build_down_sampling_block(
+                    channels=self.num_channels[d],
+                )
+                for d in range(self.depth)
+            ]
+        )
+        self.bottom_block = self.build_bottom_block(
+            in_channels=self.num_channels[-2], out_channels=self.num_channels[-1]
+        )
+
+    def build_conv_block(
+        self,
+        in_channels,
+        out_channels,
+        kernel_size,
+    ):
+        return nn.Sequential(
+            get_conv_block(
+                spatial_dims=self.spatial_dims,
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=kernel_size,
+            ),
+            RegistrationResidualConvBlock(
+                spatial_dims=self.spatial_dims,
+                in_channels=out_channels,
+                out_channels=out_channels,
+                kernel_size=kernel_size,
+            ),
+        )
+
+    def build_down_sampling_block(
+        self,
+        channels: int,
+    ):
+        return
RegistrationDownSampleBlock(spatial_dims=self.spatial_dims, channels=channels, pooling=self.pooling) + + def build_bottom_block(self, in_channels: int, out_channels: int): + kernel_size = self.encode_kernel_sizes[self.depth] + return nn.Sequential( + get_conv_block( + spatial_dims=self.spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + ), + RegistrationResidualConvBlock( + spatial_dims=self.spatial_dims, + in_channels=out_channels, + out_channels=out_channels, + kernel_size=kernel_size, + ), + ) + + def build_decode_layers(self): + # decoding / up-sampling + # [depth - 1, depth - 2, ..., min_extract_level] + self.decode_deconvs = nn.ModuleList( + [ + self.build_up_sampling_block(in_channels=self.num_channels[d + 1], out_channels=self.num_channels[d]) + for d in range(self.depth - 1, self.min_extract_level - 1, -1) + ] + ) + self.decode_convs = nn.ModuleList( + [ + self.build_conv_block( + in_channels=(2 * self.num_channels[d] if self.concat_skip else self.num_channels[d]), + out_channels=self.num_channels[d], + kernel_size=3, + ) + for d in range(self.depth - 1, self.min_extract_level - 1, -1) + ] + ) + + # extraction + self.output_block = self.build_output_block() + + def build_up_sampling_block( + self, + in_channels: int, + out_channels: int, + ) -> nn.Module: + return get_deconv_block(spatial_dims=self.spatial_dims, in_channels=in_channels, out_channels=out_channels) + + def build_output_block(self) -> nn.Module: + return RegistrationExtractionBlock( + spatial_dims=self.spatial_dims, + extract_levels=self.extract_levels, + num_channels=self.num_channels, + out_channels=self.out_channels, + kernel_initializer=self.out_kernel_initializer, + activation=self.out_activation, + ) + + def forward(self, x): + """ + Args: + x: Tensor in shape (batch, ``in_channels``, insize_1, insize_2, [insize_3]) + + Returns: + Tensor in shape (batch, ``out_channels``, insize_1, insize_2, [insize_3]), with the same spatial size as ``x`` + """ + image_size = x.shape[2:] + skips = [] # [0, ..., depth - 1] + encoded = x + for encode_conv, encode_pool in zip(self.encode_convs, self.encode_pools): + skip = encode_conv(encoded) + encoded = encode_pool(skip) + skips.append(skip) + decoded = self.bottom_block(encoded) + + outs = [decoded] + + # [depth - 1, ..., min_extract_level] + for i, (decode_deconv, decode_conv) in enumerate(zip(self.decode_deconvs, self.decode_convs)): + # [depth - 1, depth - 2, ..., min_extract_level] + decoded = decode_deconv(decoded) + if self.concat_skip: + decoded = torch.cat([decoded, skips[-i - 1]], dim=1) + else: + decoded = decoded + skips[-i - 1] + decoded = decode_conv(decoded) + outs.append(decoded) + + out = self.output_block(outs, image_size=image_size) + return out diff --git a/tests/test_regunet.py b/tests/test_regunet.py new file mode 100644 index 0000000000..4dd968a1cf --- /dev/null +++ b/tests/test_regunet.py @@ -0,0 +1,87 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
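[Editor's note] A minimal sketch, not part of this patch, of the size precondition implied by RegistrationDownSampleBlock's even-size check: RegUNet halves the spatial size ``depth`` times, so every input spatial dimension must be divisible by 2 ** depth. The helper name below is hypothetical.

    def check_regunet_input_size(spatial_size, depth):
        # each of the ``depth`` halvings requires an even size, i.e. 2**depth must divide s
        return all(s % (2 ** depth) == 0 for s in spatial_size)

    assert check_regunet_input_size((16, 16, 16), depth=3)  # matches the 3D test case below
    assert not check_regunet_input_size((17, 17), depth=3)  # matches the ill-shape test case below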
+ +import unittest + +import torch +from parameterized import parameterized + +from monai.networks import eval_mode +from monai.networks.nets.regunet import RegUNet +from tests.utils import test_script_save + +device = "cuda" if torch.cuda.is_available() else "cpu" + + +TEST_CASE_REGUNET_2D = [ + [ + { + "spatial_dims": 2, + "in_channels": 2, + "num_channel_initial": 16, + "depth": 3, + "out_kernel_initializer": "kaiming_uniform", + "out_activation": None, + "out_channels": 2, + "pooling": False, + "concat_skip": True, + "encode_kernel_sizes": 3, + }, + (1, 2, 16, 16), + (1, 2, 16, 16), + ] +] + +TEST_CASE_REGUNET_3D = [ + [ + { + "spatial_dims": 3, + "in_channels": 2, + "num_channel_initial": 16, + "depth": 3, + "out_kernel_initializer": "kaiming_uniform", + "out_activation": "sigmoid", + "out_channels": 2, + "extract_levels": (0, 1, 2, 3), + "pooling": True, + "concat_skip": False, + "encode_kernel_sizes": (3, 3, 3, 7), + }, + (1, 2, 16, 16, 16), + (1, 2, 16, 16, 16), + ] +] + + +class TestREGUNET(unittest.TestCase): + @parameterized.expand(TEST_CASE_REGUNET_2D + TEST_CASE_REGUNET_3D) + def test_shape(self, input_param, input_shape, expected_shape): + net = RegUNet(**input_param).to(device) + with eval_mode(net): + result = net(torch.randn(input_shape).to(device)) + self.assertEqual(result.shape, expected_shape) + + def test_ill_shape(self): + with self.assertRaisesRegex(ValueError, ""): + input_param, _, _ = TEST_CASE_REGUNET_2D[0] + input_shape = (1, input_param["in_channels"], 17, 17) + net = RegUNet(**input_param).to(device) + net.forward(torch.randn(input_shape).to(device)) + + def test_script(self): + input_param, input_shape, _ = TEST_CASE_REGUNET_2D[0] + net = RegUNet(**input_param) + test_data = torch.randn(input_shape) + test_script_save(net, test_data) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_regunet_block.py b/tests/test_regunet_block.py new file mode 100644 index 0000000000..9b96875432 --- /dev/null +++ b/tests/test_regunet_block.py @@ -0,0 +1,97 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
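[Editor's note] As a reference for the extraction tests below, a sketch (not part of the patch) of what RegistrationExtractionBlock.forward computes: each selected level is first mapped to ``out_channels`` by its convolution layer, then resized to ``image_size``, and the results are averaged. With channels already matched, this reduces to:

    import torch
    import torch.nn.functional as F

    decoded = [torch.rand(1, 1, 2, 2), torch.rand(1, 1, 4, 4)]  # deep -> shallow
    resized = [F.interpolate(level, size=(3, 3)) for level in decoded]
    out = torch.mean(torch.stack(resized, dim=0), dim=0)
    assert out.shape == (1, 1, 3, 3)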
+ +import unittest + +import torch +from parameterized import parameterized + +from monai.networks import eval_mode +from monai.networks.blocks.regunet_block import ( + RegistrationDownSampleBlock, + RegistrationExtractionBlock, + RegistrationResidualConvBlock, +) + +TEST_CASE_RESIDUAL = [ + [{"spatial_dims": 2, "in_channels": 1, "out_channels": 2, "num_layers": 1}, (1, 1, 5, 5), (1, 2, 5, 5)], + [{"spatial_dims": 3, "in_channels": 2, "out_channels": 2, "num_layers": 2}, (1, 2, 5, 5, 5), (1, 2, 5, 5, 5)], +] + +TEST_CASE_DOWN_SAMPLE = [ + [{"spatial_dims": 2, "channels": 1, "pooling": False}, (1, 1, 4, 4), (1, 1, 2, 2)], + [{"spatial_dims": 3, "channels": 2, "pooling": True}, (1, 2, 4, 4, 4), (1, 2, 2, 2, 2)], +] + +TEST_CASE_EXTRACTION = [ + [ + { + "spatial_dims": 2, + "extract_levels": (0,), + "num_channels": [1], + "out_channels": 1, + "kernel_initializer": "kaiming_uniform", + "activation": None, + }, + [(1, 1, 2, 2)], + (3, 3), + (1, 1, 3, 3), + ], + [ + { + "spatial_dims": 3, + "extract_levels": (1, 2), + "num_channels": [1, 2, 3], + "out_channels": 1, + "kernel_initializer": "zeros", + "activation": "sigmoid", + }, + [(1, 3, 2, 2, 2), (1, 2, 4, 4, 4), (1, 1, 8, 8, 8)], + (3, 3, 3), + (1, 1, 3, 3, 3), + ], +] + + +class TestRegistrationResidualConvBlock(unittest.TestCase): + @parameterized.expand(TEST_CASE_RESIDUAL) + def test_shape(self, input_param, input_shape, expected_shape): + net = RegistrationResidualConvBlock(**input_param) + with eval_mode(net): + x = net(torch.randn(input_shape)) + self.assertEqual(x.shape, expected_shape) + + +class TestRegistrationDownSampleBlock(unittest.TestCase): + @parameterized.expand(TEST_CASE_DOWN_SAMPLE) + def test_shape(self, input_param, input_shape, expected_shape): + net = RegistrationDownSampleBlock(**input_param) + with eval_mode(net): + x = net(torch.rand(input_shape)) + self.assertEqual(x.shape, expected_shape) + + def test_ill_shape(self): + net = RegistrationDownSampleBlock(spatial_dims=2, channels=2, pooling=True) + with self.assertRaises(ValueError): + net(torch.rand((1, 2, 3, 3))) + + +class TestRegistrationExtractionBlock(unittest.TestCase): + @parameterized.expand(TEST_CASE_EXTRACTION) + def test_shape(self, input_param, input_shapes, image_size, expected_shape): + net = RegistrationExtractionBlock(**input_param) + with eval_mode(net): + x = net([torch.rand(input_shape) for input_shape in input_shapes], image_size) + self.assertEqual(x.shape, expected_shape) + + +if __name__ == "__main__": + unittest.main() From ea1d04ba48a8b9aa85ee8baf5a592be1e40d8d43 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Tue, 9 Mar 2021 21:08:05 +0000 Subject: [PATCH 041/457] remove print msg (#1730) Signed-off-by: Wenqi Li --- monai/data/image_reader.py | 2 -- tests/test_rotated.py | 3 ++- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index 39fc1f46b4..08432e53da 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -645,11 +645,9 @@ def __init__(self, reader_lib: str = "OpenSlide"): if self.reader_lib == "openslide": if has_osl: self.wsi_reader = openslide.OpenSlide - print("> OpenSlide is being used.") elif self.reader_lib == "cuclaraimage": if has_cux: self.wsi_reader = cuimage.CuImage - print("> CuImage is being used.") else: raise ValueError('`reader_lib` should be either "cuClaraImage" or "OpenSlide"') diff --git a/tests/test_rotated.py b/tests/test_rotated.py index 3353ae9fba..779fb2054d 100644 --- a/tests/test_rotated.py +++ b/tests/test_rotated.py @@ 
-104,7 +104,8 @@ def test_correct_results(self, angle, keep_size, mode, padding_mode, align_corne expected = scipy.ndimage.rotate( self.imt[0, 0], -np.rad2deg(angle), (0, 1), not keep_size, order=_order, mode=_mode, prefilter=False ) - np.testing.assert_allclose(expected, rotated["img"][0], atol=1e-3) + good = np.sum(np.isclose(expected, rotated["img"][0], atol=1e-3)) + self.assertLessEqual(np.abs(good - expected.size), 5, "diff at most 5 voxels") expected = scipy.ndimage.rotate( self.segn[0, 0], -np.rad2deg(angle), (0, 1), not keep_size, order=0, mode=_mode, prefilter=False From a462714314057b33c84766aeae90dbb710e38fe6 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Tue, 9 Mar 2021 18:13:25 -0500 Subject: [PATCH 042/457] Fix MONAI docker dependency (#1731) * Update MONAI Docker with OpenSlide dependencies Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update cron job to install apt dependencies Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- .github/workflows/cron.yml | 6 +++++- Dockerfile | 3 +++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index 96368ba6d4..681331aae1 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -58,7 +58,11 @@ jobs: runs-on: [self-hosted, linux, x64, common] steps: - uses: actions/checkout@v2 - - name: Install the dependencies + - name: Install APT dependencies + run: | + apt-get update + DEBIAN_FRONTEND="noninteractive" apt-get install -y libopenslide0 + - name: Install Python dependencies run: | which python python -m pip install --upgrade pip wheel diff --git a/Dockerfile b/Dockerfile index c90558c970..57ea567869 100644 --- a/Dockerfile +++ b/Dockerfile @@ -44,6 +44,9 @@ RUN wget -q ${NGC_CLI_URI} && \ unzip ngccli_cat_linux.zip && chmod u+x ngc && \ md5sum -c ngc.md5 && \ rm -rf ngccli_cat_linux.zip ngc.md5 +RUN apt-get update \ + && DEBIAN_FRONTEND="noninteractive" apt-get install -y libopenslide0 \ + && rm -rf /var/lib/apt/lists/* # append /opt/tools to runtime path for NGC CLI to be accessible from all file system locations ENV PATH=${PATH}:/opt/tools WORKDIR /opt/monai From d4c201e1624c1bd91f8127f4d53e5d854c79e0f9 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 10 Mar 2021 10:57:30 +0000 Subject: [PATCH 043/457] [1710] Gradcam eval (#1734) * use eval for getting class_idx for gradcam --- monai/visualize/class_activation_maps.py | 35 +++++++++++------------- tests/test_vis_gradcam.py | 5 ++++ 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/monai/visualize/class_activation_maps.py b/monai/visualize/class_activation_maps.py index c6051caa61..6e93225af3 100644 --- a/monai/visualize/class_activation_maps.py +++ b/monai/visualize/class_activation_maps.py @@ -17,7 +17,6 @@ import torch.nn as nn import torch.nn.functional as F -from monai.networks.utils import eval_mode, train_mode from monai.transforms import ScaleIntensity from monai.utils import ensure_tuple from monai.visualize.visualizer import default_upsampler @@ -110,26 +109,24 @@ def get_layer(self, layer_id: Union[str, Callable]): return mod raise NotImplementedError(f"Could not find {layer_id}.") - def class_score(self, logits, class_idx=None): - if class_idx is not None: - return logits[:, class_idx].squeeze(), class_idx - class_idx = logits.max(1)[-1] - return logits[:, class_idx].squeeze(), class_idx + def class_score(self, logits, class_idx): + return 
logits[:, class_idx].squeeze() def __call__(self, x, class_idx=None, retain_graph=False): - # Use train_mode if grad is required, else eval_mode - mode = train_mode if self.register_backward else eval_mode - with mode(self.model): - logits = self.model(x) - acti, grad = None, None - if self.register_forward: - acti = tuple(self.activations[layer] for layer in self.target_layers) - if self.register_backward: - score, class_idx = self.class_score(logits, class_idx) - self.model.zero_grad() - self.score, self.class_idx = score, class_idx - score.sum().backward(retain_graph=retain_graph) - grad = tuple(self.gradients[layer] for layer in self.target_layers) + train = self.model.training + self.model.eval() + logits = self.model(x) + self.class_idx = logits.max(1)[-1] if class_idx is None else class_idx + acti, grad = None, None + if self.register_forward: + acti = tuple(self.activations[layer] for layer in self.target_layers) + if self.register_backward: + self.score = self.class_score(logits, self.class_idx) + self.model.zero_grad() + self.score.sum().backward(retain_graph=retain_graph) + grad = tuple(self.gradients[layer] for layer in self.target_layers) + if train: + self.model.train() return logits, acti, grad def get_wrapped_net(self): diff --git a/tests/test_vis_gradcam.py b/tests/test_vis_gradcam.py index 2a7de0e70c..df47c4920e 100644 --- a/tests/test_vis_gradcam.py +++ b/tests/test_vis_gradcam.py @@ -11,6 +11,7 @@ import unittest +import numpy as np import torch from parameterized import parameterized @@ -79,9 +80,13 @@ def test_shape(self, input_data, expected_shape): cam = GradCAM(nn_module=model, target_layers=input_data["target_layers"]) image = torch.rand(input_data["shape"], device=device) result = cam(x=image, layer_idx=-1) + np.testing.assert_array_equal(cam.nn_module.class_idx.cpu(), model(image).max(1)[-1].cpu()) fea_shape = cam.feature_map_size(input_data["shape"], device=device) self.assertTupleEqual(fea_shape, input_data["feature_shape"]) self.assertTupleEqual(result.shape, expected_shape) + # check result is same whether class_idx=None is used or not + result2 = cam(x=image, layer_idx=-1, class_idx=model(image).max(1)[-1].cpu()) + np.testing.assert_array_almost_equal(result, result2) if __name__ == "__main__": From c362beb49750008c833268a12c0e3d3d016fb318 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Wed, 10 Mar 2021 12:58:57 +0000 Subject: [PATCH 044/457] fixes unit test (#1735) * fixes unit test Signed-off-by: Wenqi Li * fixes timed call tests Signed-off-by: Wenqi Li --- tests/test_rotate.py | 3 ++- tests/test_rotated.py | 3 ++- tests/test_timedcall.py | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/test_rotate.py b/tests/test_rotate.py index 6e43ab90e7..a8dca07069 100644 --- a/tests/test_rotate.py +++ b/tests/test_rotate.py @@ -102,7 +102,8 @@ def test_correct_results(self, angle, keep_size, mode, padding_mode, align_corne ) ) expected = np.stack(expected).astype(np.float32) - np.testing.assert_allclose(expected, rotated, atol=1e-1) + n_good = np.sum(np.isclose(expected, rotated, atol=1e-3)) + self.assertLessEqual(expected.size - n_good, 5, "diff at most 5 pixels") @parameterized.expand(TEST_CASES_SHAPE_3D) def test_correct_shape(self, angle, mode, padding_mode, align_corners): diff --git a/tests/test_rotated.py b/tests/test_rotated.py index 779fb2054d..82bc4aed40 100644 --- a/tests/test_rotated.py +++ b/tests/test_rotated.py @@ -52,7 +52,8 @@ def test_correct_results(self, angle, keep_size, mode, padding_mode, align_corne expected = 
scipy.ndimage.rotate( self.imt[0, 0], -np.rad2deg(angle), (0, 1), not keep_size, order=_order, mode=_mode, prefilter=False ) - np.testing.assert_allclose(expected, rotated["img"][0], atol=1e-3) + good = np.sum(np.isclose(expected, rotated["img"][0], atol=1e-3)) + self.assertLessEqual(np.abs(good - expected.size), 5, "diff at most 5 pixels") expected = scipy.ndimage.rotate( self.segn[0, 0], -np.rad2deg(angle), (0, 1), not keep_size, order=0, mode=_mode, prefilter=False diff --git a/tests/test_timedcall.py b/tests/test_timedcall.py index e87d160743..de10abb8f7 100644 --- a/tests/test_timedcall.py +++ b/tests/test_timedcall.py @@ -10,13 +10,14 @@ # limitations under the License. import multiprocessing +import sys import time import unittest from tests.utils import TimedCall -@TimedCall(seconds=10, force_quit=False) +@TimedCall(seconds=10 if sys.platform == "linux" else 60, force_quit=False) def case_1_seconds(arg=None): time.sleep(1) return "good" if not arg else arg From 0b1fc4e3120806fd4c480ab90e9242d86f6bdf90 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 10 Mar 2021 15:02:30 +0000 Subject: [PATCH 045/457] basic inverse API (#1720) basic inverse API --- docs/source/transforms.rst | 6 + monai/transforms/__init__.py | 1 + monai/transforms/compose.py | 14 +- monai/transforms/croppad/dictionary.py | 27 ++- monai/transforms/inverse.py | 113 +++++++++++++ monai/utils/__init__.py | 1 + monai/utils/enums.py | 12 ++ tests/test_decollate.py | 19 ++- tests/test_inverse.py | 218 +++++++++++++++++++++++++ 9 files changed, 404 insertions(+), 7 deletions(-) create mode 100644 monai/transforms/inverse.py create mode 100644 tests/test_inverse.py diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 3bc8d0899a..dcdeab1ac8 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -38,6 +38,12 @@ Generic Interfaces :members: :special-members: __call__ +`InvertibleTransform` +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: InvertibleTransform + :members: + + Vanilla Transforms ------------------ diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 796804df24..5b12da4d21 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -138,6 +138,7 @@ ThresholdIntensityD, ThresholdIntensityDict, ) +from .inverse import InvertibleTransform from .io.array import LoadImage, SaveImage from .io.dictionary import LoadImaged, LoadImageD, LoadImageDict, SaveImaged, SaveImageD, SaveImageDict from .post.array import ( diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py index 21e7da068c..d509ea33a1 100644 --- a/monai/transforms/compose.py +++ b/monai/transforms/compose.py @@ -17,6 +17,8 @@ import numpy as np +from monai.transforms.inverse import InvertibleTransform + # For backwards compatiblity (so this still works: from monai.transforms.compose import MapTransform) from monai.transforms.transform import ( # noqa: F401 MapTransform, @@ -30,7 +32,7 @@ __all__ = ["Compose"] -class Compose(RandomizableTransform): +class Compose(RandomizableTransform, InvertibleTransform): """ ``Compose`` provides the ability to chain a series of calls together in a sequence. 
Each transform in the sequence must take a single argument and @@ -141,3 +143,13 @@ def __call__(self, input_): for _transform in self.transforms: input_ = apply_transform(_transform, input_) return input_ + + def inverse(self, data): + invertible_transforms = [t for t in self.flatten().transforms if isinstance(t, InvertibleTransform)] + if len(invertible_transforms) == 0: + warnings.warn("inverse has been called but no invertible transforms have been supplied") + + # loop backwards over transforms + for t in reversed(invertible_transforms): + data = apply_transform(t.inverse, data) + return data diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 823b2dd3f4..667fb7a821 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -15,6 +15,8 @@ Class names are ended with 'd' to denote dictionary-based transforms. """ +from copy import deepcopy +from math import floor from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union import numpy as np @@ -30,6 +32,7 @@ SpatialCrop, SpatialPad, ) +from monai.transforms.inverse import InvertibleTransform from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform from monai.transforms.utils import ( generate_pos_neg_label_crop_centers, @@ -38,6 +41,7 @@ weighted_patch_samples, ) from monai.utils import Method, NumpyPadMode, ensure_tuple, ensure_tuple_rep, fall_back_tuple +from monai.utils.enums import InverseKeys __all__ = [ "NumpyPadModeSequence", @@ -82,7 +86,7 @@ NumpyPadModeSequence = Union[Sequence[Union[NumpyPadMode, str]], NumpyPadMode, str] -class SpatialPadd(MapTransform): +class SpatialPadd(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.SpatialPad`. Performs padding to the data, symmetric for all sides or all on one side for each dimension. @@ -119,9 +123,30 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key, m in self.key_iterator(d, self.mode): + self.push_transform(d, key) d[key] = self.padder(d[key], mode=m) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + orig_size = transform[InverseKeys.ORIG_SIZE.value] + if self.padder.method == Method.SYMMETRIC: + current_size = d[key].shape[1:] + roi_center = [floor(i / 2) if r % 2 == 0 else (i - 1) // 2 for r, i in zip(orig_size, current_size)] + else: + roi_center = [floor(r / 2) if r % 2 == 0 else (r - 1) // 2 for r in orig_size] + + inverse_transform = SpatialCrop(roi_center, orig_size) + # Apply inverse transform + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + class BorderPadd(MapTransform): """ diff --git a/monai/transforms/inverse.py b/monai/transforms/inverse.py new file mode 100644 index 0000000000..f9de8746ca --- /dev/null +++ b/monai/transforms/inverse.py @@ -0,0 +1,113 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, Hashable, Optional, Tuple + +import numpy as np + +from monai.transforms.transform import RandomizableTransform, Transform +from monai.utils.enums import InverseKeys + +__all__ = ["InvertibleTransform"] + + +class InvertibleTransform(Transform): + """Classes for invertible transforms. + + This class exists so that an ``invert`` method can be implemented. This allows, for + example, images to be cropped, rotated, padded, etc., during training and inference, + and after be returned to their original size before saving to file for comparison in + an external viewer. + + When the ``__call__`` method is called, the transformation information for each key is + stored. If the transforms were applied to keys "image" and "label", there will be two + extra keys in the dictionary: "image_transforms" and "label_transforms". Each list + contains a list of the transforms applied to that key. When the ``inverse`` method is + called, the inverse is called on each key individually, which allows for different + parameters being passed to each label (e.g., different interpolation for image and + label). + + When the ``inverse`` method is called, the inverse transforms are applied in a last- + in-first-out order. As the inverse is applied, its entry is removed from the list + detailing the applied transformations. That is to say that during the forward pass, + the list of applied transforms grows, and then during the inverse it shrinks back + down to an empty list. + + The information in ``data[key_transform]`` will be compatible with the default collate + since it only stores strings, numbers and arrays. + + We currently check that the ``id()`` of the transform is the same in the forward and + inverse directions. This is a useful check to ensure that the inverses are being + processed in the correct order. However, this may cause issues if the ``id()`` of the + object changes (such as multiprocessing on Windows). If you feel this issue affects + you, please raise a GitHub issue. + + Note to developers: When converting a transform to an invertible transform, you need to: + + #. Inherit from this class. + #. In ``__call__``, add a call to ``push_transform``. + #. Any extra information that might be needed for the inverse can be included with the + dictionary ``extra_info``. This dictionary should have the same keys regardless of + whether ``do_transform`` was `True` or `False` and can only contain objects that are + accepted in pytorch data loader's collate function (e.g., `None` is not allowed). + #. Implement an ``inverse`` method. Make sure that after performing the inverse, + ``pop_transform`` is called. 
+ + """ + + def push_transform( + self, + data: dict, + key: Hashable, + extra_info: Optional[dict] = None, + orig_size: Optional[Tuple] = None, + ) -> None: + """Append to list of applied transforms for that key.""" + key_transform = str(key) + InverseKeys.KEY_SUFFIX.value + info = { + InverseKeys.CLASS_NAME.value: self.__class__.__name__, + InverseKeys.ID.value: id(self), + InverseKeys.ORIG_SIZE.value: orig_size or data[key].shape[1:], + } + if extra_info is not None: + info[InverseKeys.EXTRA_INFO.value] = extra_info + # If class is randomizable transform, store whether the transform was actually performed (based on `prob`) + if isinstance(self, RandomizableTransform): + info[InverseKeys.DO_TRANSFORM.value] = self._do_transform + # If this is the first, create list + if key_transform not in data: + data[key_transform] = [] + data[key_transform].append(info) + + def check_transforms_match(self, transform: dict) -> None: + """Check transforms are of same instance.""" + if transform[InverseKeys.ID.value] != id(self): + raise RuntimeError("Should inverse most recently applied invertible transform first") + + def get_most_recent_transform(self, data: dict, key: Hashable) -> dict: + """Get most recent transform.""" + transform = dict(data[str(key) + InverseKeys.KEY_SUFFIX.value][-1]) + self.check_transforms_match(transform) + return transform + + def pop_transform(self, data: dict, key: Hashable) -> None: + """Remove most recent transform.""" + data[str(key) + InverseKeys.KEY_SUFFIX.value].pop() + + def inverse(self, data: dict) -> Dict[Hashable, np.ndarray]: + """ + Inverse of ``__call__``. + + Raises: + NotImplementedError: When the subclass does not override this method. + + """ + raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") diff --git a/monai/utils/__init__.py b/monai/utils/__init__.py index 1e17d44029..3c1e7efe24 100644 --- a/monai/utils/__init__.py +++ b/monai/utils/__init__.py @@ -20,6 +20,7 @@ GridSampleMode, GridSamplePadMode, InterpolateMode, + InverseKeys, LossReduction, Method, MetricReduction, diff --git a/monai/utils/enums.py b/monai/utils/enums.py index d1d2d3bcce..d661781616 100644 --- a/monai/utils/enums.py +++ b/monai/utils/enums.py @@ -28,6 +28,7 @@ "ChannelMatching", "SkipMode", "Method", + "InverseKeys", ] @@ -214,3 +215,14 @@ class Method(Enum): SYMMETRIC = "symmetric" END = "end" + + +class InverseKeys(Enum): + """Extra meta data keys used for inverse transforms.""" + + CLASS_NAME = "class" + ID = "id" + ORIG_SIZE = "orig_size" + EXTRA_INFO = "extra_info" + DO_TRANSFORM = "do_transforms" + KEY_SUFFIX = "_transforms" diff --git a/tests/test_decollate.py b/tests/test_decollate.py index 5c6f04b48e..24a34482b5 100644 --- a/tests/test_decollate.py +++ b/tests/test_decollate.py @@ -9,7 +9,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
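[Editor's note] To make the bookkeeping concrete: after an invertible transform runs, ``push_transform`` appends one entry per key, using the InverseKeys defined above. A hypothetical trace for a padded "image" key might look like (values illustrative):

    trace = data["image" + InverseKeys.KEY_SUFFIX.value][-1]  # i.e. data["image_transforms"][-1]
    # trace == {"class": "SpatialPadd", "id": <id() of the transform>, "orig_size": (100, 101)}
    # plus optional "extra_info" and, for randomizable transforms, "do_transforms"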
+import sys import unittest +from enum import Enum import numpy as np import torch @@ -20,6 +22,7 @@ from monai.transforms import AddChanneld, Compose, LoadImaged, RandFlipd, SpatialPadd, ToTensord from monai.transforms.post.dictionary import Decollated from monai.utils import optional_import, set_determinism +from monai.utils.enums import InverseKeys from tests.utils import make_nifti_image _, has_nib = optional_import("nibabel") @@ -46,14 +49,20 @@ def tearDown(self) -> None: def check_match(self, in1, in2): if isinstance(in1, dict): self.assertTrue(isinstance(in2, dict)) - self.check_match(list(in1.keys()), list(in2.keys())) - self.check_match(list(in1.values()), list(in2.values())) - elif any(isinstance(in1, i) for i in [list, tuple]): + for (k1, v1), (k2, v2) in zip(in1.items(), in2.items()): + if isinstance(k1, Enum) and isinstance(k2, Enum): + k1, k2 = k1.value, k2.value + self.check_match(k1, k2) + # Transform ids won't match for windows with multiprocessing, so don't check values + if k1 == InverseKeys.ID.value and sys.platform in ["darwin", "win32"]: + continue + self.check_match(v1, v2) + elif isinstance(in1, (list, tuple)): for l1, l2 in zip(in1, in2): self.check_match(l1, l2) - elif any(isinstance(in1, i) for i in [str, int]): + elif isinstance(in1, (str, int)): self.assertEqual(in1, in2) - elif any(isinstance(in1, i) for i in [torch.Tensor, np.ndarray]): + elif isinstance(in1, (torch.Tensor, np.ndarray)): np.testing.assert_array_equal(in1, in2) else: raise RuntimeError(f"Not sure how to compare types. type(in1): {type(in1)}, type(in2): {type(in2)}") diff --git a/tests/test_inverse.py b/tests/test_inverse.py new file mode 100644 index 0000000000..46729c7bc6 --- /dev/null +++ b/tests/test_inverse.py @@ -0,0 +1,218 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
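[Editor's note] The round-trip property exercised by these tests, as a minimal sketch assuming only the SpatialPadd.inverse added earlier in this patch:

    import numpy as np
    from monai.transforms import SpatialPadd

    t = SpatialPadd(keys="image", spatial_size=(16, 16))
    padded = t({"image": np.zeros((1, 10, 12))})  # pads to (1, 16, 16) and records the transform
    restored = t.inverse(padded)                  # crops back and pops the recorded entry
    assert restored["image"].shape == (1, 10, 12)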
+ +import sys +import unittest +from typing import TYPE_CHECKING, List, Tuple + +import numpy as np +import torch +from parameterized import parameterized + +from monai.data import CacheDataset, DataLoader, create_test_image_2d, create_test_image_3d +from monai.data.utils import decollate_batch +from monai.networks.nets import UNet +from monai.transforms import ( + AddChannel, + AddChanneld, + Compose, + InvertibleTransform, + LoadImaged, + ResizeWithPadOrCrop, + SpatialPadd, + allow_missing_keys_mode, +) +from monai.utils import first, optional_import, set_determinism +from monai.utils.enums import InverseKeys +from tests.utils import make_nifti_image, make_rand_affine + +if TYPE_CHECKING: + + has_nib = True +else: + _, has_nib = optional_import("nibabel") + +KEYS = ["image", "label"] + +TESTS: List[Tuple] = [] + +TESTS.append( + ( + "SpatialPadd (x2) 2d", + "2D", + 0.0, + SpatialPadd(KEYS, spatial_size=[111, 113], method="end"), + SpatialPadd(KEYS, spatial_size=[118, 117]), + ) +) + +TESTS.append( + ( + "SpatialPadd 3d", + "3D", + 0.0, + SpatialPadd(KEYS, spatial_size=[112, 113, 116]), + ) +) + +TESTS_COMPOSE_X2 = [(t[0] + " Compose", t[1], t[2], Compose(Compose(t[3:]))) for t in TESTS] + +TESTS = TESTS + TESTS_COMPOSE_X2 # type: ignore + + +class TestInverse(unittest.TestCase): + """Test inverse methods. + + If tests are failing, the following function might be useful for displaying + `x`, `fx`, `f⁻¹fx` and `x - f⁻¹fx`. + + .. code-block:: python + + def plot_im(orig, fwd_bck, fwd): + import matplotlib.pyplot as plt + diff_orig_fwd_bck = orig - fwd_bck + ims_to_show = [orig, fwd, fwd_bck, diff_orig_fwd_bck] + titles = ["x", "fx", "f⁻¹fx", "x - f⁻¹fx"] + fig, axes = plt.subplots(1, 4, gridspec_kw={"width_ratios": [i.shape[1] for i in ims_to_show]}) + vmin = min(np.array(i).min() for i in [orig, fwd_bck, fwd]) + vmax = max(np.array(i).max() for i in [orig, fwd_bck, fwd]) + for im, title, ax in zip(ims_to_show, titles, axes): + _vmin, _vmax = (vmin, vmax) if id(im) != id(diff_orig_fwd_bck) else (None, None) + im = np.squeeze(np.array(im)) + while im.ndim > 2: + im = im[..., im.shape[-1] // 2] + im_show = ax.imshow(np.squeeze(im), vmin=_vmin, vmax=_vmax) + ax.set_title(title, fontsize=25) + ax.axis("off") + fig.colorbar(im_show, ax=ax) + plt.show() + + This can then be added to the exception: + + .. code-block:: python + + except AssertionError: + print( + f"Failed: {name}. 
Mean diff = {mean_diff} (expected <= {acceptable_diff}), unmodified diff: {unmodded_diff}" + ) + if orig[0].ndim > 1: + plot_im(orig, fwd_bck, unmodified) + """ + + def setUp(self): + if not has_nib: + self.skipTest("nibabel required for test_inverse") + + set_determinism(seed=0) + + self.all_data = {} + + affine = make_rand_affine() + affine[0] *= 2 + + im_1d = AddChannel()(np.arange(0, 10)) + self.all_data["1D"] = {"image": im_1d, "label": im_1d, "other": im_1d} + + im_2d_fname, seg_2d_fname = [make_nifti_image(i) for i in create_test_image_2d(101, 100)] + im_3d_fname, seg_3d_fname = [make_nifti_image(i, affine) for i in create_test_image_3d(100, 101, 107)] + + load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)]) + self.all_data["2D"] = load_ims({"image": im_2d_fname, "label": seg_2d_fname}) + self.all_data["3D"] = load_ims({"image": im_3d_fname, "label": seg_3d_fname}) + + def tearDown(self): + set_determinism(seed=None) + + def check_inverse(self, name, keys, orig_d, fwd_bck_d, unmodified_d, acceptable_diff): + for key in keys: + orig = orig_d[key] + fwd_bck = fwd_bck_d[key] + if isinstance(fwd_bck, torch.Tensor): + fwd_bck = fwd_bck.cpu().numpy() + unmodified = unmodified_d[key] + if isinstance(orig, np.ndarray): + mean_diff = np.mean(np.abs(orig - fwd_bck)) + unmodded_diff = np.mean(np.abs(orig - ResizeWithPadOrCrop(orig.shape[1:])(unmodified))) + try: + self.assertLessEqual(mean_diff, acceptable_diff) + except AssertionError: + print( + f"Failed: {name}. Mean diff = {mean_diff} (expected <= {acceptable_diff}), unmodified diff: {unmodded_diff}" + ) + raise + + @parameterized.expand(TESTS) + def test_inverse(self, _, data_name, acceptable_diff, *transforms): + name = _ + + data = self.all_data[data_name] + + forwards = [data.copy()] + + # Apply forwards + for t in transforms: + forwards.append(t(forwards[-1])) + + # Check that error is thrown when inverse are used out of order. 
+ t = SpatialPadd("image", [10, 5]) + with self.assertRaises(RuntimeError): + t.inverse(forwards[-1]) + + # Apply inverses + fwd_bck = forwards[-1].copy() + for i, t in enumerate(reversed(transforms)): + if isinstance(t, InvertibleTransform): + fwd_bck = t.inverse(fwd_bck) + self.check_inverse(name, data.keys(), forwards[-i - 2], fwd_bck, forwards[-1], acceptable_diff) + + def test_inverse_inferred_seg(self): + + test_data = [] + for _ in range(20): + image, label = create_test_image_2d(100, 101) + test_data.append({"image": image, "label": label.astype(np.float32)}) + + batch_size = 10 + # num workers = 0 for mac + num_workers = 2 if sys.platform != "darwin" else 0 + transforms = Compose([AddChanneld(KEYS), SpatialPadd(KEYS, (150, 153))]) + num_invertible_transforms = sum(1 for i in transforms.transforms if isinstance(i, InvertibleTransform)) + + dataset = CacheDataset(test_data, transform=transforms, progress=False) + loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) + + device = "cuda" if torch.cuda.is_available() else "cpu" + model = UNet( + dimensions=2, + in_channels=1, + out_channels=1, + channels=(2, 4), + strides=(2,), + ).to(device) + + data = first(loader) + labels = data["label"].to(device) + segs = model(labels).detach().cpu() + label_transform_key = "label" + InverseKeys.KEY_SUFFIX.value + segs_dict = {"label": segs, label_transform_key: data[label_transform_key]} + segs_dict_decollated = decollate_batch(segs_dict) + + # inverse of individual segmentation + seg_dict = first(segs_dict_decollated) + with allow_missing_keys_mode(transforms): + inv_seg = transforms.inverse(seg_dict)["label"] + self.assertEqual(len(data["label_transforms"]), num_invertible_transforms) + self.assertEqual(len(seg_dict["label_transforms"]), num_invertible_transforms) + self.assertEqual(inv_seg.shape[1:], test_data[0]["label"].shape) + + +if __name__ == "__main__": + unittest.main() From c3e5a692aa2e24a07d8025a535e4d396b8902b01 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 10 Mar 2021 20:28:32 +0000 Subject: [PATCH 046/457] improve decollate test for set_determinism (#1736) Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- tests/test_decollate.py | 21 ++++++--------------- tests/test_thread_buffer.py | 13 ++++++++----- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/tests/test_decollate.py b/tests/test_decollate.py index 24a34482b5..4ed8de6bbb 100644 --- a/tests/test_decollate.py +++ b/tests/test_decollate.py @@ -15,7 +15,6 @@ import numpy as np import torch -from parameterized import parameterized from monai.data import CacheDataset, DataLoader, create_test_image_2d from monai.data.utils import decollate_batch @@ -27,17 +26,6 @@ _, has_nib = optional_import("nibabel") -IM_2D = create_test_image_2d(100, 101)[0] -DATA_2D = {"image": make_nifti_image(IM_2D) if has_nib else IM_2D} - -TESTS = [] -TESTS.append( - ( - "2D", - [DATA_2D for _ in range(6)], - ) -) - class TestDeCollate(unittest.TestCase): def setUp(self) -> None: @@ -67,8 +55,11 @@ def check_match(self, in1, in2): else: raise RuntimeError(f"Not sure how to compare types. 
type(in1): {type(in1)}, type(in2): {type(in2)}") - @parameterized.expand(TESTS) - def test_decollation(self, _, data, batch_size=2, num_workers=2): + def test_decollation(self, batch_size=2, num_workers=2): + + im = create_test_image_2d(100, 101)[0] + data = [{"image": make_nifti_image(im) if has_nib else im} for _ in range(6)] + transforms = Compose( [ AddChanneld("image"), @@ -82,7 +73,7 @@ def test_decollation(self, _, data, batch_size=2, num_workers=2): transforms = Compose([LoadImaged("image"), transforms]) dataset = CacheDataset(data, transforms, progress=False) - loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) + loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) for b, batch_data in enumerate(loader): decollated_1 = decollate_batch(batch_data) diff --git a/tests/test_thread_buffer.py b/tests/test_thread_buffer.py index 07e5a779ca..d139b44c85 100644 --- a/tests/test_thread_buffer.py +++ b/tests/test_thread_buffer.py @@ -9,6 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys import time import unittest @@ -57,11 +58,13 @@ def test_time(self): time.sleep(0.5) # while "computation" is happening the next batch is being generated, saving 0.4 s buffered_time = pc.total_time - - self.assertTrue( - buffered_time < unbuffered_time, - f"Buffered time {buffered_time} should be less than unbuffered time {unbuffered_time}", - ) + if sys.platform == "darwin": # skip macOS measure + print(f"darwin: Buffered time {buffered_time} vs unbuffered time {unbuffered_time}") + else: + self.assertTrue( + buffered_time < unbuffered_time, + f"Buffered time {buffered_time} should be less than unbuffered time {unbuffered_time}", + ) if __name__ == "__main__": From 6d3bf2fb7910afcb656fed91eb1d337c61c40ac4 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 11 Mar 2021 03:59:45 +0000 Subject: [PATCH 047/457] 1236 integration test memory (#1738) * addresses testing instance memory errors Signed-off-by: Wenqi Li --- .github/pull_request_template.md | 4 ++-- .github/workflows/cron.yml | 27 +++++++++++++++++---- .github/workflows/integration.yml | 2 +- .github/workflows/pythonapp.yml | 4 ++-- .github/workflows/setupapp.yml | 16 ++++++++++--- CONTRIBUTING.md | 8 +++---- runtests.sh | 39 +++++++++++++++++-------------- tests/runner.py | 1 + 8 files changed, 66 insertions(+), 35 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 61b8814857..f7024f1a08 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -11,7 +11,7 @@ A few sentences describing the changes proposed in this pull request. - [x] Non-breaking change (fix or new feature that would not break existing functionality). - [ ] Breaking change (fix or new feature that would cause existing functionality to change). - [ ] New tests added to cover the changes. -- [ ] Integration tests passed locally by running `./runtests.sh --codeformat --coverage`. -- [ ] Quick tests passed locally by running `./runtests.sh --quick`. +- [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`. +- [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests`. - [ ] In-line docstrings updated. - [ ] Documentation updated, tested `make html` command in the `docs/` folder. 
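[Editor's note] With this change the recommended local checks become (flags as defined by the runtests.sh changes later in this patch):

    ./runtests.sh -f -u --net --coverage   # style checks, unit and integration tests, coverage report
    ./runtests.sh --quick --unittests      # fast unit-test pass during development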
diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index 681331aae1..90abb5d7e4 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -40,9 +40,16 @@ jobs: nvidia-smi export CUDA_VISIBLE_DEVICES=$(python -m tests.utils) echo $CUDA_VISIBLE_DEVICES + stop_time=$((LAUNCH_DELAY + $(date +%s))) + while [ $(date +%s) -lt $stop_time ]; do + python -c 'import torch; torch.rand(5, 3, device=torch.device("cuda:0"))'; + done python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))" - python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))' - BUILD_MONAI=1 ./runtests.sh --coverage + python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))' + BUILD_MONAI=1 ./runtests.sh --coverage --unittests # unit tests with coverage report + export CUDA_VISIBLE_DEVICES=$(python -m tests.utils) + echo $CUDA_VISIBLE_DEVICES + BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report coverage xml - name: Upload coverage uses: codecov/codecov-action@v1 @@ -76,9 +83,16 @@ jobs: nvidia-smi export CUDA_VISIBLE_DEVICES=$(python -m tests.utils) echo $CUDA_VISIBLE_DEVICES + stop_time=$((LAUNCH_DELAY + $(date +%s))) + while [ $(date +%s) -lt $stop_time ]; do + python -c 'import torch; torch.rand(5, 3, device=torch.device("cuda:0"))'; + done python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))" - python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))' - BUILD_MONAI=1 ./runtests.sh --coverage + python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))' + BUILD_MONAI=1 ./runtests.sh --coverage --unittests # unit tests with coverage report + export CUDA_VISIBLE_DEVICES=$(python -m tests.utils) + echo $CUDA_VISIBLE_DEVICES + BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report coverage xml - name: Upload coverage uses: codecov/codecov-action@v1 @@ -104,7 +118,10 @@ jobs: python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))" python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))' ngc --version - BUILD_MONAI=1 ./runtests.sh --coverage --pytype + BUILD_MONAI=1 ./runtests.sh --coverage --pytype --unittests # unit tests with pytype checks, coverage report + export CUDA_VISIBLE_DEVICES=$(python -m tests.utils) + echo $CUDA_VISIBLE_DEVICES + BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report coverage xml - name: Upload coverage uses: codecov/codecov-action@v1 diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 66f6c2956d..ac3efbb751 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -44,7 +44,7 @@ jobs: echo $CUDA_VISIBLE_DEVICES python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))" python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))' - BUILD_MONAI=1 ./runtests.sh --net + BUILD_MONAI=1 ./runtests.sh --unittests --net - name: Add reaction uses: peter-evans/create-or-update-comment@v1 with: diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 227d55a082..8ed1f6d21e 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -39,7 +39,7 @@ jobs: # clean up temporary files $(pwd)/runtests.sh 
--clean # Git hub actions have 2 cores, so parallize pytype - $(pwd)/runtests.sh --nounittests --codeformat -j 2 + $(pwd)/runtests.sh --codeformat -j 2 quick-py3: # full dependencies installed runs-on: ${{ matrix.os }} @@ -241,7 +241,7 @@ jobs: python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))" python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))' python -c "import monai; monai.config.print_config()" - BUILD_MONAI=1 ./runtests.sh --quick + BUILD_MONAI=1 ./runtests.sh --quick --unittests if [ ${{ matrix.environment }} == "PT18+CUDA112" ]; then # test the clang-format tool downloading once coverage run -m tests.clang_format_utils diff --git a/.github/workflows/setupapp.yml b/.github/workflows/setupapp.yml index e40660c213..e5cb9a7cf1 100644 --- a/.github/workflows/setupapp.yml +++ b/.github/workflows/setupapp.yml @@ -41,12 +41,22 @@ jobs: - name: Run unit tests report coverage run: | python -m pip list + export LAUNCH_DELAY=$[ $RANDOM % 16 * 60 ] + echo "Sleep $LAUNCH_DELAY" + sleep $LAUNCH_DELAY nvidia-smi export CUDA_VISIBLE_DEVICES=$(python -m tests.utils) echo $CUDA_VISIBLE_DEVICES + stop_time=$((LAUNCH_DELAY + $(date +%s))) + while [ $(date +%s) -lt $stop_time ]; do + python -c 'import torch; torch.rand(5, 3, device=torch.device("cuda:0"))'; + done python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))" - python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))' - BUILD_MONAI=1 ./runtests.sh --coverage + python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))' + BUILD_MONAI=1 ./runtests.sh --coverage --unittests # unit tests with coverage report + export CUDA_VISIBLE_DEVICES=$(python -m tests.utils) + echo $CUDA_VISIBLE_DEVICES + BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report coverage xml - name: Upload coverage uses: codecov/codecov-action@v1 @@ -88,7 +98,7 @@ jobs: run: | python -m pip list python -c 'import torch; print(torch.__version__); print(torch.rand(5,3))' - BUILD_MONAI=1 ./runtests.sh --quick + BUILD_MONAI=1 ./runtests.sh --quick --unittests coverage xml - name: Upload coverage uses: codecov/codecov-action@v1 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 01a4773b5a..325f81b127 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -52,10 +52,10 @@ Before submitting a pull request, we recommend that all linting should pass, by ```bash pip install -U -r requirements-dev.txt # install the latest tools -./runtests.sh --codeformat --nounittests # runs the linting tools only +./runtests.sh --codeformat # runs the linting tools # try to fix the coding style errors automatically -./runtests.sh --autofix --nounittests +./runtests.sh --autofix ``` License information: all source code files should start with this paragraph: @@ -98,12 +98,12 @@ Before submitting a pull request, we recommend that all linting and unit tests should pass, by running the following command locally: ```bash -./runtests.sh --codeformat --coverage +./runtests.sh --codeformat --coverage --unittests ``` or (for new features that would not break existing functionality): ```bash -./runtests.sh --quick +./runtests.sh --quick --unittests ``` It is recommended that the new test `test_[module_name].py` is constructed by using only diff --git a/runtests.sh b/runtests.sh index 1395ccdcfd..0d3551291a 100755 --- a/runtests.sh +++ b/runtests.sh @@ -36,9 +36,7 @@ doQuickTests=false 
doNetTests=false doDryRun=false doZooTests=false - -doUnitTests=true - +doUnitTests=false doBlackFormat=false doBlackFix=false doIsortFormat=false @@ -55,16 +53,17 @@ PY_EXE=${MONAI_PY_EXE:-$(which python)} function print_usage { echo "runtests.sh [--codeformat] [--autofix] [--black] [--isort] [--flake8] [--clangformat] [--pytype] [--mypy]" - echo " [--nounittests] [--coverage] [--quick] [--net] [--dryrun] [-j number] [--clean] [--help] [--version]" + echo " [--unittests] [--coverage] [--quick] [--net] [--dryrun] [-j number] [--clean] [--help] [--version]" echo "" echo "MONAI unit testing utilities." echo "" echo "Examples:" - echo "./runtests.sh --codeformat --coverage # run full tests (${green}recommended before making pull requests${noColor})." - echo "./runtests.sh --codeformat --nounittests # run coding style and static type checking." - echo "./runtests.sh --quick # run minimal unit tests, for quick verification during code developments." - echo "./runtests.sh --autofix --nounittests # run automatic code formatting using \"isort\" and \"black\"." - echo "./runtests.sh --clean # clean up temporary files and run \"${PY_EXE} setup.py develop --uninstall\"." + echo "./runtests.sh -f -u --net --coverage # run style checks, full tests, print code coverage (${green}recommended for pull requests${noColor})." + echo "./runtests.sh -f -u # run style checks and unit tests." + echo "./runtests.sh -f # run coding style and static type checking." + echo "./runtests.sh --quick --unittests # run minimal unit tests, for quick verification during code developments." + echo "./runtests.sh --autofix # run automatic code formatting using \"isort\" and \"black\"." + echo "./runtests.sh --clean # clean up temporary files and run \"${PY_EXE} setup.py develop --uninstall\"." echo "" echo "Code style check options:" echo " --black : perform \"black\" code format checks" @@ -79,11 +78,11 @@ function print_usage { echo " -j, --jobs : number of parallel jobs to run \"pytype\" (default $NUM_PARALLEL)" echo "" echo "MONAI unit testing options:" - echo " --nounittests : skip doing unit testing (i.e. only format lint testers)" - echo " --coverage : peforms coverage analysis of code for tests run" - echo " -q, --quick : disable long running tests" - echo " --net : perform training/inference/eval integration testing" - echo " --list_tests : list tests and exit" + echo " -u, --unittests : perform unit testing" + echo " --coverage : report testing code coverage, to be used with \"--net\", \"--unittests\"" + echo " -q, --quick : skip long running unit tests and integration tests" + echo " --net : perform integration testing" + echo " --list_tests : list unit tests and exit" echo "" echo "Misc. options:" echo " --dryrun : display the commands to the screen without running" @@ -92,7 +91,7 @@ function print_usage { echo " -h, --help : show this help message and exit" echo " -v, --version : show MONAI and system version information and exit" echo "" - echo "${separator}For bug reports, questions, and discussions, please file an issue at:" + echo "${separator}For bug reports and feature requests, please file an issue at:" echo " https://github.com/Project-MONAI/MONAI/issues/new/choose" echo "" echo "To choose an alternative python executable, set the environmental variable, \"MONAI_PY_EXE\"." @@ -220,8 +219,8 @@ do --dryrun) doDryRun=true ;; - --nou*) # allow --nounittest | --nounittests | --nounittesting etc. - doUnitTests=false + -u|--u*) # allow --unittest | --unittests | --unittesting etc. 
+ doUnitTests=true ;; -f|--codeformat) doBlackFormat=true @@ -268,6 +267,10 @@ do print_version exit 1 ;; + --nou*) # allow --nounittest | --nounittests | --nounittesting etc. + print_error_msg "nounittest option is deprecated, no unit tests is the default setting" + print_usage + ;; *) print_error_msg "Incorrect commandline provided, invalid key: $key" print_usage @@ -511,7 +514,7 @@ if [ $doUnitTests = true ] then echo "${separator}${blue}unittests${noColor}" torch_validate - ${cmdPrefix}${cmd} ./tests/runner.py + ${cmdPrefix}${cmd} ./tests/runner.py -p "test_[!integration]*py" fi # network training/inference/eval integration tests diff --git a/tests/runner.py b/tests/runner.py index b5d1de5fc1..f7be96cfb3 100644 --- a/tests/runner.py +++ b/tests/runner.py @@ -126,6 +126,7 @@ def get_default_pattern(loader): tests = loader.discover(args.path, args.pattern) discovery_time = pc.total_time print(f"time to discover tests: {discovery_time}s") + print(tests) test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast From 401ea299bf094c10725d9a0566379b69b7410735 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 11 Mar 2021 22:42:52 +0000 Subject: [PATCH 048/457] 1541 update coverage report config (#1746) * update coverage config Signed-off-by: Wenqi Li * temp tests Signed-off-by: Wenqi Li * fixes https://github.com/Project-MONAI/MONAI/runs/2083800079?check_suite_focus=true#step:5:13886 Signed-off-by: Wenqi Li * test cases matching in runner Signed-off-by: Wenqi Li * fixes openslide tests Signed-off-by: Wenqi Li * fixes https://github.com/Project-MONAI/MONAI/runs/2086767998?check_suite_focus=true#step:7:5955 Signed-off-by: Wenqi Li * fixes print stats Signed-off-by: Wenqi Li * remove temp tests Signed-off-by: Wenqi Li * remove unused Signed-off-by: Wenqi Li * remove global logging config Signed-off-by: Wenqi Li * omit setup.py Signed-off-by: Wenqi Li --- .dockerignore | 3 +++ .gitignore | 2 ++ monai/transforms/utility/array.py | 9 +++++++-- monai/transforms/utility/dictionary.py | 1 + requirements-min.txt | 2 +- runtests.sh | 15 +++++++++------ setup.cfg | 24 +++++++++++++++++++++--- tests/runner.py | 24 +++++++++++++++--------- tests/test_affine.py | 5 +---- tests/test_affined.py | 5 +---- tests/test_data_stats.py | 6 ++++-- tests/test_data_statsd.py | 7 +++++-- tests/test_handler_stats.py | 26 ++++++++++++++++---------- tests/test_openslide_reader.py | 22 ++++++++++++---------- tests/test_rand_rotate.py | 3 ++- tests/test_rand_rotated.py | 3 ++- tests/test_rotate.py | 3 ++- 17 files changed, 104 insertions(+), 56 deletions(-) diff --git a/.dockerignore b/.dockerignore index 549e63bad5..262da4d0dd 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,6 +4,9 @@ __pycache__/ docs/ .coverage +.coverage.* +.coverage/ +coverage.xml .readthedocs.yml *.md *.toml diff --git a/.gitignore b/.gitignore index 0d1455d70d..f60641d6f7 100644 --- a/.gitignore +++ b/.gitignore @@ -40,6 +40,7 @@ htmlcov/ .tox/ .coverage .coverage.* +.coverage/ .cache nosetests.xml coverage.xml @@ -124,6 +125,7 @@ temp/ # temporary testing data MedNIST tests/testing_data/MedNIST* tests/testing_data/*Hippocampus* +tests/testing_data/CMU-1.tiff # clang format tool .clang-format-bin/ diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 8776238711..41804d5c1d 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -14,6 +14,7 @@ """ import logging +import sys import time from typing import 
TYPE_CHECKING, Callable, Dict, List, Optional, Sequence, Tuple, Union @@ -409,6 +410,7 @@ def __init__( additional_info: user can define callable function to extract additional info from input data. logger_handler: add additional handler to output data: save to file, etc. add existing python logging handlers: https://docs.python.org/3/library/logging.handlers.html + the handler should have a logging level of at least `INFO`. Raises: TypeError: When ``additional_info`` is not an ``Optional[Callable]``. @@ -424,8 +426,11 @@ def __init__( raise TypeError(f"additional_info must be None or callable but is {type(additional_info).__name__}.") self.additional_info = additional_info self.output: Optional[str] = None - logging.basicConfig(level=logging.NOTSET) self._logger = logging.getLogger("DataStats") + self._logger.setLevel(logging.INFO) + console = logging.StreamHandler(sys.stdout) # always stdout + console.setLevel(logging.INFO) + self._logger.addHandler(console) if logger_handler is not None: self._logger.addHandler(logger_handler) @@ -459,7 +464,7 @@ def __call__( lines.append(f"Additional info: {additional_info(img)}") separator = "\n" self.output = f"{separator.join(lines)}" - self._logger.debug(self.output) + self._logger.info(self.output) return img diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index 14f34fb663..a05a5fc904 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -532,6 +532,7 @@ def __init__( corresponds to a key in ``keys``. logger_handler: add additional handler to output data: save to file, etc. add existing python logging handlers: https://docs.python.org/3/library/logging.handlers.html + the handler should have a logging level of at least `INFO`. allow_missing_keys: don't raise exception if key is missing. """ diff --git a/requirements-min.txt b/requirements-min.txt index 3a5585de8d..5db219c840 100644 --- a/requirements-min.txt +++ b/requirements-min.txt @@ -1,5 +1,5 @@ # Requirements for minimal tests -r requirements.txt setuptools>=50.3.0 -coverage +coverage>=5.5 parameterized diff --git a/runtests.sh b/runtests.sh index 0d3551291a..85ede904f6 100755 --- a/runtests.sh +++ b/runtests.sh @@ -138,6 +138,9 @@ function clang_format { } function clean_py { + # remove coverage history + ${cmdPrefix}${PY_EXE} -m coverage erase + # uninstall the development package echo "Uninstalling MONAI development files..." ${cmdPrefix}${PY_EXE} setup.py develop --user --uninstall @@ -149,7 +152,7 @@ function clean_py { find ${TO_CLEAN}/monai -type f -name "*.py[co]" -delete find ${TO_CLEAN}/monai -type f -name "*.so" -delete find ${TO_CLEAN}/monai -type d -name "__pycache__" -delete - find ${TO_CLEAN} -maxdepth 1 -type f -name ".coverage" -delete + find ${TO_CLEAN} -maxdepth 1 -type f -name ".coverage.*" -delete find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".eggs" -exec rm -r "{}" + find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "monai.egg-info" -exec rm -r "{}" + @@ -496,12 +499,11 @@ then export QUICKTEST=True fi -# set command and clear previous coverage data +# set coverage command if [ $doCoverage = true ] then echo "${separator}${blue}coverage${noColor}" - cmd="${PY_EXE} -m coverage run -a --source ." 
- ${cmdPrefix}${PY_EXE} -m coverage erase + cmd="${PY_EXE} -m coverage run --append" fi # # download test data if needed @@ -514,7 +516,7 @@ if [ $doUnitTests = true ] then echo "${separator}${blue}unittests${noColor}" torch_validate - ${cmdPrefix}${cmd} ./tests/runner.py -p "test_[!integration]*py" + ${cmdPrefix}${cmd} ./tests/runner.py -p "test_((?!integration).)" fi # network training/inference/eval integration tests @@ -540,5 +542,6 @@ fi if [ $doCoverage = true ] then echo "${separator}${blue}coverage${noColor}" - ${cmdPrefix}${PY_EXE} -m coverage report --skip-covered -m + ${cmdPrefix}${PY_EXE} -m coverage combine --append .coverage/ + ${cmdPrefix}${PY_EXE} -m coverage report fi diff --git a/setup.cfg b/setup.cfg index f18b4610fd..bbdcdf805d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -55,19 +55,19 @@ lmdb = lmdb psutil = psutil -openslide = +openslide = openslide-python==1.1.2 [flake8] select = B,C,E,F,N,P,T4,W,B9 -max-line-length = 120 +max_line_length = 120 # C408 ignored because we like the dict keyword argument syntax # E501 is not flexible enough, we're using B950 instead ignore = E203,E305,E402,E501,E721,E741,F821,F841,F999,W503,W504,C408,E302,W291,E303, # N812 lowercase 'torch.nn.functional' imported as non lowercase 'F' N812 -per-file-ignores = __init__.py: F401 +per_file_ignores = __init__.py: F401 exclude = *.pyi,.git,.eggs,monai/_version.py,versioneer.py,venv,.venv,_version.py [isort] @@ -148,3 +148,21 @@ precise_return = True protocols = True # Experimental: Only load submodules that are explicitly imported. strict_import = False + +[coverage:run] +concurrency = multiprocessing +source = . +data_file = .coverage/.coverage +omit = setup.py + +[coverage:report] +exclude_lines = + pragma: no cover + # Don't complain if tests don't hit code: + raise NotImplementedError + if __name__ == .__main__.: +show_missing = True +skip_covered = True + +[coverage:xml] +output = coverage.xml diff --git a/tests/runner.py b/tests/runner.py index f7be96cfb3..b340d60719 100644 --- a/tests/runner.py +++ b/tests/runner.py @@ -10,8 +10,10 @@ # limitations under the License. 
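# An illustrative aside on the selection logic below: the runner matches module
# names with `re.match`, so the default pattern passed from runtests.sh above,
# "test_((?!integration).)", keeps ordinary unit tests and drops integration
# modules through the negative lookahead. For example:
#
#     import re
#     pattern = r"test_((?!integration).)"
#     assert re.match(pattern, "test_affine") is not None
#     assert re.match(pattern, "test_integration_workflows") is None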
import argparse +import glob import inspect import os +import re import sys import time import unittest @@ -62,7 +64,7 @@ def print_results(results, discovery_time, thresh, status): print("Remember to check above times for any errors!") -def parse_args(default_pattern): +def parse_args(): parser = argparse.ArgumentParser(description="Runner for MONAI unittests with timing.") parser.add_argument( "-s", action="store", dest="path", default=".", help="Directory to start discovery (default: '%(default)s')" @@ -71,7 +73,7 @@ def parse_args(default_pattern): "-p", action="store", dest="pattern", - default=default_pattern, + default="test_*.py", help="Pattern to match tests (default: '%(default)s')", ) parser.add_argument( @@ -111,11 +113,8 @@ def get_default_pattern(loader): if __name__ == "__main__": - loader = unittest.TestLoader() - default_pattern = get_default_pattern(loader) - # Parse input arguments - args = parse_args(default_pattern) + args = parse_args() # If quick is desired, set environment variable if args.quick: @@ -123,10 +122,17 @@ def get_default_pattern(loader): # Get all test names (optionally from some path with some pattern) with PerfContext() as pc: - tests = loader.discover(args.path, args.pattern) + # the files are searched from `tests/` folder, starting with `test_` + files = glob.glob(os.path.join(os.path.dirname(__file__), "test_*.py")) + cases = [] + for test_module in {os.path.basename(f)[:-3] for f in files}: + if re.match(args.pattern, test_module): + cases.append(f"tests.{test_module}") + else: + print(f"monai test runner: excluding tests.{test_module}") + tests = unittest.TestLoader().loadTestsFromNames(cases) discovery_time = pc.total_time - print(f"time to discover tests: {discovery_time}s") - print(tests) + print(f"time to discover tests: {discovery_time}s, total cases: {tests.countTestCases()}.") test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast diff --git a/tests/test_affine.py b/tests/test_affine.py index 934473fc5c..ea146e0fbd 100644 --- a/tests/test_affine.py +++ b/tests/test_affine.py @@ -80,10 +80,7 @@ def test_affine(self, input_param, input_data, expected_val): g = Affine(**input_param) result = g(**input_data) self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor)) - if isinstance(result, torch.Tensor): - np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4) - else: - np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) + np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) if __name__ == "__main__": diff --git a/tests/test_affined.py b/tests/test_affined.py index 96e6d72fe5..850f12905d 100644 --- a/tests/test_affined.py +++ b/tests/test_affined.py @@ -94,10 +94,7 @@ def test_affine(self, input_param, input_data, expected_val): g = Affined(**input_param) result = g(input_data)["img"] self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor)) - if isinstance(result, torch.Tensor): - np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4) - else: - np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) + np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) if __name__ == "__main__": diff --git a/tests/test_data_stats.py b/tests/test_data_stats.py index e7334eb52c..877da52263 100644 --- a/tests/test_data_stats.py +++ b/tests/test_data_stats.py @@ -119,6 
+119,7 @@ def test_file(self, input_data, expected_print): with tempfile.TemporaryDirectory() as tempdir: filename = os.path.join(tempdir, "test_data_stats.log") handler = logging.FileHandler(filename, mode="w") + handler.setLevel(logging.INFO) input_param = { "prefix": "test data", "data_shape": True, @@ -129,8 +130,9 @@ def test_file(self, input_data, expected_print): } transform = DataStats(**input_param) _ = transform(input_data) - handler.stream.close() - transform._logger.removeHandler(handler) + for h in transform._logger.handlers[:]: + h.close() + transform._logger.removeHandler(h) with open(filename, "r") as f: content = f.read() self.assertEqual(content, expected_print) diff --git a/tests/test_data_statsd.py b/tests/test_data_statsd.py index a5fae3d66d..bacd70194a 100644 --- a/tests/test_data_statsd.py +++ b/tests/test_data_statsd.py @@ -132,6 +132,7 @@ def test_file(self, input_data, expected_print): with tempfile.TemporaryDirectory() as tempdir: filename = os.path.join(tempdir, "test_stats.log") handler = logging.FileHandler(filename, mode="w") + handler.setLevel(logging.INFO) input_param = { "keys": "img", "prefix": "test data", @@ -143,8 +144,10 @@ def test_file(self, input_data, expected_print): } transform = DataStatsd(**input_param) _ = transform(input_data) - handler.stream.close() - transform.printer._logger.removeHandler(handler) + for h in transform.printer._logger.handlers[:]: + h.close() + transform.printer._logger.removeHandler(h) + del handler with open(filename, "r") as f: content = f.read() self.assertEqual(content, expected_print) diff --git a/tests/test_handler_stats.py b/tests/test_handler_stats.py index d1602f802a..248be9f329 100644 --- a/tests/test_handler_stats.py +++ b/tests/test_handler_stats.py @@ -25,7 +25,8 @@ class TestHandlerStats(unittest.TestCase): def test_metrics_print(self): log_stream = StringIO() - logging.basicConfig(stream=log_stream, level=logging.INFO) + log_handler = logging.StreamHandler(log_stream) + log_handler.setLevel(logging.INFO) key_to_handler = "test_logging" key_to_print = "testing_metric" @@ -42,13 +43,14 @@ def _update_metric(engine): engine.state.metrics[key_to_print] = current_metric + 0.1 # set up testing handler - stats_handler = StatsHandler(name=key_to_handler) + stats_handler = StatsHandler(name=key_to_handler, logger_handler=log_handler) stats_handler.attach(engine) engine.run(range(3), max_epochs=2) # check logging output output_str = log_stream.getvalue() + log_handler.close() grep = re.compile(f".*{key_to_handler}.*") has_key_word = re.compile(f".*{key_to_print}.*") for idx, line in enumerate(output_str.split("\n")): @@ -58,7 +60,8 @@ def _update_metric(engine): def test_loss_print(self): log_stream = StringIO() - logging.basicConfig(stream=log_stream, level=logging.INFO) + log_handler = logging.StreamHandler(log_stream) + log_handler.setLevel(logging.INFO) key_to_handler = "test_logging" key_to_print = "myLoss" @@ -69,13 +72,14 @@ def _train_func(engine, batch): engine = Engine(_train_func) # set up testing handler - stats_handler = StatsHandler(name=key_to_handler, tag_name=key_to_print) + stats_handler = StatsHandler(name=key_to_handler, tag_name=key_to_print, logger_handler=log_handler) stats_handler.attach(engine) engine.run(range(3), max_epochs=2) # check logging output output_str = log_stream.getvalue() + log_handler.close() grep = re.compile(f".*{key_to_handler}.*") has_key_word = re.compile(f".*{key_to_print}.*") for idx, line in enumerate(output_str.split("\n")): @@ -85,7 +89,8 @@ def _train_func(engine, 
batch): def test_loss_dict(self): log_stream = StringIO() - logging.basicConfig(stream=log_stream, level=logging.INFO) + log_handler = logging.StreamHandler(log_stream) + log_handler.setLevel(logging.INFO) key_to_handler = "test_logging" key_to_print = "myLoss1" @@ -96,13 +101,16 @@ def _train_func(engine, batch): engine = Engine(_train_func) # set up testing handler - stats_handler = StatsHandler(name=key_to_handler, output_transform=lambda x: {key_to_print: x}) + stats_handler = StatsHandler( + name=key_to_handler, output_transform=lambda x: {key_to_print: x}, logger_handler=log_handler + ) stats_handler.attach(engine) engine.run(range(3), max_epochs=2) # check logging output output_str = log_stream.getvalue() + log_handler.close() grep = re.compile(f".*{key_to_handler}.*") has_key_word = re.compile(f".*{key_to_print}.*") for idx, line in enumerate(output_str.split("\n")): @@ -111,13 +119,13 @@ def _train_func(engine, batch): self.assertTrue(has_key_word.match(line)) def test_loss_file(self): - logging.basicConfig(level=logging.INFO) key_to_handler = "test_logging" key_to_print = "myLoss" with tempfile.TemporaryDirectory() as tempdir: filename = os.path.join(tempdir, "test_loss_stats.log") handler = logging.FileHandler(filename, mode="w") + handler.setLevel(logging.INFO) # set up engine def _train_func(engine, batch): @@ -130,7 +138,7 @@ def _train_func(engine, batch): stats_handler.attach(engine) engine.run(range(3), max_epochs=2) - handler.stream.close() + handler.close() stats_handler.logger.removeHandler(handler) with open(filename, "r") as f: output_str = f.read() @@ -142,8 +150,6 @@ def _train_func(engine, batch): self.assertTrue(has_key_word.match(line)) def test_exception(self): - logging.basicConfig(level=logging.INFO) - # set up engine def _train_func(engine, batch): raise RuntimeError("test exception.") diff --git a/tests/test_openslide_reader.py b/tests/test_openslide_reader.py index e1f9187937..67a6683be3 100644 --- a/tests/test_openslide_reader.py +++ b/tests/test_openslide_reader.py @@ -61,11 +61,20 @@ ] +def camelyon_data_download(file_url): + filename = os.path.basename(file_url) + fullname = os.path.join("tests", "testing_data", filename) + if not os.path.exists(fullname): + print(f"Test image [{fullname}] does not exist. 
Downloading...") + request.urlretrieve(file_url, fullname) + return fullname + + class TestOpenSlideReader(unittest.TestCase): @parameterized.expand([TEST_CASE_0]) @skipUnless(has_osl, "Requires OpenSlide") def test_read_whole_image(self, file_url, expected_shape): - filename = self.camelyon_data_download(file_url) + filename = camelyon_data_download(file_url) reader = WSIReader("OpenSlide") img_obj = reader.read(filename) img = reader.get_data(img_obj)[0] @@ -74,7 +83,7 @@ def test_read_whole_image(self, file_url, expected_shape): @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) @skipUnless(has_osl, "Requires OpenSlide") def test_read_region(self, file_url, patch_info, expected_img): - filename = self.camelyon_data_download(file_url) + filename = camelyon_data_download(file_url) reader = WSIReader("OpenSlide") img_obj = reader.read(filename) img = reader.get_data(img_obj, **patch_info)[0] @@ -84,20 +93,13 @@ def test_read_region(self, file_url, patch_info, expected_img): @parameterized.expand([TEST_CASE_3, TEST_CASE_4]) @skipUnless(has_osl, "Requires OpenSlide") def test_read_patches(self, file_url, patch_info, expected_img): - filename = self.camelyon_data_download(file_url) + filename = camelyon_data_download(file_url) reader = WSIReader("OpenSlide") img_obj = reader.read(filename) img = reader.get_data(img_obj, **patch_info)[0] self.assertTupleEqual(img.shape, expected_img.shape) self.assertIsNone(assert_array_equal(img, expected_img)) - def camelyon_data_download(self, file_url): - filename = os.path.basename(file_url) - if not os.path.exists(filename): - print(f"Test image [{filename}] does not exist. Downloading...") - request.urlretrieve(file_url, filename) - return filename - if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_rotate.py b/tests/test_rand_rotate.py index 79f3036454..0ff8508a0f 100644 --- a/tests/test_rand_rotate.py +++ b/tests/test_rand_rotate.py @@ -52,7 +52,8 @@ def test_correct_results(self, degrees, keep_size, mode, padding_mode, align_cor self.imt[0, 0], -np.rad2deg(angle), (0, 1), not keep_size, order=_order, mode=_mode, prefilter=False ) expected = np.stack(expected).astype(np.float32) - np.testing.assert_allclose(expected, rotated[0]) + good = np.sum(np.isclose(expected, rotated[0], atol=1e-3)) + self.assertLessEqual(np.abs(good - expected.size), 5, "diff at most 5 pixels") class TestRandRotate3D(NumpyImageTestCase3D): diff --git a/tests/test_rand_rotated.py b/tests/test_rand_rotated.py index 962ac5fc51..47b4b7107e 100644 --- a/tests/test_rand_rotated.py +++ b/tests/test_rand_rotated.py @@ -54,7 +54,8 @@ def test_correct_results(self, degrees, keep_size, mode, padding_mode, align_cor self.imt[0, 0], -np.rad2deg(angle), (0, 1), not keep_size, order=_order, mode=_mode, prefilter=False ) expected = np.stack(expected).astype(np.float32) - self.assertTrue(np.allclose(expected, rotated["img"][0])) + good = np.sum(np.isclose(expected, rotated["img"][0], atol=1e-3)) + self.assertLessEqual(np.abs(good - expected.size), 5, "diff at most 5 pixels") class TestRandRotated3D(NumpyImageTestCase3D): diff --git a/tests/test_rotate.py b/tests/test_rotate.py index a8dca07069..436c952d4b 100644 --- a/tests/test_rotate.py +++ b/tests/test_rotate.py @@ -70,7 +70,8 @@ def test_correct_results(self, angle, keep_size, mode, padding_mode, align_corne ) ) expected = np.stack(expected).astype(np.float32) - np.testing.assert_allclose(expected, rotated, atol=1e-1) + good = np.sum(np.isclose(expected, rotated, atol=1e-3)) + self.assertLessEqual(np.abs(good - 
expected.size), 5, "diff at most 5 pixels") class TestRotate3D(NumpyImageTestCase3D): From ce8f92a565acdd39d2e41566f199758fdf6d2d0f Mon Sep 17 00:00:00 2001 From: Yiwen Li <44606435+kate-sann5100@users.noreply.github.com> Date: Fri, 12 Mar 2021 11:21:01 +0000 Subject: [PATCH 049/457] 1665 adjust localnet (#1752) * 1651 implement RegUNet Signed-off-by: kate-sann5100 --- monai/networks/nets/__init__.py | 3 +- monai/networks/nets/localnet.py | 129 -------------------------------- monai/networks/nets/regunet.py | 91 ++++++++++++++++++++++ tests/test_localnet.py | 58 ++++++-------- 4 files changed, 116 insertions(+), 165 deletions(-) delete mode 100644 monai/networks/nets/localnet.py diff --git a/monai/networks/nets/__init__.py b/monai/networks/nets/__init__.py index db4590cf40..7a39872525 100644 --- a/monai/networks/nets/__init__.py +++ b/monai/networks/nets/__init__.py @@ -18,9 +18,8 @@ from .fullyconnectednet import FullyConnectedNet, VarFullyConnectedNet from .generator import Generator from .highresnet import HighResBlock, HighResNet -from .localnet import LocalNet from .regressor import Regressor -from .regunet import RegUNet +from .regunet import LocalNet, RegUNet from .segresnet import SegResNet, SegResNetVAE from .senet import SENet, se_resnet50, se_resnet101, se_resnet152, se_resnext50_32x4d, se_resnext101_32x4d, senet154 from .unet import UNet, Unet, unet diff --git a/monai/networks/nets/localnet.py b/monai/networks/nets/localnet.py deleted file mode 100644 index e9df68104d..0000000000 --- a/monai/networks/nets/localnet.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import List, Optional, Tuple, Union - -import torch -from torch import nn -from torch.nn import functional as F - -from monai.networks.blocks.localnet_block import ( - LocalNetDownSampleBlock, - LocalNetFeatureExtractorBlock, - LocalNetUpSampleBlock, - get_conv_block, -) - - -class LocalNet(nn.Module): - """ - Reimplementation of LocalNet, based on: - `Weakly-supervised convolutional neural networks for multimodal image registration - `_. - `Label-driven weakly-supervised learning for multimodal deformable image registration - `_. - - Adapted from: - DeepReg (https://github.com/DeepRegNet/DeepReg) - """ - - def __init__( - self, - spatial_dims: int, - in_channels: int, - out_channels: int, - num_channel_initial: int, - extract_levels: List[int], - out_activation: Optional[Union[Tuple, str]], - out_initializer: str = "kaiming_uniform", - ) -> None: - """ - Args: - spatial_dims: number of spatial dimensions. - in_channels: number of input channels. - out_channels: number of output channels. - num_channel_initial: number of initial channels. - extract_levels: number of extraction levels. - out_activation: activation to use at end layer. - out_initializer: initializer for extraction layers. 
- """ - super(LocalNet, self).__init__() - self.extract_levels = extract_levels - self.extract_max_level = max(self.extract_levels) # E - self.extract_min_level = min(self.extract_levels) # D - - num_channels = [ - num_channel_initial * (2 ** level) for level in range(self.extract_max_level + 1) - ] # level 0 to E - - self.downsample_blocks = nn.ModuleList( - [ - LocalNetDownSampleBlock( - spatial_dims=spatial_dims, - in_channels=in_channels if i == 0 else num_channels[i - 1], - out_channels=num_channels[i], - kernel_size=7 if i == 0 else 3, - ) - for i in range(self.extract_max_level) - ] - ) # level 0 to self.extract_max_level - 1 - self.conv3d_block = get_conv_block( - spatial_dims=spatial_dims, in_channels=num_channels[-2], out_channels=num_channels[-1] - ) # self.extract_max_level - - self.upsample_blocks = nn.ModuleList( - [ - LocalNetUpSampleBlock( - spatial_dims=spatial_dims, - in_channels=num_channels[level + 1], - out_channels=num_channels[level], - ) - for level in range(self.extract_max_level - 1, self.extract_min_level - 1, -1) - ] - ) # self.extract_max_level - 1 to self.extract_min_level - - self.extract_layers = nn.ModuleList( - [ - # if kernels are not initialized by zeros, with init NN, extract may be too large - LocalNetFeatureExtractorBlock( - spatial_dims=spatial_dims, - in_channels=num_channels[level], - out_channels=out_channels, - act=out_activation, - initializer=out_initializer, - ) - for level in self.extract_levels - ] - ) - - def forward(self, x) -> torch.Tensor: - image_size = x.shape[2:] - for size in image_size: - if size % (2 ** self.extract_max_level) != 0: - raise ValueError( - f"given extract_max_level {self.extract_max_level}, " - f"all input spatial dimension must be divisible by {2 ** self.extract_max_level}, " - f"got input of size {image_size}" - ) - mid_features = [] # 0 -> self.extract_max_level - 1 - for downsample_block in self.downsample_blocks: - x, mid = downsample_block(x) - mid_features.append(mid) - x = self.conv3d_block(x) # self.extract_max_level - - decoded_features = [x] - for idx, upsample_block in enumerate(self.upsample_blocks): - x = upsample_block(x, mid_features[-idx - 1]) - decoded_features.append(x) # self.extract_max_level -> self.extract_min_level - - output = torch.mean( - torch.stack( - [ - F.interpolate( - extract_layer(decoded_features[self.extract_max_level - self.extract_levels[idx]]), - size=image_size, - ) - for idx, extract_layer in enumerate(self.extract_layers) - ], - dim=-1, - ), - dim=-1, - ) - return output diff --git a/monai/networks/nets/regunet.py b/monai/networks/nets/regunet.py index 9499fa06fa..3263a6b5bc 100644 --- a/monai/networks/nets/regunet.py +++ b/monai/networks/nets/regunet.py @@ -12,6 +12,7 @@ import torch from torch import nn +from torch.nn import functional as F from monai.networks.blocks.regunet_block import ( RegistrationDownSampleBlock, @@ -247,3 +248,93 @@ def forward(self, x): out = self.output_block(outs, image_size=image_size) return out + + +class AdditiveUpSampleBlock(nn.Module): + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + ): + super(AdditiveUpSampleBlock, self).__init__() + self.deconv = get_deconv_block(spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + output_size = (size * 2 for size in x.shape[2:]) + deconved = self.deconv(x) + resized = F.interpolate(x, output_size) + resized = torch.sum(torch.stack(resized.split(split_size=resized.shape[1] // 2, 
dim=1), dim=-1), dim=-1) + out: torch.Tensor = deconved + resized + return out + + +class LocalNet(RegUNet): + """ + Reimplementation of LocalNet, based on: + `Weakly-supervised convolutional neural networks for multimodal image registration + `_. + `Label-driven weakly-supervised learning for multimodal deformable image registration + `_. + + Adapted from: + DeepReg (https://github.com/DeepRegNet/DeepReg) + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + num_channel_initial: int, + extract_levels: Tuple[int], + out_kernel_initializer: Optional[str] = "kaiming_uniform", + out_activation: Optional[str] = None, + out_channels: int = 3, + pooling: bool = True, + concat_skip: bool = False, + ): + """ + Args: + spatial_dims: number of spatial dims + in_channels: number of input channels + num_channel_initial: number of initial channels + out_kernel_initializer: kernel initializer for the last layer + out_activation: activation at the last layer + out_channels: number of channels for the output + extract_levels: list, which levels from net to extract. The maximum level must equal to ``depth`` + pooling: for down-sampling, use non-parameterized pooling if true, otherwise use conv3d + concat_skip: when up-sampling, concatenate skipped tensor if true, otherwise use addition + """ + super().__init__( + spatial_dims=spatial_dims, + in_channels=in_channels, + num_channel_initial=num_channel_initial, + depth=max(extract_levels), + out_kernel_initializer=out_kernel_initializer, + out_activation=out_activation, + out_channels=out_channels, + pooling=pooling, + concat_skip=concat_skip, + encode_kernel_sizes=[7] + [3] * max(extract_levels), + ) + + def build_bottom_block(self, in_channels: int, out_channels: int): + kernel_size = self.encode_kernel_sizes[self.depth] + return get_conv_block( + spatial_dims=self.spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + ) + + def build_up_sampling_block( + self, + in_channels: int, + out_channels: int, + ) -> nn.Module: + if self._use_additive_upsampling: + return AdditiveUpSampleBlock( + spatial_dims=self.spatial_dims, in_channels=in_channels, out_channels=out_channels + ) + + return get_deconv_block(spatial_dims=self.spatial_dims, in_channels=in_channels, out_channels=out_channels) diff --git a/tests/test_localnet.py b/tests/test_localnet.py index 97a10d0c83..df1d9f61cb 100644 --- a/tests/test_localnet.py +++ b/tests/test_localnet.py @@ -4,7 +4,7 @@ from parameterized import parameterized from monai.networks import eval_mode -from monai.networks.nets.localnet import LocalNet +from monai.networks.nets.regunet import LocalNet from tests.utils import test_script_save device = "cuda" if torch.cuda.is_available() else "cpu" @@ -15,39 +15,36 @@ { "spatial_dims": 2, "in_channels": 2, - "out_channels": 2, "num_channel_initial": 16, - "extract_levels": [0, 1, 2], - "out_activation": act, + "out_kernel_initializer": "kaiming_uniform", + "out_activation": None, + "out_channels": 2, + "extract_levels": (0, 1), + "pooling": False, + "concat_skip": True, }, (1, 2, 16, 16), (1, 2, 16, 16), ] - for act in ["sigmoid", None] ] -TEST_CASE_LOCALNET_3D = [] -for in_channels in [2, 3]: - for out_channels in [1, 3]: - for num_channel_initial in [4, 16, 32]: - for extract_levels in [[0, 1, 2], [0, 1, 2, 3], [0, 1, 2, 3, 4]]: - for out_activation in ["sigmoid", None]: - for out_initializer in ["kaiming_uniform", "zeros"]: - TEST_CASE_LOCALNET_3D.append( - [ - { - "spatial_dims": 3, - "in_channels": in_channels, - 
"out_channels": out_channels, - "num_channel_initial": num_channel_initial, - "extract_levels": extract_levels, - "out_activation": out_activation, - "out_initializer": out_initializer, - }, - (1, in_channels, 16, 16, 16), - (1, out_channels, 16, 16, 16), - ] - ) +TEST_CASE_LOCALNET_3D = [ + [ + { + "spatial_dims": 3, + "in_channels": 2, + "num_channel_initial": 16, + "out_kernel_initializer": "zeros", + "out_activation": "sigmoid", + "out_channels": 2, + "extract_levels": (0, 1, 2, 3), + "pooling": True, + "concat_skip": False, + }, + (1, 2, 16, 16, 16), + (1, 2, 16, 16, 16), + ] +] class TestLocalNet(unittest.TestCase): @@ -58,13 +55,6 @@ def test_shape(self, input_param, input_shape, expected_shape): result = net(torch.randn(input_shape).to(device)) self.assertEqual(result.shape, expected_shape) - def test_ill_shape(self): - with self.assertRaisesRegex(ValueError, ""): - input_param, _, _ = TEST_CASE_LOCALNET_2D[0] - input_shape = (1, input_param["in_channels"], 17, 17) - net = LocalNet(**input_param).to(device) - net.forward(torch.randn(input_shape).to(device)) - def test_script(self): input_param, input_shape, _ = TEST_CASE_LOCALNET_2D[0] net = LocalNet(**input_param) From abf8a9992c04d715eaba0bb9877090167f29a35d Mon Sep 17 00:00:00 2001 From: Yiwen Li <44606435+kate-sann5100@users.noreply.github.com> Date: Fri, 12 Mar 2021 12:41:37 +0000 Subject: [PATCH 050/457] 1651 globalnet (#1729) * 1651 implement RegUNet Signed-off-by: kate-sann5100 * 1651 reformat code Signed-off-by: kate-sann5100 * 1651 reformat code Signed-off-by: kate-sann5100 * 1651 add globalnet Signed-off-by: kate-sann5100 * 1651 reformat code Signed-off-by: kate-sann5100 * 1651 reformat code Signed-off-by: kate-sann5100 * 1651 reformat code Signed-off-by: kate-sann5100 * 1651 reformat code Signed-off-by: kate-sann5100 --- docs/source/networks.rst | 5 ++ monai/networks/nets/__init__.py | 2 +- monai/networks/nets/regunet.py | 104 ++++++++++++++++++++++++++++++++ tests/test_globalnet.py | 79 ++++++++++++++++++++++++ 4 files changed, 189 insertions(+), 1 deletion(-) create mode 100644 tests/test_globalnet.py diff --git a/docs/source/networks.rst b/docs/source/networks.rst index 5688f4b143..036ba2aff7 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -350,6 +350,11 @@ Nets .. autoclass:: RegUNet :members: +`GlobalNet` +~~~~~~~~~~~~ +.. autoclass:: GlobalNet + :members: + `LocalNet` ~~~~~~~~~~~ .. 
autoclass:: LocalNet diff --git a/monai/networks/nets/__init__.py b/monai/networks/nets/__init__.py index 7a39872525..f3def30736 100644 --- a/monai/networks/nets/__init__.py +++ b/monai/networks/nets/__init__.py @@ -19,7 +19,7 @@ from .generator import Generator from .highresnet import HighResBlock, HighResNet from .regressor import Regressor -from .regunet import LocalNet, RegUNet +from .regunet import GlobalNet, LocalNet, RegUNet from .segresnet import SegResNet, SegResNetVAE from .senet import SENet, se_resnet50, se_resnet101, se_resnet152, se_resnext50_32x4d, se_resnext101_32x4d, senet154 from .unet import UNet, Unet, unet diff --git a/monai/networks/nets/regunet.py b/monai/networks/nets/regunet.py index 3263a6b5bc..25455c2df7 100644 --- a/monai/networks/nets/regunet.py +++ b/monai/networks/nets/regunet.py @@ -250,6 +250,110 @@ def forward(self, x): return out +class AffineHead(nn.Module): + def __init__( + self, + spatial_dims: int, + image_size: List[int], + decode_size: List[int], + in_channels: int, + ): + super(AffineHead, self).__init__() + self.spatial_dims = spatial_dims + if spatial_dims == 2: + in_features = in_channels * decode_size[0] * decode_size[1] + out_features = 6 + elif spatial_dims == 3: + in_features = in_channels * decode_size[0] * decode_size[1] * decode_size[2] + out_features = 12 + else: + raise ValueError(f"only support 2D/3D operation, got spatial_dims={spatial_dims}") + + self.fc = nn.Linear(in_features=in_features, out_features=out_features) + self.grid = self.get_reference_grid(image_size) # (spatial_dims, ...) + + @staticmethod + def get_reference_grid(image_size: Union[Tuple[int], List[int]]) -> torch.Tensor: + mesh_points = [torch.arange(0, dim) for dim in image_size] + grid = torch.stack(torch.meshgrid(*mesh_points), dim=0) # (spatial_dims, ...) + return grid.to(dtype=torch.float) + + def affine_transform(self, theta: torch.Tensor): + # (spatial_dims, ...) -> (spatial_dims + 1, ...) + grid_padded = torch.cat([self.grid, torch.ones_like(self.grid[:1])]) + + # grid_warped[b,p,...] = sum_over_q(grid_padded[q,...] * theta[b,p,q] + if self.spatial_dims == 2: + grid_warped = torch.einsum("qij,bpq->bpij", grid_padded, theta.reshape(-1, 2, 3)) + elif self.spatial_dims == 3: + grid_warped = torch.einsum("qijk,bpq->bpijk", grid_padded, theta.reshape(-1, 3, 4)) + else: + raise ValueError(f"do not support spatial_dims={self.spatial_dims}") + return grid_warped + + def forward(self, x: List[torch.Tensor], image_size: List[int]) -> torch.Tensor: + f = x[0] + self.grid = self.grid.to(device=f.device) + theta = self.fc(f.reshape(f.shape[0], -1)) + out: torch.Tensor = self.affine_transform(theta) - self.grid + return out + + +class GlobalNet(RegUNet): + """ + Build GlobalNet for image registration. + + Reference: + Hu, Yipeng, et al. 
+ "Label-driven weakly-supervised learning + for multimodal deformable image registration," + https://arxiv.org/abs/1711.01666 + """ + + def __init__( + self, + image_size: List[int], + spatial_dims: int, + in_channels: int, + num_channel_initial: int, + depth: int, + out_kernel_initializer: Optional[str] = "kaiming_uniform", + out_activation: Optional[str] = None, + pooling: bool = True, + concat_skip: bool = False, + encode_kernel_sizes: Union[int, List[int]] = 3, + ): + for size in image_size: + if size % (2 ** depth) != 0: + raise ValueError( + f"given depth {depth}, " + f"all input spatial dimension must be divisible by {2 ** depth}, " + f"got input of size {image_size}" + ) + self.image_size = image_size + self.decode_size = [size // (2 ** depth) for size in image_size] + super().__init__( + spatial_dims=spatial_dims, + in_channels=in_channels, + num_channel_initial=num_channel_initial, + depth=depth, + out_kernel_initializer=out_kernel_initializer, + out_activation=out_activation, + out_channels=spatial_dims, + pooling=pooling, + concat_skip=concat_skip, + encode_kernel_sizes=encode_kernel_sizes, + ) + + def build_output_block(self): + return AffineHead( + spatial_dims=self.spatial_dims, + image_size=self.image_size, + decode_size=self.decode_size, + in_channels=self.num_channels[-1], + ) + + class AdditiveUpSampleBlock(nn.Module): def __init__( self, diff --git a/tests/test_globalnet.py b/tests/test_globalnet.py new file mode 100644 index 0000000000..19e9db9137 --- /dev/null +++ b/tests/test_globalnet.py @@ -0,0 +1,79 @@ +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.networks import eval_mode +from monai.networks.nets import GlobalNet +from monai.networks.nets.regunet import AffineHead +from tests.utils import test_script_save + +TEST_CASES_AFFINE_TRANSFORM = [ + [ + {"spatial_dims": 3, "image_size": (2, 2, 2), "decode_size": (2, 2, 2), "in_channels": 1}, + torch.ones(2, 12), + torch.tensor([[[1, 2], [2, 3]], [[2, 3], [3, 4]]]).unsqueeze(0).unsqueeze(0).expand(2, 3, 2, 2, 2), + ], + [ + {"spatial_dims": 3, "image_size": (2, 2, 2), "decode_size": (2, 2, 2), "in_channels": 1}, + torch.arange(1, 13).reshape(1, 12).to(torch.float), + torch.tensor( + [ + [[[4.0, 7.0], [6.0, 9.0]], [[5.0, 8.0], [7.0, 10.0]]], + [[[8.0, 15.0], [14.0, 21.0]], [[13.0, 20.0], [19.0, 26.0]]], + [[[12.0, 23.0], [22.0, 33.0]], [[21.0, 32.0], [31.0, 42.0]]], + ] + ).unsqueeze(0), + ], +] + + +TEST_CASES_GLOBAL_NET = [ + [ + { + "image_size": (16, 16), + "spatial_dims": 2, + "in_channels": 1, + "num_channel_initial": 16, + "depth": 1, + "out_kernel_initializer": "kaiming_uniform", + "out_activation": None, + "pooling": True, + "concat_skip": True, + "encode_kernel_sizes": 3, + }, + (1, 1, 16, 16), + (1, 2, 16, 16), + ] +] + + +class TestAffineHead(unittest.TestCase): + @parameterized.expand(TEST_CASES_AFFINE_TRANSFORM) + def test_shape(self, input_param, theta, expected_val): + layer = AffineHead(**input_param) + result = layer.affine_transform(theta) + np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4) + + +device = "cuda" if torch.cuda.is_available() else "cpu" + + +class TestGlobalNet(unittest.TestCase): + @parameterized.expand(TEST_CASES_GLOBAL_NET) + def test_shape(self, input_param, input_shape, expected_shape): + net = GlobalNet(**input_param).to(device) + with eval_mode(net): + result = net(torch.randn(input_shape).to(device)) + self.assertEqual(result.shape, expected_shape) + + def 
test_script(self): + input_param, input_shape, _ = TEST_CASES_GLOBAL_NET[0] + net = GlobalNet(**input_param) + test_data = torch.randn(input_shape) + test_script_save(net, test_data) + + +if __name__ == "__main__": + unittest.main() From e001d2a492a71fe96cc7cd6f8fe0287fb063c4d0 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sat, 13 Mar 2021 00:22:34 +0800 Subject: [PATCH 051/457] [DLMED] update WSIReader for typos and docs (#1758) Signed-off-by: Nic Ma --- monai/data/image_reader.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index 08432e53da..ddb2c8c913 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -635,7 +635,11 @@ def _get_spatial_shape(self, img) -> np.ndarray: class WSIReader(ImageReader): """ - Read whole slide imaging and extract patches + Read whole slide imaging and extract patches. + + Args: + reader_lib: backend library to load the images, available options: "OpenSlide" or "cuClaraImage". + TODO: `cuClaraImage` package is unavailable so far, will enable the support later. """ From 21a3c16fcba5f99a797ddc6e6595cdf2f18cdd57 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sat, 13 Mar 2021 02:58:50 +0800 Subject: [PATCH 052/457] 1742 Add DistributedWeightedRandomSampler (#1745) * [DLMED] add DistributedWeightedRandomSampler Signed-off-by: Nic Ma --- docs/source/data.rst | 3 + monai/data/__init__.py | 2 +- monai/data/samplers.py | 122 ++++++++++++++++++ monai/data/utils.py | 26 ---- ...est_distributed_weighted_random_sampler.py | 64 +++++++++ tests/test_rotated.py | 3 +- 6 files changed, 192 insertions(+), 28 deletions(-) create mode 100644 monai/data/samplers.py create mode 100644 tests/test_distributed_weighted_random_sampler.py diff --git a/docs/source/data.rst b/docs/source/data.rst index 3dffeb8977..c95659bc6e 100644 --- a/docs/source/data.rst +++ b/docs/source/data.rst @@ -160,6 +160,9 @@ DistributedSampler ~~~~~~~~~~~~~~~~~~ .. autoclass:: monai.data.DistributedSampler +DistributedWeightedRandomSampler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: monai.data.DistributedWeightedRandomSampler Decathlon Datalist ~~~~~~~~~~~~~~~~~~ diff --git a/monai/data/__init__.py b/monai/data/__init__.py index 9fa5c935e2..54beb53e3f 100644 --- a/monai/data/__init__.py +++ b/monai/data/__init__.py @@ -30,10 +30,10 @@ from .nifti_writer import write_nifti from .png_saver import PNGSaver from .png_writer import write_png +from .samplers import DistributedSampler, DistributedWeightedRandomSampler from .synthetic import create_test_image_2d, create_test_image_3d from .thread_buffer import ThreadBuffer from .utils import ( - DistributedSampler, compute_importance_map, compute_shape_offset, correct_nifti_header_if_necessary, diff --git a/monai/data/samplers.py b/monai/data/samplers.py new file mode 100644 index 0000000000..5fea6959de --- /dev/null +++ b/monai/data/samplers.py @@ -0,0 +1,122 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
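+
+# A behaviour sketch of the `even_divisible` flag implemented below
+# (illustrative, assuming two ranks and shuffle disabled): the stock PyTorch
+# sampler pads each rank to an even split, while `even_divisible=False` keeps
+# the true data length:
+#
+#     data = [1, 2, 3, 4, 5]
+#     even_divisible=True  -> rank 0: [1, 3, 5], rank 1: [2, 4, 1]  (sample 1 repeated)
+#     even_divisible=False -> rank 0: [1, 3, 5], rank 1: [2, 4]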
+
+from typing import Optional, Sequence
+
+import torch
+from torch.utils.data import Dataset
+from torch.utils.data import DistributedSampler as _TorchDistributedSampler
+
+__all__ = ["DistributedSampler", "DistributedWeightedRandomSampler"]
+
+
+class DistributedSampler(_TorchDistributedSampler):
+    """
+    Enhance PyTorch DistributedSampler to support non-evenly divisible sampling.
+
+    Args:
+        dataset: Dataset used for sampling.
+        even_divisible: if False, different ranks can have different data length.
+            for example, input data: [1, 2, 3, 4, 5], rank 0: [1, 3, 5], rank 1: [2, 4].
+        num_replicas: number of processes participating in distributed training.
+            by default, `world_size` is retrieved from the current distributed group.
+        rank: rank of the current process within `num_replicas`. by default,
+            `rank` is retrieved from the current distributed group.
+        shuffle: if `True`, the sampler will shuffle the indices, default to True.
+        kwargs: additional arguments for the `DistributedSampler` super class, can be `seed` and `drop_last`.
+
+    For more information about DistributedSampler, please check:
+    https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py
+
+    """
+
+    def __init__(
+        self,
+        dataset: Dataset,
+        even_divisible: bool = True,
+        num_replicas: Optional[int] = None,
+        rank: Optional[int] = None,
+        shuffle: bool = True,
+        **kwargs,
+    ):
+        super().__init__(dataset=dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle, **kwargs)
+
+        if not even_divisible:
+            data_len = len(dataset)  # type: ignore
+            extra_size = self.total_size - data_len
+            if self.rank + extra_size >= self.num_replicas:
+                self.num_samples -= 1
+            self.total_size = data_len
+
+
+class DistributedWeightedRandomSampler(DistributedSampler):
+    """
+    Extend the `DistributedSampler` to support weighted sampling.
+    Refer to `torch.utils.data.WeightedRandomSampler`; for more details please check:
+    https://github.com/pytorch/pytorch/blob/master/torch/utils/data/sampler.py#L150
+
+    Args:
+        dataset: Dataset used for sampling.
+        weights: a sequence of weights, not necessarily summing up to one; the length should
+            exactly match the full dataset.
+        num_samples_per_rank: number of samples to draw for every rank, sampled from
+            the distributed subset of the dataset.
+            if None, defaults to the length of the dataset split by DistributedSampler.
+        replacement: if ``True``, samples are drawn with replacement, otherwise, they are
+            drawn without replacement, which means that when a sample index is drawn for a row,
+            it cannot be drawn again for that row, default to True.
+        generator: PyTorch Generator used in sampling.
+        even_divisible: if False, different ranks can have different data length.
+            for example, input data: [1, 2, 3, 4, 5], rank 0: [1, 3, 5], rank 1: [2, 4].
+        num_replicas: number of processes participating in distributed training.
+            by default, `world_size` is retrieved from the current distributed group.
+        rank: rank of the current process within `num_replicas`. by default,
+            `rank` is retrieved from the current distributed group.
+        shuffle: if `True`, the sampler will shuffle the indices, default to True.
+        kwargs: additional arguments for the `DistributedSampler` super class, can be `seed` and `drop_last`.
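+
+    Example (an illustrative sketch; assumes `torch.distributed` is initialized
+    and `data` is a sequence whose length matches `weights`)::
+
+        from torch.utils.data import DataLoader
+
+        sampler = DistributedWeightedRandomSampler(
+            dataset=data, weights=[1.0, 2.0, 3.0, 4.0, 5.0], shuffle=False
+        )
+        dataloader = DataLoader(data, batch_size=1, sampler=sampler)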
+ + """ + + def __init__( + self, + dataset: Dataset, + weights: Sequence[float], + num_samples_per_rank: Optional[int] = None, + replacement: bool = True, + generator: Optional[torch.Generator] = None, + even_divisible: bool = True, + num_replicas: Optional[int] = None, + rank: Optional[int] = None, + shuffle: bool = True, + **kwargs, + ): + super().__init__( + dataset=dataset, + even_divisible=even_divisible, + num_replicas=num_replicas, + rank=rank, + shuffle=shuffle, + **kwargs, + ) + self.weights = weights + self.num_samples_per_rank = num_samples_per_rank + self.replacement = replacement + self.generator = generator + + def __iter__(self): + indices = list(super().__iter__()) + num_samples = self.num_samples_per_rank if self.num_samples_per_rank is not None else self.num_samples + weights = torch.as_tensor([self.weights[i] for i in indices], dtype=torch.double) + # sample based on the provided weights + rand_tensor = torch.multinomial(weights, num_samples, self.replacement, generator=self.generator) + + for i in rand_tensor: + yield indices[i] diff --git a/monai/data/utils.py b/monai/data/utils.py index 2e2f8c00cb..1db2f6676f 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -22,7 +22,6 @@ import numpy as np import torch -from torch.utils.data import DistributedSampler as _TorchDistributedSampler from torch.utils.data._utils.collate import default_collate from monai.networks.layers.simplelayers import GaussianFilter @@ -61,7 +60,6 @@ "partition_dataset", "partition_dataset_classes", "select_cross_validation_folds", - "DistributedSampler", "json_hashing", "pickle_hashing", "sorted_dict", @@ -921,30 +919,6 @@ def select_cross_validation_folds(partitions: Sequence[Iterable], folds: Union[S return [data_item for fold_id in ensure_tuple(folds) for data_item in partitions[fold_id]] -class DistributedSampler(_TorchDistributedSampler): - """ - Enhance PyTorch DistributedSampler to support non-evenly divisible sampling. - - Args: - even_divisible: if False, different ranks can have different data length. - for example, input data: [1, 2, 3, 4, 5], rank 0: [1, 3, 5], rank 1: [2, 4]. - - More information about DistributedSampler, please check: - https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py - - """ - - def __init__(self, even_divisible: bool = True, *args, **kwargs): - super().__init__(*args, **kwargs) - - if not even_divisible: - data_len = len(kwargs["dataset"]) - extra_size = self.total_size - data_len - if self.rank + extra_size >= self.num_replicas: - self.num_samples -= 1 - self.total_size = data_len - - def json_hashing(item) -> bytes: """ diff --git a/tests/test_distributed_weighted_random_sampler.py b/tests/test_distributed_weighted_random_sampler.py new file mode 100644 index 0000000000..6e27e78d4c --- /dev/null +++ b/tests/test_distributed_weighted_random_sampler.py @@ -0,0 +1,64 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import torch +import torch.distributed as dist + +from monai.data import DistributedWeightedRandomSampler +from tests.utils import DistCall, DistTestCase + + +class DistributedWeightedRandomSamplerTest(DistTestCase): + @DistCall(nnodes=1, nproc_per_node=2) + def test_replacement(self): + data = [1, 2, 3, 4, 5] + weights = [1, 2, 3, 4, 5] + sampler = DistributedWeightedRandomSampler( + weights=weights, + replacement=True, + dataset=data, + shuffle=False, + generator=torch.Generator().manual_seed(0), + ) + samples = np.array([data[i] for i in list(sampler)]) + + if dist.get_rank() == 0: + np.testing.assert_allclose(samples, np.array([5, 5, 5])) + + if dist.get_rank() == 1: + np.testing.assert_allclose(samples, np.array([1, 4, 4])) + + @DistCall(nnodes=1, nproc_per_node=2) + def test_num_samples(self): + data = [1, 2, 3, 4, 5] + weights = [1, 2, 3, 4, 5] + sampler = DistributedWeightedRandomSampler( + weights=weights, + num_samples_per_rank=5, + replacement=True, + dataset=data, + shuffle=False, + generator=torch.Generator().manual_seed(123), + ) + samples = np.array([data[i] for i in list(sampler)]) + + if dist.get_rank() == 0: + np.testing.assert_allclose(samples, np.array([3, 1, 5, 1, 5])) + + if dist.get_rank() == 1: + np.testing.assert_allclose(samples, np.array([4, 2, 4, 2, 4])) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_rotated.py b/tests/test_rotated.py index 82bc4aed40..e0c1a27e98 100644 --- a/tests/test_rotated.py +++ b/tests/test_rotated.py @@ -79,7 +79,8 @@ def test_correct_results(self, angle, keep_size, mode, padding_mode, align_corne expected = scipy.ndimage.rotate( self.imt[0, 0], np.rad2deg(angle), (0, 2), not keep_size, order=_order, mode=_mode, prefilter=False ) - np.testing.assert_allclose(expected.astype(np.float32), rotated["img"][0], atol=1e-3) + good = np.sum(np.isclose(expected.astype(np.float32), rotated["img"][0], atol=1e-3)) + self.assertLessEqual(np.abs(good - expected.size), 5, "diff at most 5 voxels.") expected = scipy.ndimage.rotate( self.segn[0, 0], np.rad2deg(angle), (0, 2), not keep_size, order=0, mode=_mode, prefilter=False From f56058de7740f83bb0c44f5bee24387f4350b349 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Sat, 13 Mar 2021 12:05:16 +0000 Subject: [PATCH 053/457] Croppad inverse transform (#1737) * croppad inverse Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/transforms/croppad/array.py | 15 +- monai/transforms/croppad/dictionary.py | 194 ++++++++++++++++++++++++- tests/test_inverse.py | 163 ++++++++++++++++++++- 3 files changed, 353 insertions(+), 19 deletions(-) diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index a3d36ad903..6174378e3b 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -106,7 +106,7 @@ class BorderPad(Transform): Pad the input data by adding specified borders to every dimension. Args: - spatial_border: specified size for every spatial border. it can be 3 shapes: + spatial_border: specified size for every spatial border. Any -ve values will be set to 0. It can be 3 shapes: - single int number, pad all the borders with the same size. - length equals the length of image shape, pad every spatial dimension separately. 
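# An illustrative sketch of the clamping documented above: negative entries in
# `spatial_border` are now treated as zero instead of raising a ValueError:
#
#     import numpy as np
#     from monai.transforms import BorderPad
#
#     img = np.zeros((1, 4, 4))
#     padded = BorderPad(spatial_border=[2, -1])(img)
#     assert padded.shape == (1, 8, 4)  # dim 0 padded by 2 per side, dim 1 unchanged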
@@ -140,16 +140,16 @@ def __call__(self, img: np.ndarray, mode: Optional[Union[NumpyPadMode, str]] = N See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html Raises: - ValueError: When ``self.spatial_border`` contains a nonnegative int. + ValueError: When ``self.spatial_border`` does not contain ints. ValueError: When ``self.spatial_border`` length is not one of [1, len(spatial_shape), 2*len(spatial_shape)]. """ spatial_shape = img.shape[1:] spatial_border = ensure_tuple(self.spatial_border) - for b in spatial_border: - if not isinstance(b, int) or b < 0: - raise ValueError(f"self.spatial_border must contain only nonnegative ints, got {spatial_border}.") + if not all(isinstance(b, int) for b in spatial_border): + raise ValueError(f"self.spatial_border must contain only ints, got {spatial_border}.") + spatial_border = tuple(max(0, b) for b in spatial_border) if len(spatial_border) == 1: data_pad_width = [(spatial_border[0], spatial_border[0]) for _ in range(len(spatial_shape))] @@ -242,13 +242,16 @@ def __init__( raise ValueError("Please specify either roi_center, roi_size or roi_start, roi_end.") self.roi_start = np.maximum(np.asarray(roi_start, dtype=np.int16), 0) self.roi_end = np.maximum(np.asarray(roi_end, dtype=np.int16), self.roi_start) + # Allow for 1D by converting back to np.array (since np.maximum will convert to int) + self.roi_start = self.roi_start if isinstance(self.roi_start, np.ndarray) else np.array([self.roi_start]) + self.roi_end = self.roi_end if isinstance(self.roi_end, np.ndarray) else np.array([self.roi_end]) def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> np.ndarray: """ Apply the transform to `img`, assuming `img` is channel-first and slicing doesn't apply to the channel dim. """ - sd = min(len(self.roi_start), len(self.roi_end), len(img.shape[1:])) # spatial dims + sd = min(self.roi_start.size, self.roi_end.size, len(img.shape[1:])) # spatial dims slices = [slice(None)] + [slice(s, e) for s, e in zip(self.roi_start[:sd], self.roi_end[:sd])] return np.asarray(img[tuple(slices)]) diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 667fb7a821..02ddf723be 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -16,6 +16,7 @@ """ from copy import deepcopy +from itertools import chain from math import floor from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union @@ -148,7 +149,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar return d -class BorderPadd(MapTransform): +class BorderPadd(MapTransform, InvertibleTransform): """ Pad the input data by adding specified borders to every dimension. Dictionary-based wrapper of :py:class:`monai.transforms.BorderPad`. @@ -191,11 +192,36 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key, m in self.key_iterator(d, self.mode): + self.push_transform(d, key) d[key] = self.padder(d[key], mode=m) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) + roi_start = np.array(self.padder.spatial_border) + # Need to convert single value to [min1,min2,...] 
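+            # (`spatial_border` accepts three layouts: a single int for every
+            # border, one int per spatial dimension, or interleaved
+            # [min1, max1, min2, max2, ...]; the inverse crop only needs the
+            # per-dimension start offsets.)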
+ if roi_start.size == 1: + roi_start = np.full((len(orig_size)), roi_start) + # need to convert [min1,max1,min2,...] to [min1,min2,...] + elif roi_start.size == 2 * orig_size.size: + roi_start = roi_start[::2] + roi_end = np.array(transform[InverseKeys.ORIG_SIZE.value]) + roi_start + + inverse_transform = SpatialCrop(roi_start=roi_start, roi_end=roi_end) + # Apply inverse transform + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + -class DivisiblePadd(MapTransform): +class DivisiblePadd(MapTransform, InvertibleTransform): """ Pad the input data, so that the spatial sizes are divisible by `k`. Dictionary-based wrapper of :py:class:`monai.transforms.DivisiblePad`. @@ -232,11 +258,30 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key, m in self.key_iterator(d, self.mode): + self.push_transform(d, key) d[key] = self.padder(d[key], mode=m) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) + current_size = np.array(d[key].shape[1:]) + roi_start = np.floor((current_size - orig_size) / 2) + roi_end = orig_size + roi_start + inverse_transform = SpatialCrop(roi_start=roi_start, roi_end=roi_end) + # Apply inverse transform + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + -class SpatialCropd(MapTransform): +class SpatialCropd(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.SpatialCrop`. Either a spatial center and size must be provided, or alternatively if center and size @@ -268,11 +313,31 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key in self.key_iterator(d): + self.push_transform(d, key) d[key] = self.cropper(d[key]) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + orig_size = transform[InverseKeys.ORIG_SIZE.value] + pad_to_start = np.array(self.cropper.roi_start) + pad_to_end = orig_size - self.cropper.roi_end + # interleave mins and maxes + pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist()))) + inverse_transform = BorderPad(pad) + # Apply inverse transform + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + -class CenterSpatialCropd(MapTransform): +class CenterSpatialCropd(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.CenterSpatialCrop`. 
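# An aside (illustrative) on the idiom used by the `inverse` methods in this
# file: per-dimension start/end pads are interleaved into BorderPad's
# [min1, max1, min2, max2, ...] layout via chain/zip:
#
#     from itertools import chain
#
#     pad_to_start, pad_to_end = [1, 2], [3, 4]
#     list(chain(*zip(pad_to_start, pad_to_end)))  # -> [1, 3, 2, 4]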
@@ -293,11 +358,34 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key in self.key_iterator(d): + orig_size = d[key].shape[1:] d[key] = self.cropper(d[key]) + self.push_transform(d, key, orig_size=orig_size) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) -class RandSpatialCropd(RandomizableTransform, MapTransform): + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) + current_size = np.array(d[key].shape[1:]) + pad_to_start = np.floor((orig_size - current_size) / 2).astype(int) + # in each direction, if original size is even and current size is odd, += 1 + pad_to_start[np.logical_and(orig_size % 2 == 0, current_size % 2 == 1)] += 1 + pad_to_end = orig_size - current_size - pad_to_start + pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist()))) + inverse_transform = BorderPad(pad) + # Apply inverse transform + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + + +class RandSpatialCropd(RandomizableTransform, MapTransform, InvertibleTransform): """ Dictionary-based version :py:class:`monai.transforms.RandSpatialCrop`. Crop image with random size or specific size ROI. It can crop at a random position as @@ -326,6 +414,7 @@ def __init__( ) -> None: RandomizableTransform.__init__(self) MapTransform.__init__(self, keys, allow_missing_keys) + self._do_transform = True self.roi_size = roi_size self.random_center = random_center self.random_size = random_size @@ -347,12 +436,46 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda raise AssertionError for key in self.key_iterator(d): if self.random_center: + self.push_transform(d, key, {"slices": [(i.start, i.stop) for i in self._slices[1:]]}) # type: ignore d[key] = d[key][self._slices] else: + self.push_transform(d, key) cropper = CenterSpatialCrop(self._size) d[key] = cropper(d[key]) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + orig_size = transform[InverseKeys.ORIG_SIZE.value] + random_center = self.random_center + pad_to_start = np.empty((len(orig_size)), dtype=np.int32) + pad_to_end = np.empty((len(orig_size)), dtype=np.int32) + if random_center: + for i, _slice in enumerate(transform[InverseKeys.EXTRA_INFO.value]["slices"]): + pad_to_start[i] = _slice[0] + pad_to_end[i] = orig_size[i] - _slice[1] + else: + current_size = d[key].shape[1:] + for i, (o_s, c_s) in enumerate(zip(orig_size, current_size)): + pad_to_start[i] = pad_to_end[i] = (o_s - c_s) / 2 + if o_s % 2 == 0 and c_s % 2 == 1: + pad_to_start[i] += 1 + elif o_s % 2 == 1 and c_s % 2 == 0: + pad_to_end[i] += 1 + # interleave mins and maxes + pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist()))) + inverse_transform = BorderPad(pad) + # Apply inverse transform + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + class RandSpatialCropSamplesd(RandomizableTransform, MapTransform): """ @@ -409,7 +532,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n return [self.cropper(data) for _ in range(self.num_samples)] 
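The floor-and-adjust arithmetic in `CenterSpatialCropd.inverse` mirrors how the forward center crop floors its start index, so the inverse pad has to be biased the same way. A small numeric sketch of that step (sizes chosen only for illustration):

    import numpy as np

    orig_size = np.array([10, 11])     # spatial sizes before the center crop
    current_size = np.array([7, 7])    # spatial sizes after the center crop
    pad_to_start = np.floor((orig_size - current_size) / 2).astype(int)           # [1, 2]
    # where the original size is even and the current size is odd, the forward
    # crop started one voxel later, so the inverse pad adds one at the start
    pad_to_start[np.logical_and(orig_size % 2 == 0, current_size % 2 == 1)] += 1  # [2, 2]
    pad_to_end = orig_size - current_size - pad_to_start                          # [1, 2]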
-class CropForegroundd(MapTransform): +class CropForegroundd(MapTransform, InvertibleTransform): """ Dictionary-based version :py:class:`monai.transforms.CropForeground`. Crop only the foreground object of the expected images. @@ -463,9 +586,29 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda d[self.end_coord_key] = np.asarray(box_end) cropper = SpatialCrop(roi_start=box_start, roi_end=box_end) for key in self.key_iterator(d): + self.push_transform(d, key, extra_info={"box_start": box_start, "box_end": box_end}) d[key] = cropper(d[key]) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) + extra_info = transform[InverseKeys.EXTRA_INFO.value] + pad_to_start = np.array(extra_info["box_start"]) + pad_to_end = orig_size - np.array(extra_info["box_end"]) + # interleave mins and maxes + pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist()))) + inverse_transform = BorderPad(pad) + # Apply inverse transform + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + class RandWeightedCropd(RandomizableTransform, MapTransform): """ @@ -645,7 +788,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n return results -class ResizeWithPadOrCropd(MapTransform): +class ResizeWithPadOrCropd(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.ResizeWithPadOrCrop`. @@ -675,7 +818,44 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key in self.key_iterator(d): + orig_size = d[key].shape[1:] d[key] = self.padcropper(d[key]) + self.push_transform(d, key, orig_size=orig_size) + return d + + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) + current_size = np.array(d[key].shape[1:]) + # Unfortunately, we can't just use ResizeWithPadOrCrop with original size because of odd/even rounding. + # Instead, we first pad any smaller dimensions, and then we crop any larger dimensions. 
+ + # First, do pad + if np.any((orig_size - current_size) > 0): + pad_to_start = np.floor((orig_size - current_size) / 2).astype(int) + # in each direction, if original size is even and current size is odd, += 1 + pad_to_start[np.logical_and(orig_size % 2 == 0, current_size % 2 == 1)] += 1 + pad_to_start[pad_to_start < 0] = 0 + pad_to_end = orig_size - current_size - pad_to_start + pad_to_end[pad_to_end < 0] = 0 + pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist()))) + d[key] = BorderPad(pad)(d[key]) + + # Next crop + if np.any((orig_size - current_size) < 0): + if self.padcropper.padder.method == Method.SYMMETRIC: + roi_center = [floor(i / 2) if r % 2 == 0 else (i - 1) // 2 for r, i in zip(orig_size, current_size)] + else: + roi_center = [floor(r / 2) if r % 2 == 0 else (r - 1) // 2 for r in orig_size] + + d[key] = SpatialCrop(roi_center, orig_size)(d[key]) + + # Remove the applied transform + self.pop_transform(d, key) + return d diff --git a/tests/test_inverse.py b/tests/test_inverse.py index 46729c7bc6..6635a4126f 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -11,6 +11,7 @@ import sys import unittest +from functools import partial from typing import TYPE_CHECKING, List, Tuple import numpy as np @@ -21,12 +22,18 @@ from monai.data.utils import decollate_batch from monai.networks.nets import UNet from monai.transforms import ( - AddChannel, AddChanneld, + BorderPadd, + CenterSpatialCropd, Compose, + CropForegroundd, + DivisiblePadd, InvertibleTransform, LoadImaged, + RandSpatialCropd, ResizeWithPadOrCrop, + ResizeWithPadOrCropd, + SpatialCropd, SpatialPadd, allow_missing_keys_mode, ) @@ -44,11 +51,44 @@ TESTS: List[Tuple] = [] +# For pad, start with odd/even images and add odd/even amounts +for name in ("1D even", "1D odd"): + for val in (3, 4): + for t in ( + partial(SpatialPadd, spatial_size=val, method="symmetric"), + partial(SpatialPadd, spatial_size=val, method="end"), + partial(BorderPadd, spatial_border=[val, val + 1]), + partial(DivisiblePadd, k=val), + partial(ResizeWithPadOrCropd, spatial_size=20 + val), + partial(CenterSpatialCropd, roi_size=10 + val), + partial(CropForegroundd, source_key="label"), + partial(SpatialCropd, roi_center=10, roi_size=10 + val), + partial(SpatialCropd, roi_center=11, roi_size=10 + val), + partial(SpatialCropd, roi_start=val, roi_end=17), + partial(SpatialCropd, roi_start=val, roi_end=16), + partial(RandSpatialCropd, roi_size=12 + val), + partial(ResizeWithPadOrCropd, spatial_size=21 - val), + ): + TESTS.append((t.func.__name__ + name, name, 0, t(KEYS))) # type: ignore + +# non-sensical tests: crop bigger or pad smaller or -ve values +for t in ( + partial(DivisiblePadd, k=-3), + partial(CenterSpatialCropd, roi_size=-3), + partial(RandSpatialCropd, roi_size=-3), + partial(SpatialPadd, spatial_size=15), + partial(BorderPadd, spatial_border=[15, 16]), + partial(CenterSpatialCropd, roi_size=30), + partial(SpatialCropd, roi_center=10, roi_size=100), + partial(SpatialCropd, roi_start=3, roi_end=100), +): + TESTS.append((t.func.__name__ + "bad 1D even", "1D even", 0, t(KEYS))) # type: ignore + TESTS.append( ( "SpatialPadd (x2) 2d", "2D", - 0.0, + 0, SpatialPadd(KEYS, spatial_size=[111, 113], method="end"), SpatialPadd(KEYS, spatial_size=[118, 117]), ) @@ -58,11 +98,114 @@ ( "SpatialPadd 3d", "3D", - 0.0, + 0, SpatialPadd(KEYS, spatial_size=[112, 113, 116]), ) ) + +TESTS.append( + ( + "SpatialCropd 2d", + "2D", + 0, + SpatialCropd(KEYS, [49, 51], [90, 89]), + ) +) + +TESTS.append( + ( + "SpatialCropd 2d", + "2D", + 0, + 
SpatialCropd(KEYS, [49, 51], [390, 89]), + ) +) + +TESTS.append( + ( + "SpatialCropd 3d", + "3D", + 0, + SpatialCropd(KEYS, [49, 51, 44], [90, 89, 93]), + ) +) + +TESTS.append(("RandSpatialCropd 2d", "2D", 0, RandSpatialCropd(KEYS, [96, 93], True, False))) + +TESTS.append(("RandSpatialCropd 3d", "3D", 0, RandSpatialCropd(KEYS, [96, 93, 92], False, False))) + +TESTS.append( + ( + "BorderPadd 2d", + "2D", + 0, + BorderPadd(KEYS, [3, 7, 2, 5]), + ) +) + +TESTS.append( + ( + "BorderPadd 2d", + "2D", + 0, + BorderPadd(KEYS, [3, 7]), + ) +) + +TESTS.append( + ( + "BorderPadd 3d", + "3D", + 0, + BorderPadd(KEYS, [4]), + ) +) + +TESTS.append( + ( + "DivisiblePadd 2d", + "2D", + 0, + DivisiblePadd(KEYS, k=4), + ) +) + +TESTS.append( + ( + "DivisiblePadd 3d", + "3D", + 0, + DivisiblePadd(KEYS, k=[4, 8, 11]), + ) +) + + +TESTS.append( + ( + "CenterSpatialCropd 2d", + "2D", + 0, + CenterSpatialCropd(KEYS, roi_size=95), + ) +) + +TESTS.append( + ( + "CenterSpatialCropd 3d", + "3D", + 0, + CenterSpatialCropd(KEYS, roi_size=[95, 97, 98]), + ) +) + +TESTS.append(("CropForegroundd 2d", "2D", 0, CropForegroundd(KEYS, source_key="label", margin=2))) + +TESTS.append(("CropForegroundd 3d", "3D", 0, CropForegroundd(KEYS, source_key="label"))) + + +TESTS.append(("ResizeWithPadOrCropd 3d", "3D", 0, ResizeWithPadOrCropd(KEYS, [201, 150, 105]))) + TESTS_COMPOSE_X2 = [(t[0] + " Compose", t[1], t[2], Compose(Compose(t[3:]))) for t in TESTS] TESTS = TESTS + TESTS_COMPOSE_X2 # type: ignore @@ -118,8 +261,11 @@ def setUp(self): affine = make_rand_affine() affine[0] *= 2 - im_1d = AddChannel()(np.arange(0, 10)) - self.all_data["1D"] = {"image": im_1d, "label": im_1d, "other": im_1d} + for size in [10, 11]: + # pad 5 onto both ends so that cropping can be lossless + im_1d = np.pad(np.arange(size), 5)[None] + name = "1D even" if size % 2 == 0 else "1D odd" + self.all_data[name] = {"image": im_1d, "label": im_1d, "other": im_1d} im_2d_fname, seg_2d_fname = [make_nifti_image(i) for i in create_test_image_2d(101, 100)] im_3d_fname, seg_3d_fname = [make_nifti_image(i, affine) for i in create_test_image_3d(100, 101, 107)] @@ -147,6 +293,10 @@ def check_inverse(self, name, keys, orig_d, fwd_bck_d, unmodified_d, acceptable_ print( f"Failed: {name}. 
Mean diff = {mean_diff} (expected <= {acceptable_diff}), unmodified diff: {unmodded_diff}" ) + if orig[0].ndim == 1: + print("orig", orig[0]) + print("fwd_bck", fwd_bck[0]) + print("unmod", unmodified[0]) raise @parameterized.expand(TESTS) @@ -183,7 +333,7 @@ def test_inverse_inferred_seg(self): batch_size = 10 # num workers = 0 for mac num_workers = 2 if sys.platform != "darwin" else 0 - transforms = Compose([AddChanneld(KEYS), SpatialPadd(KEYS, (150, 153))]) + transforms = Compose([AddChanneld(KEYS), SpatialPadd(KEYS, (150, 153)), CenterSpatialCropd(KEYS, (110, 99))]) num_invertible_transforms = sum(1 for i in transforms.transforms if isinstance(i, InvertibleTransform)) dataset = CacheDataset(test_data, transform=transforms, progress=False) @@ -203,6 +353,7 @@ def test_inverse_inferred_seg(self): segs = model(labels).detach().cpu() label_transform_key = "label" + InverseKeys.KEY_SUFFIX.value segs_dict = {"label": segs, label_transform_key: data[label_transform_key]} + segs_dict_decollated = decollate_batch(segs_dict) # inverse of individual segmentation From e17e1586947930ac10f8dbc7c1e811e5862cd41f Mon Sep 17 00:00:00 2001 From: adam aji <3487395+adamaji@users.noreply.github.com> Date: Sat, 13 Mar 2021 18:33:47 -0500 Subject: [PATCH 054/457] TEST: Fix failing pytype check for predict_segmentation (#1762) --- monai/networks/utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/monai/networks/utils.py b/monai/networks/utils.py index bd25e358f6..c5989f174b 100644 --- a/monai/networks/utils.py +++ b/monai/networks/utils.py @@ -71,9 +71,7 @@ def slice_channels(tensor: torch.Tensor, *slicevals: Optional[int]) -> torch.Ten return tensor[slices] -def predict_segmentation( - logits: torch.Tensor, mutually_exclusive: bool = False, threshold: float = 0.0 -) -> torch.Tensor: +def predict_segmentation(logits: torch.Tensor, mutually_exclusive: bool = False, threshold: float = 0.0) -> Any: """ Given the logits from a network, computing the segmentation by thresholding all values above 0 if multi-labels task, computing the `argmax` along the channel axis if multi-classes task, From 42e963f8826349f525967ae6db542a0002d9e636 Mon Sep 17 00:00:00 2001 From: adam aji <3487395+adamaji@users.noreply.github.com> Date: Sun, 14 Mar 2021 10:47:29 -0400 Subject: [PATCH 055/457] BUG: MedNISTDataset returns differently sized datasets for different seeds (#1761) * TEST: Check for identical MedNISTDataset length for different seeds Signed-off-by: Adam Aji <3487395+adamaji@users.noreply.github.com> * BUG: MedNISTDataset returns differently sized datasets for diff seeds Signed-off-by: Adam Aji <3487395+adamaji@users.noreply.github.com> --- monai/apps/datasets.py | 40 ++++++++++++++++++++---------------- tests/test_mednistdataset.py | 7 ++++++- 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/monai/apps/datasets.py b/monai/apps/datasets.py index f0416b8c4f..a193b7391e 100644 --- a/monai/apps/datasets.py +++ b/monai/apps/datasets.py @@ -11,7 +11,7 @@ import os import sys -from typing import Any, Callable, Dict, List, Optional, Sequence, Union +from typing import Callable, Dict, List, Optional, Sequence, Union import numpy as np @@ -98,8 +98,8 @@ def __init__( self, data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers ) - def randomize(self, data: Optional[Any] = None) -> None: - self.rann = self.R.random() + def randomize(self, data: List[int]) -> None: + self.R.shuffle(data) def get_num_classes(self) -> int: """Get number of classes.""" @@ -132,22 
+132,26 @@ def _generate_data_list(self, dataset_dir: str) -> List[Dict]: data = [] - for i in range(num_total): - self.randomize() - if self.section == "training": - if self.rann < self.val_frac + self.test_frac: - continue - elif self.section == "validation": - if self.rann >= self.val_frac: - continue - elif self.section == "test": - if self.rann < self.val_frac or self.rann >= self.val_frac + self.test_frac: - continue - else: - raise ValueError( - f'Unsupported section: {self.section}, available options are ["training", "validation", "test"].' - ) + length = len(image_files_list) + indices = np.arange(length) + self.randomize(indices) + + test_length = int(length * self.test_frac) + val_length = int(length * self.val_frac) + if self.section == "test": + section_indices = indices[:test_length] + elif self.section == "validation": + section_indices = indices[test_length : test_length + val_length] + elif self.section == "training": + section_indices = indices[test_length + val_length :] + else: + raise ValueError( + f'Unsupported section: {self.section}, available options are ["training", "validation", "test"].' + ) + + for i in section_indices: data.append({"image": image_files_list[i], "label": image_class[i], "class_name": class_name[i]}) + return data diff --git a/tests/test_mednistdataset.py b/tests/test_mednistdataset.py index 0887734a7c..2e27f4ba95 100644 --- a/tests/test_mednistdataset.py +++ b/tests/test_mednistdataset.py @@ -18,6 +18,8 @@ from monai.transforms import AddChanneld, Compose, LoadImaged, ScaleIntensityd, ToTensord from tests.utils import skip_if_quick +MEDNIST_FULL_DATASET_LENGTH = 58954 + class TestMedNISTDataset(unittest.TestCase): @skip_if_quick @@ -33,7 +35,7 @@ def test_values(self): ) def _test_dataset(dataset): - self.assertEqual(len(dataset), 5986) + self.assertEqual(len(dataset), int(MEDNIST_FULL_DATASET_LENGTH * dataset.test_frac)) self.assertTrue("image" in dataset[0]) self.assertTrue("label" in dataset[0]) self.assertTrue("image_meta_dict" in dataset[0]) @@ -56,6 +58,9 @@ def _test_dataset(dataset): _test_dataset(data) data = MedNISTDataset(root_dir=testing_dir, section="test", download=False) self.assertTupleEqual(data[0]["image"].shape, (64, 64)) + # test same dataset length with different random seed + data = MedNISTDataset(root_dir=testing_dir, transform=transform, section="test", download=False, seed=42) + _test_dataset(data) shutil.rmtree(os.path.join(testing_dir, "MedNIST")) try: data = MedNISTDataset(root_dir=testing_dir, transform=transform, section="test", download=False) From 4670fd2dedcca715365cca0928b24f90a3f8f4e2 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Mon, 15 Mar 2021 19:57:15 +0800 Subject: [PATCH 056/457] Add __len__ for DistributedWeightedRandomSampler (#1766) * [DLMED] add length API Signed-off-by: Nic Ma * [DLMED] remove replacement arg Signed-off-by: Nic Ma --- monai/data/samplers.py | 13 +++++-------- tests/test_distributed_weighted_random_sampler.py | 4 +--- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/monai/data/samplers.py b/monai/data/samplers.py index 5fea6959de..8bba79c9b0 100644 --- a/monai/data/samplers.py +++ b/monai/data/samplers.py @@ -70,9 +70,6 @@ class DistributedWeightedRandomSampler(DistributedSampler): num_samples_per_rank: number of samples to draw for every rank, sample from the distributed subset of dataset. if None, default to the length of dataset split by DistributedSampler. 
- replacement: if ``True``, samples are drawn with replacement, otherwise, they are - drawn without replacement, which means that when a sample index is drawn for a row, - it cannot be drawn again for that row, default to True. generator: PyTorch Generator used in sampling. even_divisible: if False, different ranks can have different data length. for example, input data: [1, 2, 3, 4, 5], rank 0: [1, 3, 5], rank 1: [2, 4].' @@ -90,7 +87,6 @@ def __init__( dataset: Dataset, weights: Sequence[float], num_samples_per_rank: Optional[int] = None, - replacement: bool = True, generator: Optional[torch.Generator] = None, even_divisible: bool = True, num_replicas: Optional[int] = None, @@ -107,16 +103,17 @@ def __init__( **kwargs, ) self.weights = weights - self.num_samples_per_rank = num_samples_per_rank - self.replacement = replacement + self.num_samples_per_rank = num_samples_per_rank if num_samples_per_rank is not None else self.num_samples self.generator = generator def __iter__(self): indices = list(super().__iter__()) - num_samples = self.num_samples_per_rank if self.num_samples_per_rank is not None else self.num_samples weights = torch.as_tensor([self.weights[i] for i in indices], dtype=torch.double) # sample based on the provided weights - rand_tensor = torch.multinomial(weights, num_samples, self.replacement, generator=self.generator) + rand_tensor = torch.multinomial(weights, self.num_samples_per_rank, True, generator=self.generator) for i in rand_tensor: yield indices[i] + + def __len__(self): + return self.num_samples_per_rank diff --git a/tests/test_distributed_weighted_random_sampler.py b/tests/test_distributed_weighted_random_sampler.py index 6e27e78d4c..b8e088fdcf 100644 --- a/tests/test_distributed_weighted_random_sampler.py +++ b/tests/test_distributed_weighted_random_sampler.py @@ -21,12 +21,11 @@ class DistributedWeightedRandomSamplerTest(DistTestCase): @DistCall(nnodes=1, nproc_per_node=2) - def test_replacement(self): + def test_sampling(self): data = [1, 2, 3, 4, 5] weights = [1, 2, 3, 4, 5] sampler = DistributedWeightedRandomSampler( weights=weights, - replacement=True, dataset=data, shuffle=False, generator=torch.Generator().manual_seed(0), @@ -46,7 +45,6 @@ def test_num_samples(self): sampler = DistributedWeightedRandomSampler( weights=weights, num_samples_per_rank=5, - replacement=True, dataset=data, shuffle=False, generator=torch.Generator().manual_seed(123), From 1badb41c06a513367627cb5ab5a9aa138a500adb Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Mon, 15 Mar 2021 16:04:11 +0000 Subject: [PATCH 057/457] reduce randomly cropped amount to appease CI (#1769) * reduce randomly cropped amount to appease CI Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * fixes rand inverse Signed-off-by: Wenqi Li --- monai/transforms/croppad/dictionary.py | 7 +++---- monai/transforms/transform.py | 2 +- tests/test_inverse.py | 11 +++++++++-- tests/test_rotated.py | 6 +++--- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 02ddf723be..822db28467 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -412,9 +412,8 @@ def __init__( random_size: bool = True, allow_missing_keys: bool = False, ) -> None: - RandomizableTransform.__init__(self) + RandomizableTransform.__init__(self, prob=1.0, do_transform=True) MapTransform.__init__(self, keys, allow_missing_keys) - 
self._do_transform = True self.roi_size = roi_size self.random_center = random_center self.random_size = random_size @@ -511,7 +510,7 @@ def __init__( random_size: bool = True, allow_missing_keys: bool = False, ) -> None: - RandomizableTransform.__init__(self) + RandomizableTransform.__init__(self, prob=1.0, do_transform=True) MapTransform.__init__(self, keys, allow_missing_keys) if num_samples < 1: raise ValueError(f"num_samples must be positive, got {num_samples}.") @@ -638,7 +637,7 @@ def __init__( center_coord_key: Optional[str] = None, allow_missing_keys: bool = False, ): - RandomizableTransform.__init__(self) + RandomizableTransform.__init__(self, prob=1.0, do_transform=True) MapTransform.__init__(self, keys, allow_missing_keys) self.spatial_size = ensure_tuple(spatial_size) self.w_key = w_key diff --git a/monai/transforms/transform.py b/monai/transforms/transform.py index 2a79b2edf2..fea46aafa3 100644 --- a/monai/transforms/transform.py +++ b/monai/transforms/transform.py @@ -171,7 +171,7 @@ def __call__(self, img): """ - def __init__(self, prob=1.0, do_transform=False): + def __init__(self, prob: float = 1.0, do_transform: bool = True): self._do_transform = do_transform self.prob = min(max(prob, 0.0), 1.0) diff --git a/tests/test_inverse.py b/tests/test_inverse.py index 6635a4126f..bb2d997eb5 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -30,6 +30,7 @@ DivisiblePadd, InvertibleTransform, LoadImaged, + Randomizable, RandSpatialCropd, ResizeWithPadOrCrop, ResizeWithPadOrCropd, @@ -37,7 +38,7 @@ SpatialPadd, allow_missing_keys_mode, ) -from monai.utils import first, optional_import, set_determinism +from monai.utils import first, get_seed, optional_import, set_determinism from monai.utils.enums import InverseKeys from tests.utils import make_nifti_image, make_rand_affine @@ -265,7 +266,11 @@ def setUp(self): # pad 5 onto both ends so that cropping can be lossless im_1d = np.pad(np.arange(size), 5)[None] name = "1D even" if size % 2 == 0 else "1D odd" - self.all_data[name] = {"image": im_1d, "label": im_1d, "other": im_1d} + self.all_data[name] = { + "image": np.array(im_1d, copy=True), + "label": np.array(im_1d, copy=True), + "other": np.array(im_1d, copy=True), + } im_2d_fname, seg_2d_fname = [make_nifti_image(i) for i in create_test_image_2d(101, 100)] im_3d_fname, seg_3d_fname = [make_nifti_image(i, affine) for i in create_test_image_3d(100, 101, 107)] @@ -309,6 +314,8 @@ def test_inverse(self, _, data_name, acceptable_diff, *transforms): # Apply forwards for t in transforms: + if isinstance(t, Randomizable): + t.set_random_state(seed=get_seed()) forwards.append(t(forwards[-1])) # Check that error is thrown when inverse are used out of order. 
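The `set_random_state` call added to the test above makes a `Randomizable` transform draw the same parameters on every run, which keeps the forward/inverse round trip reproducible. A short usage sketch (the key name, shape, and roi size are arbitrary):

    import numpy as np
    from monai.transforms import RandSpatialCropd
    from monai.utils import get_seed, set_determinism

    set_determinism(seed=0)
    crop = RandSpatialCropd(keys="image", roi_size=12, random_size=False)
    crop.set_random_state(seed=get_seed())   # same crop location on every run
    data = {"image": np.zeros((1, 20, 20))}
    cropped = crop(data)                     # records the applied slices
    restored = crop.inverse(cropped)         # zero-pads back to (1, 20, 20)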
diff --git a/tests/test_rotated.py b/tests/test_rotated.py index e0c1a27e98..dd57786fff 100644 --- a/tests/test_rotated.py +++ b/tests/test_rotated.py @@ -59,7 +59,7 @@ def test_correct_results(self, angle, keep_size, mode, padding_mode, align_corne self.segn[0, 0], -np.rad2deg(angle), (0, 1), not keep_size, order=0, mode=_mode, prefilter=False ) expected = np.stack(expected).astype(int) - self.assertLessEqual(np.count_nonzero(expected != rotated["seg"][0]), 20) + self.assertLessEqual(np.count_nonzero(expected != rotated["seg"][0]), 30) class TestRotated3D(NumpyImageTestCase3D): @@ -86,7 +86,7 @@ def test_correct_results(self, angle, keep_size, mode, padding_mode, align_corne self.segn[0, 0], np.rad2deg(angle), (0, 2), not keep_size, order=0, mode=_mode, prefilter=False ) expected = np.stack(expected).astype(int) - self.assertLessEqual(np.count_nonzero(expected != rotated["seg"][0]), 105) + self.assertLessEqual(np.count_nonzero(expected != rotated["seg"][0]), 110) class TestRotated3DXY(NumpyImageTestCase3D): @@ -113,7 +113,7 @@ def test_correct_results(self, angle, keep_size, mode, padding_mode, align_corne self.segn[0, 0], -np.rad2deg(angle), (0, 1), not keep_size, order=0, mode=_mode, prefilter=False ) expected = np.stack(expected).astype(int) - self.assertLessEqual(np.count_nonzero(expected != rotated["seg"][0]), 100) + self.assertLessEqual(np.count_nonzero(expected != rotated["seg"][0]), 110) if __name__ == "__main__": From e5726d14f79e0b058fdf2c32e715450a04d01e0a Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 16 Mar 2021 03:40:03 +0800 Subject: [PATCH 058/457] 1756 update ignite version to 0.4.4 (#1759) * [DLMED] update ignite to 0.4.4 Signed-off-by: Nic Ma * [DLMED] fix ignite compatible issues Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma --- docs/requirements.txt | 2 +- monai/engines/evaluator.py | 4 ++-- monai/engines/multi_gpu_supervised_trainer.py | 10 +++++----- monai/engines/trainer.py | 4 ++-- monai/engines/utils.py | 2 +- monai/engines/workflow.py | 10 +++++----- monai/handlers/checkpoint_loader.py | 6 +++--- monai/handlers/checkpoint_saver.py | 10 +++++----- monai/handlers/classification_saver.py | 6 +++--- monai/handlers/confusion_matrix.py | 4 ++-- monai/handlers/hausdorff_distance.py | 4 ++-- monai/handlers/iteration_metric.py | 14 +++++++------- monai/handlers/lr_schedule_handler.py | 4 ++-- monai/handlers/mean_dice.py | 4 ++-- monai/handlers/metric_logger.py | 4 ++-- monai/handlers/metrics_saver.py | 6 +++--- monai/handlers/roc_auc.py | 6 +++--- monai/handlers/segmentation_saver.py | 4 ++-- monai/handlers/smartcache_handler.py | 4 ++-- monai/handlers/stats_handler.py | 4 ++-- monai/handlers/surface_distance.py | 4 ++-- monai/handlers/tensorboard_handlers.py | 4 ++-- monai/handlers/utils.py | 8 ++++---- monai/handlers/validation_handler.py | 4 ++-- requirements-dev.txt | 2 +- setup.cfg | 4 ++-- 26 files changed, 69 insertions(+), 69 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index cd06166359..22fd2589f0 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,6 +1,6 @@ -f https://download.pytorch.org/whl/cpu/torch-1.6.0%2Bcpu-cp37-cp37m-linux_x86_64.whl torch>=1.5 -pytorch-ignite==0.4.2 +pytorch-ignite==0.4.4 numpy>=1.17 itk>=5.0 nibabel diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py index b8977a3652..0afa3747a4 100644 --- a/monai/engines/evaluator.py +++ b/monai/engines/evaluator.py @@ -26,8 +26,8 @@ from ignite.engine 
import Engine from ignite.metrics import Metric else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") - Metric, _ = optional_import("ignite.metrics", "0.4.2", exact_version, "Metric") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") + Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric") __all__ = ["Evaluator", "SupervisedEvaluator", "EnsembleEvaluator"] diff --git a/monai/engines/multi_gpu_supervised_trainer.py b/monai/engines/multi_gpu_supervised_trainer.py index d12e012a56..d0e09443fa 100644 --- a/monai/engines/multi_gpu_supervised_trainer.py +++ b/monai/engines/multi_gpu_supervised_trainer.py @@ -19,15 +19,15 @@ from monai.engines.utils import get_devices_spec from monai.utils import exact_version, optional_import -create_supervised_trainer, _ = optional_import("ignite.engine", "0.4.2", exact_version, "create_supervised_trainer") -create_supervised_evaluator, _ = optional_import("ignite.engine", "0.4.2", exact_version, "create_supervised_evaluator") -_prepare_batch, _ = optional_import("ignite.engine", "0.4.2", exact_version, "_prepare_batch") +create_supervised_trainer, _ = optional_import("ignite.engine", "0.4.4", exact_version, "create_supervised_trainer") +create_supervised_evaluator, _ = optional_import("ignite.engine", "0.4.4", exact_version, "create_supervised_evaluator") +_prepare_batch, _ = optional_import("ignite.engine", "0.4.4", exact_version, "_prepare_batch") if TYPE_CHECKING: from ignite.engine import Engine from ignite.metrics import Metric else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") - Metric, _ = optional_import("ignite.metrics", "0.4.2", exact_version, "Metric") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") + Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric") __all__ = [ "create_multigpu_supervised_trainer", diff --git a/monai/engines/trainer.py b/monai/engines/trainer.py index c3d471e261..5b996eafe1 100644 --- a/monai/engines/trainer.py +++ b/monai/engines/trainer.py @@ -26,8 +26,8 @@ from ignite.engine import Engine from ignite.metrics import Metric else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") - Metric, _ = optional_import("ignite.metrics", "0.4.2", exact_version, "Metric") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") + Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric") __all__ = ["Trainer", "SupervisedTrainer", "GanTrainer"] diff --git a/monai/engines/utils.py b/monai/engines/utils.py index 8f5899f2a5..b0b1e44f71 100644 --- a/monai/engines/utils.py +++ b/monai/engines/utils.py @@ -18,7 +18,7 @@ if TYPE_CHECKING: from ignite.engine import EventEnum else: - EventEnum, _ = optional_import("ignite.engine", "0.4.2", exact_version, "EventEnum") + EventEnum, _ = optional_import("ignite.engine", "0.4.4", exact_version, "EventEnum") __all__ = [ "IterationEvents", diff --git a/monai/engines/workflow.py b/monai/engines/workflow.py index b50d58f1a2..61b92ac5dd 100644 --- a/monai/engines/workflow.py +++ b/monai/engines/workflow.py @@ -20,15 +20,15 @@ from monai.transforms import apply_transform from monai.utils import ensure_tuple, exact_version, optional_import -IgniteEngine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") -State, _ = optional_import("ignite.engine", "0.4.2", exact_version, "State") -Events, _ = optional_import("ignite.engine", "0.4.2", 
exact_version, "Events") +IgniteEngine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") +State, _ = optional_import("ignite.engine", "0.4.4", exact_version, "State") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: from ignite.engine import Engine from ignite.metrics import Metric else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") - Metric, _ = optional_import("ignite.metrics", "0.4.2", exact_version, "Metric") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") + Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric") class Workflow(IgniteEngine): # type: ignore[valid-type, misc] # due to optional_import diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py index 648cc8360a..e6319a3c64 100644 --- a/monai/handlers/checkpoint_loader.py +++ b/monai/handlers/checkpoint_loader.py @@ -16,12 +16,12 @@ from monai.utils import exact_version, optional_import -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") -Checkpoint, _ = optional_import("ignite.handlers", "0.4.2", exact_version, "Checkpoint") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") +Checkpoint, _ = optional_import("ignite.handlers", "0.4.4", exact_version, "Checkpoint") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") class CheckpointLoader: diff --git a/monai/handlers/checkpoint_saver.py b/monai/handlers/checkpoint_saver.py index 1808e6b251..0c65b8cd4b 100644 --- a/monai/handlers/checkpoint_saver.py +++ b/monai/handlers/checkpoint_saver.py @@ -15,16 +15,16 @@ from monai.utils import exact_version, optional_import -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") -Checkpoint, _ = optional_import("ignite.handlers", "0.4.2", exact_version, "Checkpoint") -BaseSaveHandler, _ = optional_import("ignite.handlers.checkpoint", "0.4.2", exact_version, "BaseSaveHandler") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") +Checkpoint, _ = optional_import("ignite.handlers", "0.4.4", exact_version, "Checkpoint") +BaseSaveHandler, _ = optional_import("ignite.handlers.checkpoint", "0.4.4", exact_version, "BaseSaveHandler") if TYPE_CHECKING: from ignite.engine import Engine from ignite.handlers import DiskSaver else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") - DiskSaver, _ = optional_import("ignite.handlers", "0.4.2", exact_version, "DiskSaver") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") + DiskSaver, _ = optional_import("ignite.handlers", "0.4.4", exact_version, "DiskSaver") class CheckpointSaver: diff --git a/monai/handlers/classification_saver.py b/monai/handlers/classification_saver.py index 33ce7c7ec8..98f917330f 100644 --- a/monai/handlers/classification_saver.py +++ b/monai/handlers/classification_saver.py @@ -17,12 +17,12 @@ from monai.utils import ImageMetaKey as Key from monai.utils import exact_version, optional_import -idist, _ = optional_import("ignite", "0.4.2", exact_version, "distributed") -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") +idist, _ = optional_import("ignite", "0.4.4", exact_version, "distributed") +Events, _ = optional_import("ignite.engine", 
"0.4.4", exact_version, "Events") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") class ClassificationSaver: diff --git a/monai/handlers/confusion_matrix.py b/monai/handlers/confusion_matrix.py index 1741aa305a..551fd29199 100644 --- a/monai/handlers/confusion_matrix.py +++ b/monai/handlers/confusion_matrix.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Callable, Optional +from typing import Any, Callable, Union import torch @@ -28,7 +28,7 @@ def __init__( include_background: bool = True, metric_name: str = "hit_rate", output_transform: Callable = lambda x: x, - device: Optional[torch.device] = None, + device: Union[str, torch.device] = "cpu", save_details: bool = True, ) -> None: """ diff --git a/monai/handlers/hausdorff_distance.py b/monai/handlers/hausdorff_distance.py index 7ac52d642a..042a587852 100644 --- a/monai/handlers/hausdorff_distance.py +++ b/monai/handlers/hausdorff_distance.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Callable, Optional +from typing import Callable, Optional, Union import torch @@ -30,7 +30,7 @@ def __init__( percentile: Optional[float] = None, directed: bool = False, output_transform: Callable = lambda x: x, - device: Optional[torch.device] = None, + device: Union[str, torch.device] = "cpu", save_details: bool = True, ) -> None: """ diff --git a/monai/handlers/iteration_metric.py b/monai/handlers/iteration_metric.py index 641efad243..f49c799a21 100644 --- a/monai/handlers/iteration_metric.py +++ b/monai/handlers/iteration_metric.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence +from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union import torch @@ -17,13 +17,13 @@ from monai.metrics import do_metric_reduction from monai.utils import MetricReduction, exact_version, optional_import -idist, _ = optional_import("ignite", "0.4.2", exact_version, "distributed") -Metric, _ = optional_import("ignite.metrics", "0.4.2", exact_version, "Metric") -reinit__is_reduced, _ = optional_import("ignite.metrics.metric", "0.4.2", exact_version, "reinit__is_reduced") +idist, _ = optional_import("ignite", "0.4.4", exact_version, "distributed") +Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric") +reinit__is_reduced, _ = optional_import("ignite.metrics.metric", "0.4.4", exact_version, "reinit__is_reduced") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") class IterationMetric(Metric): # type: ignore[valid-type, misc] # due to optional_import @@ -46,7 +46,7 @@ def __init__( self, metric_fn: Callable, output_transform: Callable = lambda x: x, - device: Optional[torch.device] = None, + device: Union[str, torch.device] = "cpu", save_details: bool = True, ) -> None: self._is_reduced: bool = False @@ -77,7 +77,7 @@ def update(self, output: Sequence[torch.Tensor]) -> None: score = self.metric_fn(y_pred, y) if isinstance(score, (tuple, list)): score = score[0] - self._scores.append(score) + self._scores.append(score.to(self._device)) def compute(self) -> Any: """ diff --git a/monai/handlers/lr_schedule_handler.py b/monai/handlers/lr_schedule_handler.py index e5593f07ff..3b300537b2 100644 --- a/monai/handlers/lr_schedule_handler.py +++ b/monai/handlers/lr_schedule_handler.py @@ -16,11 +16,11 @@ from monai.utils import ensure_tuple, exact_version, optional_import -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") class LrScheduleHandler: diff --git a/monai/handlers/mean_dice.py b/monai/handlers/mean_dice.py index 7decc3ab9b..6d51c534cf 100644 --- a/monai/handlers/mean_dice.py +++ b/monai/handlers/mean_dice.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
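Alongside the version bump, the handlers' `device` argument changes from `Optional[torch.device]` to `Union[str, torch.device]` with a `"cpu"` default, so both spellings below are accepted (a minimal sketch):

    import torch
    from monai.handlers import MeanDice

    metric_a = MeanDice(device="cpu")                # plain string, the new default
    metric_b = MeanDice(device=torch.device("cpu"))  # an explicit torch.device still works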
-from typing import Callable, Optional +from typing import Callable, Union import torch @@ -27,7 +27,7 @@ def __init__( self, include_background: bool = True, output_transform: Callable = lambda x: x, - device: Optional[torch.device] = None, + device: Union[str, torch.device] = "cpu", save_details: bool = True, ) -> None: """ diff --git a/monai/handlers/metric_logger.py b/monai/handlers/metric_logger.py index fdd60da57c..758276d03d 100644 --- a/monai/handlers/metric_logger.py +++ b/monai/handlers/metric_logger.py @@ -14,11 +14,11 @@ from monai.utils import exact_version, optional_import -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") class MetricLogger: diff --git a/monai/handlers/metrics_saver.py b/monai/handlers/metrics_saver.py index 87d7223c96..082c370e48 100644 --- a/monai/handlers/metrics_saver.py +++ b/monai/handlers/metrics_saver.py @@ -15,12 +15,12 @@ from monai.utils import ImageMetaKey as Key from monai.utils import ensure_tuple, exact_version, optional_import -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") -idist, _ = optional_import("ignite", "0.4.2", exact_version, "distributed") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") +idist, _ = optional_import("ignite", "0.4.4", exact_version, "distributed") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") class MetricsSaver: diff --git a/monai/handlers/roc_auc.py b/monai/handlers/roc_auc.py index 2273b9ee89..9a9af601f9 100644 --- a/monai/handlers/roc_auc.py +++ b/monai/handlers/roc_auc.py @@ -17,8 +17,8 @@ from monai.metrics import compute_roc_auc from monai.utils import Average, exact_version, optional_import -idist, _ = optional_import("ignite", "0.4.2", exact_version, "distributed") -EpochMetric, _ = optional_import("ignite.metrics", "0.4.2", exact_version, "EpochMetric") +idist, _ = optional_import("ignite", "0.4.4", exact_version, "distributed") +EpochMetric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "EpochMetric") class ROCAUC(EpochMetric): # type: ignore[valid-type, misc] # due to optional_import @@ -61,7 +61,7 @@ def __init__( other_act: Optional[Callable] = None, average: Union[Average, str] = Average.MACRO, output_transform: Callable = lambda x: x, - device: Optional[torch.device] = None, + device: Union[str, torch.device] = "cpu", ) -> None: def _compute_fn(pred, label): return compute_roc_auc( diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py index 56370fd41c..25238ea442 100644 --- a/monai/handlers/segmentation_saver.py +++ b/monai/handlers/segmentation_saver.py @@ -18,11 +18,11 @@ from monai.transforms import SaveImage from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, exact_version, optional_import -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, 
"Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") class SegmentationSaver: diff --git a/monai/handlers/smartcache_handler.py b/monai/handlers/smartcache_handler.py index 423d87c22a..821f883d91 100644 --- a/monai/handlers/smartcache_handler.py +++ b/monai/handlers/smartcache_handler.py @@ -14,11 +14,11 @@ from monai.data import SmartCacheDataset from monai.utils import exact_version, optional_import -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") class SmartCacheHandler: diff --git a/monai/handlers/stats_handler.py b/monai/handlers/stats_handler.py index 24d844569f..6d4a4e958b 100644 --- a/monai/handlers/stats_handler.py +++ b/monai/handlers/stats_handler.py @@ -17,11 +17,11 @@ from monai.utils import exact_version, is_scalar, optional_import -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") DEFAULT_KEY_VAL_FORMAT = "{}: {:.4f} " DEFAULT_TAG = "Loss" diff --git a/monai/handlers/surface_distance.py b/monai/handlers/surface_distance.py index d3fa69bfce..7c2322354a 100644 --- a/monai/handlers/surface_distance.py +++ b/monai/handlers/surface_distance.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Callable, Optional +from typing import Callable, Union import torch @@ -29,7 +29,7 @@ def __init__( symmetric: bool = False, distance_metric: str = "euclidean", output_transform: Callable = lambda x: x, - device: Optional[torch.device] = None, + device: Union[str, torch.device] = "cpu", save_details: bool = True, ) -> None: """ diff --git a/monai/handlers/tensorboard_handlers.py b/monai/handlers/tensorboard_handlers.py index 4ee88bcfc9..9ad1fe6353 100644 --- a/monai/handlers/tensorboard_handlers.py +++ b/monai/handlers/tensorboard_handlers.py @@ -18,12 +18,12 @@ from monai.utils import exact_version, is_scalar, optional_import from monai.visualize import plot_2d_or_3d_image -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: from ignite.engine import Engine from torch.utils.tensorboard import SummaryWriter else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") SummaryWriter, _ = optional_import("torch.utils.tensorboard", name="SummaryWriter") DEFAULT_TAG = "Loss" diff --git a/monai/handlers/utils.py b/monai/handlers/utils.py index 9ed13d292c..2eaf3ab932 100644 --- a/monai/handlers/utils.py +++ b/monai/handlers/utils.py @@ -18,11 +18,11 @@ from monai.utils import ensure_tuple, exact_version, get_torch_version_tuple, optional_import -idist, _ = optional_import("ignite", "0.4.2", exact_version, "distributed") +idist, _ = optional_import("ignite", "0.4.4", exact_version, "distributed") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") __all__ = [ "stopping_fn_from_metric", @@ -75,7 +75,7 @@ def evenly_divisible_all_gather(data: torch.Tensor) -> torch.Tensor: # make sure the data is evenly-divisible on multi-GPUs length = data.shape[0] all_lens = idist.all_gather(length) - max_len = max(all_lens).item() + max_len = max(all_lens) if length < max_len: size = [max_len - length] + list(data.shape[1:]) data = torch.cat([data, data.new_full(size, 0)], dim=0) @@ -103,7 +103,7 @@ def string_list_all_gather(strings: List[str]) -> List[str]: # get length of strings length = len(strings) all_lens = idist.all_gather(length) - max_len = max(all_lens).item() + max_len = max(all_lens) # pad the item to make sure the same length if length < max_len: strings = strings + ["" for _ in range(max_len - length)] diff --git a/monai/handlers/validation_handler.py b/monai/handlers/validation_handler.py index 9cc2e926f4..4458a17380 100644 --- a/monai/handlers/validation_handler.py +++ b/monai/handlers/validation_handler.py @@ -14,11 +14,11 @@ from monai.engines.evaluator import Evaluator from monai.utils import exact_version, optional_import -Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events") +Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: from ignite.engine import Engine else: - Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine") + Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") class ValidationHandler: diff --git a/requirements-dev.txt b/requirements-dev.txt index 3eeab474b6..1508eae4fe 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,6 +1,6 @@ # 
Full requirements for developments -r requirements-min.txt -pytorch-ignite==0.4.2 +pytorch-ignite==0.4.4 gdown>=3.6.4 scipy itk>=5.0 diff --git a/setup.cfg b/setup.cfg index bbdcdf805d..9dd9fa106b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,7 +27,7 @@ all = scikit-image>=0.14.2 pillow tensorboard - pytorch-ignite==0.4.2 + pytorch-ignite==0.4.4 gdown>=3.6.4 torchvision itk>=5.0 @@ -44,7 +44,7 @@ tensorboard = gdown = gdown>=3.6.4 ignite = - pytorch-ignite==0.4.2 + pytorch-ignite==0.4.4 torchvision = torchvision itk = From 3ea9ed4397cfdf134cb49a0808b6fada332ed56a Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Tue, 16 Mar 2021 02:53:02 +0000 Subject: [PATCH 059/457] update contrib guide to include some recent changes in dev tools (#1763) Signed-off-by: Wenqi Li --- CONTRIBUTING.md | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 325f81b127..79cbbfabac 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -24,7 +24,7 @@ We are happy to talk with you about your needs for MONAI and your ideas for cont ### Does it belong in PyTorch instead of MONAI? -MONAI is based on the PyTorch and Numpy libraries. These libraries implement what we consider to be best practice for general scientific computing and deep learning functionality. MONAI builds on these with a strong focus on medical applications. As such, it is a good idea to consider whether your functionality is medical-application specific or not. General deep learning functionality may be better off in PyTorch; you can find their contribution guidelines [here](https://pytorch.org/docs/stable/community/contribution_guide.html). +MONAI is part of [PyTorch Ecosystem](https://pytorch.org/ecosystem/), and mainly based on the PyTorch and Numpy libraries. These libraries implement what we consider to be best practice for general scientific computing and deep learning functionality. MONAI builds on these with a strong focus on medical applications. As such, it is a good idea to consider whether your functionality is medical-application specific or not. General deep learning functionality may be better off in PyTorch; you can find their contribution guidelines [here](https://pytorch.org/docs/stable/community/contribution_guide.html). ## The contribution process @@ -51,8 +51,12 @@ Coding style is checked and enforced by flake8, black, and isort, using [a flake Before submitting a pull request, we recommend that all linting should pass, by running the following command locally: ```bash -pip install -U -r requirements-dev.txt # install the latest tools -./runtests.sh --codeformat # runs the linting tools +# optionally update the dependencies and dev tools +python -m pip install -U pip +python -m pip install -U -r requirements-dev.txt + +# run the linting and type checking tools +./runtests.sh --codeformat # try to fix the coding style errors automatically ./runtests.sh --autofix @@ -86,7 +90,7 @@ MONAI tests are located under `tests/`. - The unit test's file name follows `test_[module_name].py`. - The integration test's file name follows `test_integration_[workflow_name].py`. -A bash script (`runtests.sh`) is provided to run all tests locally +A bash script (`runtests.sh`) is provided to run all tests locally. Please run ``./runtests.sh -h`` to see all options. 
To run a particular test, for example `tests/test_dice_loss.py`:

@@ -98,7 +102,7 @@ Before submitting a pull request, we recommend that all linting and unit tests
 should pass, by running the following command locally:

 ```bash
-./runtests.sh --codeformat --coverage --unittests
+./runtests.sh -f -u --net --coverage
 ```
 or (for new features that would not break existing functionality):

@@ -107,7 +111,7 @@ or (for new features that would not break existing functionality):
 ```

 It is recommended that the new test `test_[module_name].py` is constructed by using only
-python 3.6+ build-in functions, `torch`, `numpy`, and `parameterized` packages.
+python 3.6+ built-in functions, `torch`, `numpy`, `coverage` (for reporting code coverages) and `parameterized` (for organising test cases) packages.
 If it requires any other external packages, please make sure:
 - the packages are listed in [`requirements-dev.txt`](requirements-dev.txt)
 - the new test `test_[module_name].py` is added to the `exclude_cases` in [`./tests/min_tests.py`](./tests/min_tests.py) so that
@@ -141,7 +145,7 @@ Before submitting a pull request, it is recommended to:
 - check the auto-generated documentation (by browsing `./docs/build/html/index.html` with a web browser)
 - type `make clean` in `docs/` folder to remove the current build files.

-Please type `make help` for all supported format options.
+Please type `make help` in `docs/` folder for all supported format options.

 #### Automatic code formatting
 MONAI provides support of automatic Python code formatting via [a customised GitHub action](https://github.com/Project-MONAI/monai-code-formatter).
@@ -251,6 +255,7 @@ All code review comments should be specific, constructive, and actionable.
 1. Read carefully the descriptions of the pull request and the files changed, write comments if needed.
 1. Make in-line comments to specific code segments, [request for changes](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-request-reviews) if needed.
 1. Review any further code changes until all comments addressed by the contributors.
+1. Comment to trigger `/black` and/or `/integration-test` for optional auto code formatting and [integration tests](.github/workflows/integration.yml).
 1. Merge the pull request to the master branch.
 1. Close the corresponding task ticket on [the issue list][monai issue list].

From 13a2c59c55863f4a2a968d7cb5629857e7011259 Mon Sep 17 00:00:00 2001
From: Nic Ma
Date: Tue, 16 Mar 2021 17:40:15 +0800
Subject: [PATCH 060/457] 1768 Update docs based on feedback (#1774)

* [DLMED] fix docs

Signed-off-by: Nic Ma

* [MONAI] python code formatting

Signed-off-by: monai-bot

Co-authored-by: monai-bot
---
 docs/source/highlights.md    | 6 +++---
 monai/transforms/io/array.py | 7 ++++++-
 runtests.sh                  | 2 +-
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/docs/source/highlights.md b/docs/source/highlights.md
index 29302bda77..5baaa75f4c 100644
--- a/docs/source/highlights.md
+++ b/docs/source/highlights.md
@@ -299,9 +299,9 @@ In 2017, NVIDIA researchers developed a methodology for mixed-precision training

 For the PyTorch 1.6 release, developers at NVIDIA and Facebook moved mixed precision functionality into PyTorch core as the AMP package, `torch.cuda.amp`.

-MONAI workflows can easily set `amp=True/False` in `SupervisedTrainer` or `SupervisedEvaluator` during training or evaluation to enable/disable AMP. 
And we tried to compare the training speed if AMP ON/OFF on Tesla V100 GPU with CUDA 11 and PyTorch 1.6, got some benchmark for reference:
+MONAI workflows can easily set `amp=True/False` in `SupervisedTrainer` or `SupervisedEvaluator` during training or evaluation to enable/disable AMP. We compared the training speed with AMP on and off on an NVIDIA V100 GPU with CUDA 11 and PyTorch 1.6, and got some benchmarks for reference:
 ![image](../images/amp_training_v100.png)
-We also executed the same test program on Testa A100 GPU with the same software environment, got much faster benchmark for reference:
+We also executed the same test program on an NVIDIA A100 GPU with the same software environment, and got much faster benchmarks for reference:
 ![image](../images/amp_training_a100.png)
 More details is available at [AMP training tutorial](https://github.com/Project-MONAI/tutorials/blob/master/acceleration/automatic_mixed_precision.ipynb). We also tried to combine AMP with `CacheDataset` and `Novograd` optimizer to achieve the fast training in MONAI, able to obtain approximately 12x speedup compared with a Pytorch native implementation when the training converges at a validation mean dice of 0.93. Benchmark for reference:
@@ -310,7 +310,7 @@ More details is available at [Fast training tutorial](https://github.com/Project

 ### 2. Distributed data parallel
 Distributed data parallel is an important feature of PyTorch to connect multiple GPU devices on single or multiple nodes to train or evaluate models. MONAI provides demos for reference: train/evaluate with PyTorch DDP, train/evaluate with Horovod, train/evaluate with Ignite DDP, partition dataset and train with SmartCacheDataset, as well as a real world training example based on Decathlon challenge Task01 - Brain Tumor segmentation.
-The demo contains distributed caching, training, and validation. We tried to train this example on NVIDIA NGC server, got some performance benchmarks for reference(PyTorch 1.6, CUDA 11, Tesla V100 GPUs):
+The demo contains distributed caching, training, and validation. We trained this example on an NVIDIA NGC server and got some performance benchmarks for reference (PyTorch 1.6, CUDA 11, NVIDIA V100 GPUs):
 ![image](../images/distributed_training.png)

 ### 3. C++/CUDA optimized modules

diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py
index 002bfd8242..60437307be 100644
--- a/monai/transforms/io/array.py
+++ b/monai/transforms/io/array.py
@@ -145,7 +145,12 @@
                 break

         if reader is None:
-            raise RuntimeError(f"can not find suitable reader for this file: {filename}.")
+            raise RuntimeError(
+                f"cannot find suitable reader for this file: {filename}. \
+                Please install dependency libraries: (nii, nii.gz) -> Nibabel, (png, jpg, bmp) -> PIL, \
+                (npz, npy) -> Numpy, others -> ITK. Refer to the installation instruction: \
+                https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies."
+ ) img = reader.read(filename) img_array, meta_data = reader.get_data(img) diff --git a/runtests.sh b/runtests.sh index 85ede904f6..0e87a3a4e5 100755 --- a/runtests.sh +++ b/runtests.sh @@ -175,7 +175,7 @@ function print_error_msg() { function print_style_fail_msg() { echo "${red}Check failed!${noColor}" - echo "Please run auto style fixes: ${green}./runtests.sh --autofix --nounittests${noColor}" + echo "Please run auto style fixes: ${green}./runtests.sh --autofix${noColor}" } function is_pip_installed() { From 3406398d37926923da9253e018c933c8b92468cc Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Tue, 16 Mar 2021 11:19:21 +0000 Subject: [PATCH 061/457] lossless inverse (#1767) lossless inverse transforms (non-croppad) --- monai/transforms/spatial/array.py | 8 +- monai/transforms/spatial/dictionary.py | 134 +++++++++++++++++++++++-- tests/test_inverse.py | 79 +++++++++++++++ 3 files changed, 209 insertions(+), 12 deletions(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 2867361b8e..33b8da3ebb 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -281,7 +281,7 @@ def __call__( ornt[:, 0] += 1 # skip channel dim ornt = np.concatenate([np.array([[0, 1]]), ornt]) shape = data_array.shape[1:] - data_array = nib.orientations.apply_orientation(data_array, ornt) + data_array = np.ascontiguousarray(nib.orientations.apply_orientation(data_array, ornt)) new_affine = affine_ @ nib.orientations.inv_ornt_aff(spatial_ornt, shape) new_affine = to_affine_nd(affine, new_affine) return data_array, affine, new_affine @@ -590,7 +590,7 @@ def __init__(self, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1)) -> None: If axis is negative it counts from the last to the first axis. """ self.k = k - spatial_axes_ = ensure_tuple(spatial_axes) + spatial_axes_: Tuple[int, int] = ensure_tuple(spatial_axes) # type: ignore if len(spatial_axes_) != 2: raise ValueError("spatial_axes must be 2 int numbers to indicate the axes to rotate 90 degrees.") self.spatial_axes = spatial_axes_ @@ -620,7 +620,7 @@ def __init__(self, prob: float = 0.1, max_k: int = 3, spatial_axes: Tuple[int, i spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes. Default: (0, 1), this is the first two axis in spatial dimensions. """ - RandomizableTransform.__init__(self, min(max(prob, 0.0), 1.0)) + RandomizableTransform.__init__(self, prob) self.max_k = max_k self.spatial_axes = spatial_axes @@ -758,7 +758,7 @@ class RandFlip(RandomizableTransform): """ def __init__(self, prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None: - RandomizableTransform.__init__(self, min(max(prob, 0.0), 1.0)) + RandomizableTransform.__init__(self, prob) self.flipper = Flip(spatial_axis=spatial_axis) def __call__(self, img: np.ndarray) -> np.ndarray: diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index d9d38242fb..170006ed2b 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -15,6 +15,7 @@ Class names are ended with 'd' to denote dictionary-based transforms. 
""" +from copy import deepcopy from typing import Any, Dict, Hashable, Mapping, Optional, Sequence, Tuple, Union import numpy as np @@ -23,6 +24,7 @@ from monai.config import DtypeLike, KeysCollection from monai.networks.layers.simplelayers import GaussianFilter from monai.transforms.croppad.array import CenterSpatialCrop +from monai.transforms.inverse import InvertibleTransform from monai.transforms.spatial.array import ( Affine, Flip, @@ -47,6 +49,10 @@ ensure_tuple_rep, fall_back_tuple, ) +from monai.utils.enums import InverseKeys +from monai.utils.module import optional_import + +nib, _ = optional_import("nibabel") __all__ = [ "Spacingd", @@ -204,7 +210,7 @@ def __call__( return d -class Orientationd(MapTransform): +class Orientationd(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Orientation`. @@ -259,13 +265,36 @@ def __call__( ) -> Dict[Union[Hashable, str], Union[np.ndarray, Dict[str, np.ndarray]]]: d: Dict = dict(data) for key in self.key_iterator(d): - meta_data = d[f"{key}_{self.meta_key_postfix}"] - d[key], _, new_affine = self.ornt_transform(d[key], affine=meta_data["affine"]) + meta_data_key = f"{key}_{self.meta_key_postfix}" + meta_data = d[meta_data_key] + d[key], old_affine, new_affine = self.ornt_transform(d[key], affine=meta_data["affine"]) + self.push_transform(d, key, extra_info={"meta_data_key": meta_data_key, "old_affine": old_affine}) + d[meta_data_key]["affine"] = new_affine + return d + + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + meta_data = d[transform[InverseKeys.EXTRA_INFO.value]["meta_data_key"]] + orig_affine = transform[InverseKeys.EXTRA_INFO.value]["old_affine"] + orig_axcodes = nib.orientations.aff2axcodes(orig_affine) + inverse_transform = Orientation( + axcodes=orig_axcodes, + as_closest_canonical=False, + labels=self.ornt_transform.labels, + ) + # Apply inverse + d[key], _, new_affine = inverse_transform(d[key], affine=meta_data["affine"]) meta_data["affine"] = new_affine + # Remove the applied transform + self.pop_transform(d, key) + return d -class Rotate90d(MapTransform): +class Rotate90d(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Rotate90`. """ @@ -286,11 +315,31 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key in self.key_iterator(d): + self.push_transform(d, key) d[key] = self.rotator(d[key]) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + _ = self.get_most_recent_transform(d, key) + # Create inverse transform + spatial_axes = self.rotator.spatial_axes + num_times_rotated = self.rotator.k + num_times_to_rotate = 4 - num_times_rotated + inverse_transform = Rotate90(num_times_to_rotate, spatial_axes) + # Might need to convert to numpy + if isinstance(d[key], torch.Tensor): + d[key] = torch.Tensor(d[key]).cpu().numpy() + # Apply inverse + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + -class RandRotate90d(RandomizableTransform, MapTransform): +class RandRotate90d(RandomizableTransform, MapTransform, InvertibleTransform): """ Dictionary-based version :py:class:`monai.transforms.RandRotate90`. 
With probability `prob`, input arrays are rotated by 90 degrees @@ -337,6 +386,27 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Mapping[Hashable, np. for key in self.key_iterator(d): if self._do_transform: d[key] = rotator(d[key]) + self.push_transform(d, key, extra_info={"rand_k": self._rand_k}) + return d + + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Check if random transform was actually performed (based on `prob`) + if transform[InverseKeys.DO_TRANSFORM.value]: + # Create inverse transform + num_times_rotated = transform[InverseKeys.EXTRA_INFO.value]["rand_k"] + num_times_to_rotate = 4 - num_times_rotated + inverse_transform = Rotate90(num_times_to_rotate, self.spatial_axes) + # Might need to convert to numpy + if isinstance(d[key], torch.Tensor): + d[key] = torch.Tensor(d[key]).cpu().numpy() + # Apply inverse + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + return d @@ -789,7 +859,7 @@ def __call__( return d -class Flipd(MapTransform): +class Flipd(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Flip`. @@ -814,11 +884,26 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key in self.key_iterator(d): + self.push_transform(d, key) d[key] = self.flipper(d[key]) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + _ = self.get_most_recent_transform(d, key) + # Might need to convert to numpy + if isinstance(d[key], torch.Tensor): + d[key] = torch.Tensor(d[key]).cpu().numpy() + # Inverse is same as forward + d[key] = self.flipper(d[key]) + # Remove the applied transform + self.pop_transform(d, key) -class RandFlipd(RandomizableTransform, MapTransform): + return d + + +class RandFlipd(RandomizableTransform, MapTransform, InvertibleTransform): """ Dictionary-based version :py:class:`monai.transforms.RandFlip`. @@ -851,10 +936,26 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda for key in self.key_iterator(d): if self._do_transform: d[key] = self.flipper(d[key]) + self.push_transform(d, key) + return d + + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Check if random transform was actually performed (based on `prob`) + if transform[InverseKeys.DO_TRANSFORM.value]: + # Might need to convert to numpy + if isinstance(d[key], torch.Tensor): + d[key] = torch.Tensor(d[key]).cpu().numpy() + # Inverse is same as forward + d[key] = self.flipper(d[key]) + # Remove the applied transform + self.pop_transform(d, key) return d -class RandAxisFlipd(RandomizableTransform, MapTransform): +class RandAxisFlipd(RandomizableTransform, MapTransform, InvertibleTransform): """ Dictionary-based version :py:class:`monai.transforms.RandAxisFlip`. 
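The hunks above establish the invertible-transform pattern of this patch: each `__call__` records what was applied via `push_transform`, and `inverse` rebuilds the opposite operation from that record before popping it. A minimal round-trip sketch of the intended usage (the `"image"` key, array shape, and flip axes here are illustrative assumptions, not part of the patch):

```python
import numpy as np

from monai.transforms import Flipd

# channel-first array: 1 channel, 2 x 3 x 4 spatial dims (illustrative)
data = {"image": np.arange(24, dtype=np.float32).reshape(1, 2, 3, 4)}

flip = Flipd(keys="image", spatial_axis=[0, 1])
flipped = flip(data)  # forward call appends a record under "image_transforms"
restored = flip.inverse(flipped)  # re-applies the flip (self-inverse) and pops the record

assert np.allclose(restored["image"], data["image"])
```

Because a flip is its own inverse, `Flipd.inverse` simply calls the forward `flipper` again; transforms such as `Rotate90d` instead construct a new transform from the recorded parameters (rotating the remaining `4 - k` times).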
@@ -885,6 +986,23 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda for key in self.key_iterator(d): if self._do_transform: d[key] = flipper(d[key]) + self.push_transform(d, key, extra_info={"axis": self._axis}) + return d + + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Check if random transform was actually performed (based on `prob`) + if transform[InverseKeys.DO_TRANSFORM.value]: + flipper = Flip(spatial_axis=transform[InverseKeys.EXTRA_INFO.value]["axis"]) + # Might need to convert to numpy + if isinstance(d[key], torch.Tensor): + d[key] = torch.Tensor(d[key]).cpu().numpy() + # Inverse is same as forward + d[key] = flipper(d[key]) + # Remove the applied transform + self.pop_transform(d, key) return d diff --git a/tests/test_inverse.py b/tests/test_inverse.py index bb2d997eb5..0c29ea7b08 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -28,12 +28,18 @@ Compose, CropForegroundd, DivisiblePadd, + Flipd, InvertibleTransform, LoadImaged, + Orientationd, + RandAxisFlipd, + RandFlipd, Randomizable, + RandRotate90d, RandSpatialCropd, ResizeWithPadOrCrop, ResizeWithPadOrCropd, + Rotate90d, SpatialCropd, SpatialPadd, allow_missing_keys_mode, @@ -207,6 +213,79 @@ TESTS.append(("ResizeWithPadOrCropd 3d", "3D", 0, ResizeWithPadOrCropd(KEYS, [201, 150, 105]))) +TESTS.append( + ( + "Flipd 3d", + "3D", + 0, + Flipd(KEYS, [1, 2]), + ) +) + +TESTS.append( + ( + "Flipd 3d", + "3D", + 0, + Flipd(KEYS, [1, 2]), + ) +) + +TESTS.append( + ( + "RandFlipd 3d", + "3D", + 0, + RandFlipd(KEYS, 1, [1, 2]), + ) +) + +TESTS.append( + ( + "RandAxisFlipd 3d", + "3D", + 0, + RandAxisFlipd(KEYS, 1), + ) +) + +for acc in [True, False]: + TESTS.append( + ( + "Orientationd 3d", + "3D", + 0, + Orientationd(KEYS, "RAS", as_closest_canonical=acc), + ) + ) + +TESTS.append( + ( + "Rotate90d 2d", + "2D", + 0, + Rotate90d(KEYS), + ) +) + +TESTS.append( + ( + "Rotate90d 3d", + "3D", + 0, + Rotate90d(KEYS, k=2, spatial_axes=(1, 2)), + ) +) + +TESTS.append( + ( + "RandRotate90d 3d", + "3D", + 0, + RandRotate90d(KEYS, prob=1, spatial_axes=(1, 2)), + ) +) + TESTS_COMPOSE_X2 = [(t[0] + " Compose", t[1], t[2], Compose(Compose(t[3:]))) for t in TESTS] TESTS = TESTS + TESTS_COMPOSE_X2 # type: ignore From 43d9582cbe16f699c4c89d448be3c095af04e01d Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Wed, 17 Mar 2021 00:26:51 +0800 Subject: [PATCH 062/457] 1776 Add support for new arg `greater_or_equal` from ignite 0.4.4 (#1777) * [DLMED] add greater_or_equal Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> --- monai/handlers/checkpoint_saver.py | 4 ++ tests/test_handler_checkpoint_saver.py | 55 +++++++++++++++++++++++--- 2 files changed, 54 insertions(+), 5 deletions(-) diff --git a/monai/handlers/checkpoint_saver.py b/monai/handlers/checkpoint_saver.py index 0c65b8cd4b..9c67992b36 100644 --- a/monai/handlers/checkpoint_saver.py +++ b/monai/handlers/checkpoint_saver.py @@ -60,6 +60,8 @@ class CheckpointSaver: if `True`, then will save an object in the checkpoint file with key `checkpointer` to be consistent with ignite: https://github.com/pytorch/ignite/blob/master/ignite/handlers/checkpoint.py#L99. typically, it's used to resume training and compare current metric with previous N values. 
+        key_metric_greater_or_equal: if `True`, the latest equally scored model is stored. Otherwise,
+            save the first equally scored model. Defaults to `False`.
        epoch_level: save checkpoint during training for every N epochs or every N iterations.
            `True` is epoch level, `False` is iteration level.
        save_interval: save checkpoint every N epochs, default is 0 to save no checkpoint.
@@ -90,6 +92,7 @@ def __init__(
        key_metric_n_saved: int = 1,
        key_metric_filename: Optional[str] = None,
        key_metric_save_state: bool = False,
+        key_metric_greater_or_equal: bool = False,
        epoch_level: bool = True,
        save_interval: int = 0,
        n_saved: Optional[int] = None,
@@ -163,6 +166,7 @@ def _score_func(engine: Engine):
                score_name="key_metric",
                n_saved=key_metric_n_saved,
                include_self=key_metric_save_state,
+                greater_or_equal=key_metric_greater_or_equal,
            )

        if save_interval > 0:
diff --git a/tests/test_handler_checkpoint_saver.py b/tests/test_handler_checkpoint_saver.py
index 5c2b750a57..14474054df 100644
--- a/tests/test_handler_checkpoint_saver.py
+++ b/tests/test_handler_checkpoint_saver.py
@@ -22,7 +22,20 @@

from monai.handlers import CheckpointLoader, CheckpointSaver

-TEST_CASE_1 = [True, None, False, None, 1, None, False, True, 0, None, ["test_checkpoint_final_iteration=40.pt"]]
+TEST_CASE_1 = [
+    True,
+    None,
+    False,
+    None,
+    1,
+    None,
+    False,
+    False,
+    True,
+    0,
+    None,
+    ["test_checkpoint_final_iteration=40.pt"],
+]

TEST_CASE_2 = [
    False,
@@ -33,6 +46,7 @@
    None,
    False,
    True,
+    False,
    0,
    None,
    ["test_checkpoint_key_metric=32.pt", "test_checkpoint_key_metric=40.pt"],
@@ -47,6 +61,7 @@
    None,
    False,
    True,
+    True,
    2,
    2,
    ["test_checkpoint_epoch=2.pt", "test_checkpoint_epoch=4.pt"],
@@ -61,20 +76,48 @@
    None,
    False,
    False,
+    False,
    10,
    2,
    ["test_checkpoint_iteration=30.pt", "test_checkpoint_iteration=40.pt"],
]

-TEST_CASE_5 = [True, None, False, None, 1, None, False, True, 0, None, ["test_checkpoint_final_iteration=40.pt"], True]
+TEST_CASE_5 = [
+    True,
+    None,
+    False,
+    None,
+    1,
+    None,
+    False,
+    False,
+    True,
+    0,
+    None,
+    ["test_checkpoint_final_iteration=40.pt"],
+    True,
+]
+
+TEST_CASE_6 = [True, "final_model.pt", False, None, 1, None, False, False, True, 0, None, ["final_model.pt"]]

-TEST_CASE_6 = [True, "final_model.pt", False, None, 1, None, False, True, 0, None, ["final_model.pt"]]
+TEST_CASE_7 = [False, None, True, "val_loss", 1, "model.pt", False, False, True, 0, None, ["model.pt"]]

-TEST_CASE_7 = [False, None, True, "val_loss", 1, "model.pt", False, True, 0, None, ["model.pt"]]
+TEST_CASE_8 = [False, None, True, "val_loss", 1, "model.pt", False, True, True, 0, None, ["model.pt"]]


class TestHandlerCheckpointSaver(unittest.TestCase):
-    @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7])
+    @parameterized.expand(
+        [
+            TEST_CASE_1,
+            TEST_CASE_2,
+            TEST_CASE_3,
+            TEST_CASE_4,
+            TEST_CASE_5,
+            TEST_CASE_6,
+            TEST_CASE_7,
+            TEST_CASE_8,
+        ]
+    )
    def test_file(
        self,
        save_final,
@@ -84,6 +127,7 @@ def test_file(
        key_metric_n_saved,
        key_metric_filename,
        key_metric_save_state,
+        key_metric_greater_or_equal,
        epoch_level,
        save_interval,
        n_saved,
@@ -117,6 +161,7 @@ def _train_func(engine, batch):
            key_metric_n_saved,
            key_metric_filename,
            key_metric_save_state,
+            key_metric_greater_or_equal,
            epoch_level,
            save_interval,
            n_saved,

From 050efb7cc13237c716cc9d4d7751cd33805d1056 Mon Sep 17 00:00:00 2001
From: Nic Ma
Date: Wed, 17 Mar 2021 03:58:23 +0800
Subject: [PATCH 063/457] 1757 Enhance CheckpointLoader to restore max_epochs
 (#1775)

* [DLMED]
enhance CheckpointLoader Signed-off-by: Nic Ma * [DLMED] add more unit tests Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma --- monai/handlers/checkpoint_loader.py | 11 +++++++++ tests/test_handler_checkpoint_loader.py | 31 ++++++++++++++++++------- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py index e6319a3c64..bb67428bef 100644 --- a/monai/handlers/checkpoint_loader.py +++ b/monai/handlers/checkpoint_loader.py @@ -80,5 +80,16 @@ def __call__(self, engine: Engine) -> None: """ checkpoint = torch.load(self.load_path, map_location=self.map_location) + # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint + prior_max_epochs = engine.state.max_epochs Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint) + if engine.state.epoch > prior_max_epochs: + raise ValueError( + f"Epoch count ({engine.state.epoch}) in checkpoint is larger than " + f"the `engine.state.max_epochs` ({prior_max_epochs}) of engine. To further train from checkpoint, " + "construct trainer with `max_epochs` larger than checkpoint's epoch count. " + "To use checkpoint for inference, no need to load state_dict for the engine." + ) + engine.state.max_epochs = prior_max_epochs + self.logger.info(f"Restored all variables from {self.load_path}") diff --git a/tests/test_handler_checkpoint_loader.py b/tests/test_handler_checkpoint_loader.py index 8b0f752ff4..838cc3f4dd 100644 --- a/tests/test_handler_checkpoint_loader.py +++ b/tests/test_handler_checkpoint_loader.py @@ -16,7 +16,7 @@ import torch import torch.optim as optim -from ignite.engine import Engine +from ignite.engine import Engine, Events from monai.handlers import CheckpointLoader, CheckpointSaver @@ -33,15 +33,30 @@ def test_one_save_one_load(self): data2["weight"] = torch.tensor([0.2]) net2.load_state_dict(data2) with tempfile.TemporaryDirectory() as tempdir: - engine = Engine(lambda e, b: None) - CheckpointSaver(save_dir=tempdir, save_dict={"net": net1}, save_final=True).attach(engine) - engine.run([0] * 8, max_epochs=5) - path = tempdir + "/net_final_iteration=40.pt" - engine = Engine(lambda e, b: None) - CheckpointLoader(load_path=path, load_dict={"net": net2}).attach(engine) - engine.run([0] * 8, max_epochs=1) + engine1 = Engine(lambda e, b: None) + CheckpointSaver(save_dir=tempdir, save_dict={"net": net1, "eng": engine1}, save_final=True).attach(engine1) + engine1.run([0] * 8, max_epochs=5) + path = tempdir + "/checkpoint_final_iteration=40.pt" + engine2 = Engine(lambda e, b: None) + CheckpointLoader(load_path=path, load_dict={"net": net2, "eng": engine2}).attach(engine2) + + @engine2.on(Events.STARTED) + def check_epoch(engine: Engine): + self.assertEqual(engine.state.epoch, 5) + + engine2.run([0] * 8, max_epochs=8) torch.testing.assert_allclose(net2.state_dict()["weight"], torch.tensor([0.1])) + # test bad case with max_epochs smaller than current epoch + engine3 = Engine(lambda e, b: None) + CheckpointLoader(load_path=path, load_dict={"net": net2, "eng": engine3}).attach(engine3) + + try: + engine3.run([0] * 8, max_epochs=3) + except ValueError: + self.assertEqual(engine3.state.epoch, 5) + self.assertEqual(engine3.state.max_epochs, 5) + def test_two_save_one_load(self): logging.basicConfig(stream=sys.stdout, level=logging.INFO) net1 = torch.nn.PReLU() From 
bcbd6859ef0652b6dce491f372edeb8e9fc43c73 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Tue, 16 Mar 2021 18:09:36 -0400 Subject: [PATCH 064/457] Update WSIReader with cuCIM (#1773) * Update WSIReader with cuCIM Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update unittests for cuCIM Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- .github/workflows/pythonapp.yml | 4 ++++ docs/requirements.txt | 1 + monai/data/image_reader.py | 21 ++++++++++----------- requirements-dev.txt | 1 + setup.cfg | 3 +++ tests/test_cuimage_reader.py | 16 ++++++++-------- 6 files changed, 27 insertions(+), 19 deletions(-) diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 8ed1f6d21e..83d01ff5e0 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -83,6 +83,10 @@ jobs: python -m pip install torch==1.8.0+cpu torchvision==0.9.0+cpu -f https://download.pytorch.org/whl/torch_stable.html # min. requirements for windows instances python -c "f=open('requirements-dev.txt', 'r'); txt=f.readlines(); f.close(); print(txt); f=open('requirements-dev.txt', 'w'); f.writelines(txt[1:12]); f.close()" + - if: runner.os == 'macos' + name: Remove cucim installation (Mac only) + run: | + python -c "f=open('requirements-dev.txt', 'r'); txt=f.readlines(); f.close(); print(txt); f=open('requirements-dev.txt', 'w'); f.writelines([t for t in txt if not t.startswith('cucim')]); f.close()" - name: Install the dependencies run: | python -m pip install torch==1.8.0 torchvision==0.9.0 diff --git a/docs/requirements.txt b/docs/requirements.txt index 22fd2589f0..ae3ced2c05 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,6 +4,7 @@ pytorch-ignite==0.4.4 numpy>=1.17 itk>=5.0 nibabel +cucim==0.18.0 openslide-python==1.1.2 parameterized scikit-image>=0.14.2 diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index ddb2c8c913..67425c0f47 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -25,7 +25,7 @@ from .utils import is_supported_format if TYPE_CHECKING: - import cuimage + import cucim import itk # type: ignore import nibabel as nib import openslide @@ -33,14 +33,14 @@ from nibabel.nifti1 import Nifti1Image from PIL import Image as PILImage - has_itk = has_nib = has_pil = has_cux = has_osl = True + has_itk = has_nib = has_pil = has_cim = has_osl = True else: itk, has_itk = optional_import("itk", allow_namespace_pkg=True) Image, _ = optional_import("itk", allow_namespace_pkg=True, name="Image") nib, has_nib = optional_import("nibabel") Nifti1Image, _ = optional_import("nibabel.nifti1", name="Nifti1Image") PILImage, has_pil = optional_import("PIL.Image") - cuimage, has_cux = optional_import("cuimage") + cucim, has_cim = optional_import("cucim") openslide, has_osl = optional_import("openslide") __all__ = ["ImageReader", "ITKReader", "NibabelReader", "NumpyReader", "PILReader", "WSIReader"] @@ -638,8 +638,7 @@ class WSIReader(ImageReader): Read whole slide imaging and extract patches. Args: - reader_lib: backend library to load the images, available options: "OpenSlide" or "cuClaraImage". - TODO: `cuClaraImage` package is unavailable so far, will enable the support later. + reader_lib: backend library to load the images, available options: "OpenSlide" or "cuCIM". 
""" @@ -649,11 +648,11 @@ def __init__(self, reader_lib: str = "OpenSlide"): if self.reader_lib == "openslide": if has_osl: self.wsi_reader = openslide.OpenSlide - elif self.reader_lib == "cuclaraimage": - if has_cux: - self.wsi_reader = cuimage.CuImage + elif self.reader_lib == "cucim": + if has_cim: + self.wsi_reader = cucim.CuImage else: - raise ValueError('`reader_lib` should be either "cuClaraImage" or "OpenSlide"') + raise ValueError('`reader_lib` should be either "cuCIM" or "OpenSlide"') def verify_suffix(self, filename: Union[Sequence[str], str]) -> bool: """ @@ -676,8 +675,8 @@ def read(self, data: Union[Sequence[str], str, np.ndarray], **kwargs): """ if (self.reader_lib == "openslide") and (not has_osl): raise ImportError("No module named 'openslide'") - elif (self.reader_lib == "cuclaraimage") and (not has_cux): - raise ImportError("No module named 'cuimage'") + elif (self.reader_lib == "cucim") and (not has_cim): + raise ImportError("No module named 'cucim'") img_: List = [] diff --git a/requirements-dev.txt b/requirements-dev.txt index 1508eae4fe..82dd695d47 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -30,4 +30,5 @@ Sphinx==3.3.0 recommonmark==0.6.0 sphinx-autodoc-typehints==1.11.1 sphinx-rtd-theme==0.5.0 +cucim==0.18.0 openslide-python==1.1.2 diff --git a/setup.cfg b/setup.cfg index 9dd9fa106b..1222761801 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,6 +32,7 @@ all = torchvision itk>=5.0 tqdm>=4.47.0 + cucim==0.18.0 openslide-python==1.1.2 nibabel = nibabel @@ -55,6 +56,8 @@ lmdb = lmdb psutil = psutil +cucim = + cucim==0.18.0 openslide = openslide-python==1.1.2 diff --git a/tests/test_cuimage_reader.py b/tests/test_cuimage_reader.py index 7cdf692a30..221a458ca8 100644 --- a/tests/test_cuimage_reader.py +++ b/tests/test_cuimage_reader.py @@ -10,7 +10,7 @@ from monai.data.image_reader import WSIReader from monai.utils import optional_import -_, has_cui = optional_import("cuimage") +_, has_cim = optional_import("cucim") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" @@ -61,31 +61,31 @@ ] -class TestCuClaraImageReader(unittest.TestCase): +class TestCuCIMReader(unittest.TestCase): @parameterized.expand([TEST_CASE_0]) - @skipUnless(has_cui, "Requires CuClaraImage") + @skipUnless(has_cim, "Requires CuCIM") def test_read_whole_image(self, file_url, expected_shape): filename = self.camelyon_data_download(file_url) - reader = WSIReader("CuClaraImage") + reader = WSIReader("cuCIM") img_obj = reader.read(filename) img = reader.get_data(img_obj)[0] self.assertTupleEqual(img.shape, expected_shape) @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) - @skipUnless(has_cui, "Requires CuClaraImage") + @skipUnless(has_cim, "Requires cuCIM") def test_read_region(self, file_url, patch_info, expected_img): filename = self.camelyon_data_download(file_url) - reader = WSIReader("CuClaraImage") + reader = WSIReader("cuCIM") img_obj = reader.read(filename) img = reader.get_data(img_obj, **patch_info)[0] self.assertTupleEqual(img.shape, expected_img.shape) self.assertIsNone(assert_array_equal(img, expected_img)) @parameterized.expand([TEST_CASE_3, TEST_CASE_4]) - @skipUnless(has_cui, "Requires CuClaraImage") + @skipUnless(has_cim, "Requires cuCIM") def test_read_patches(self, file_url, patch_info, expected_img): filename = self.camelyon_data_download(file_url) - reader = WSIReader("CuClaraImage") + reader = WSIReader("cuCIM") img_obj = reader.read(filename) img = reader.get_data(img_obj, **patch_info)[0] self.assertTupleEqual(img.shape, 
expected_img.shape) From 55885283d5055f673fdca5f83156044649e5560c Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Tue, 16 Mar 2021 23:59:34 +0000 Subject: [PATCH 065/457] Inverse Spacingd (#1779) --- monai/transforms/spatial/array.py | 2 +- monai/transforms/spatial/dictionary.py | 39 ++++++++++++++++++++++++-- tests/test_inverse.py | 3 ++ tests/test_spacingd.py | 14 ++++++--- 4 files changed, 50 insertions(+), 8 deletions(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 33b8da3ebb..8dd2692c2d 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -151,7 +151,7 @@ def __call__( ValueError: When ``pixdim`` is nonpositive. Returns: - data_array (resampled into `self.pixdim`), original pixdim, current pixdim. + data_array (resampled into `self.pixdim`), original affine, current affine. """ _dtype = dtype or self.dtype or data_array.dtype diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 170006ed2b..dd385bfc6e 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -111,7 +111,7 @@ NumpyPadModeSequence = Union[Sequence[Union[NumpyPadMode, str]], NumpyPadMode, str] -class Spacingd(MapTransform): +class Spacingd(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Spacing`. @@ -194,10 +194,11 @@ def __call__( for key, mode, padding_mode, align_corners, dtype in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners, self.dtype ): - meta_data = d[f"{key}_{self.meta_key_postfix}"] + meta_data_key = f"{key}_{self.meta_key_postfix}" + meta_data = d[meta_data_key] # resample array of each corresponding key # using affine fetched from d[affine_key] - d[key], _, new_affine = self.spacing_transform( + d[key], old_affine, new_affine = self.spacing_transform( data_array=np.asarray(d[key]), affine=meta_data["affine"], mode=mode, @@ -205,10 +206,42 @@ def __call__( align_corners=align_corners, dtype=dtype, ) + self.push_transform(d, key, extra_info={"meta_data_key": meta_data_key, "old_affine": old_affine}) # set the 'affine' key meta_data["affine"] = new_affine return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key, mode, padding_mode, align_corners, dtype in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners, self.dtype + ): + transform = self.get_most_recent_transform(d, key) + if self.spacing_transform.diagonal: + raise RuntimeError( + "Spacingd:inverse not yet implemented for diagonal=True. 
" + + "Please raise a github issue if you need this feature" + ) + # Create inverse transform + meta_data = d[transform[InverseKeys.EXTRA_INFO.value]["meta_data_key"]] + old_affine = np.array(transform[InverseKeys.EXTRA_INFO.value]["old_affine"]) + orig_pixdim = np.sqrt(np.sum(np.square(old_affine), 0))[:-1] + inverse_transform = Spacing(orig_pixdim, diagonal=self.spacing_transform.diagonal) + # Apply inverse + d[key], _, new_affine = inverse_transform( + data_array=np.asarray(d[key]), + affine=meta_data["affine"], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + dtype=dtype, + ) + meta_data["affine"] = new_affine + # Remove the applied transform + self.pop_transform(d, key) + + return d + class Orientationd(MapTransform, InvertibleTransform): """ diff --git a/tests/test_inverse.py b/tests/test_inverse.py index 0c29ea7b08..204e2da723 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -40,6 +40,7 @@ ResizeWithPadOrCrop, ResizeWithPadOrCropd, Rotate90d, + Spacingd, SpatialCropd, SpatialPadd, allow_missing_keys_mode, @@ -286,6 +287,8 @@ ) ) +TESTS.append(("Spacingd 3d", "3D", 3e-2, Spacingd(KEYS, [0.5, 0.7, 0.9], diagonal=False))) + TESTS_COMPOSE_X2 = [(t[0] + " Compose", t[1], t[2], Compose(Compose(t[3:]))) for t in TESTS] TESTS = TESTS + TESTS_COMPOSE_X2 # type: ignore diff --git a/tests/test_spacingd.py b/tests/test_spacingd.py index ec32563543..e4efe4241d 100644 --- a/tests/test_spacingd.py +++ b/tests/test_spacingd.py @@ -21,7 +21,7 @@ def test_spacingd_3d(self): data = {"image": np.ones((2, 10, 15, 20)), "image_meta_dict": {"affine": np.eye(4)}} spacing = Spacingd(keys="image", pixdim=(1, 2, 1.4)) res = spacing(data) - self.assertEqual(("image", "image_meta_dict"), tuple(sorted(res))) + self.assertEqual(("image", "image_meta_dict", "image_transforms"), tuple(sorted(res))) np.testing.assert_allclose(res["image"].shape, (2, 10, 8, 15)) np.testing.assert_allclose(res["image_meta_dict"]["affine"], np.diag([1, 2, 1.4, 1.0])) @@ -29,7 +29,7 @@ def test_spacingd_2d(self): data = {"image": np.ones((2, 10, 20)), "image_meta_dict": {"affine": np.eye(3)}} spacing = Spacingd(keys="image", pixdim=(1, 2, 1.4)) res = spacing(data) - self.assertEqual(("image", "image_meta_dict"), tuple(sorted(res))) + self.assertEqual(("image", "image_meta_dict", "image_transforms"), tuple(sorted(res))) np.testing.assert_allclose(res["image"].shape, (2, 10, 10)) np.testing.assert_allclose(res["image_meta_dict"]["affine"], np.diag((1, 2, 1))) @@ -49,7 +49,10 @@ def test_interp_all(self): ), ) res = spacing(data) - self.assertEqual(("image", "image_meta_dict", "seg", "seg_meta_dict"), tuple(sorted(res))) + self.assertEqual( + ("image", "image_meta_dict", "image_transforms", "seg", "seg_meta_dict", "seg_transforms"), + tuple(sorted(res)), + ) np.testing.assert_allclose(res["image"].shape, (2, 1, 46)) np.testing.assert_allclose(res["image_meta_dict"]["affine"], np.diag((1, 0.2, 1, 1))) @@ -69,7 +72,10 @@ def test_interp_sep(self): ), ) res = spacing(data) - self.assertEqual(("image", "image_meta_dict", "seg", "seg_meta_dict"), tuple(sorted(res))) + self.assertEqual( + ("image", "image_meta_dict", "image_transforms", "seg", "seg_meta_dict", "seg_transforms"), + tuple(sorted(res)), + ) np.testing.assert_allclose(res["image"].shape, (2, 1, 46)) np.testing.assert_allclose(res["image_meta_dict"]["affine"], np.diag((1, 0.2, 1, 1))) From b868087f1c0ec7709e20d9630ff1b189c691dd78 Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Wed, 17 Mar 
2021 16:46:14 +0800
Subject: [PATCH 066/457] 1755 improve focal loss (#1765)

* Add to_onehot_y and include_background

Signed-off-by: Yiheng Wang

* Add parameters to test seg loss

Signed-off-by: Yiheng Wang

* modify log softmax

Signed-off-by: Yiheng Wang

* Modify weight format

Signed-off-by: Yiheng Wang
---
 monai/losses/dice.py               |  10 ++-
 monai/losses/focal_loss.py         | 113 +++++++++++++++--------------
 tests/test_focal_loss.py           |  58 ++++++++++-----
 tests/test_seg_loss_integration.py |  17 ++++-
 4 files changed, 118 insertions(+), 80 deletions(-)

diff --git a/monai/losses/dice.py b/monai/losses/dice.py
index 24bd038b68..47a1605fdc 100644
--- a/monai/losses/dice.py
+++ b/monai/losses/dice.py
@@ -54,7 +54,7 @@ def __init__(
    ) -> None:
        """
        Args:
-            include_background: if False channel index 0 (background category) is excluded from the calculation.
+            include_background: if False, channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
            sigmoid: if True, apply a sigmoid function to the prediction.
            softmax: if True, apply a softmax function to the prediction.
@@ -101,10 +101,12 @@ def __init__(
    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
-            input: the shape should be BNH[WD].
-            target: the shape should be BNH[WD].
+            input: the shape should be BNH[WD], where N is the number of classes.
+            target: the shape should be BNH[WD] or B1H[WD], where N is the number of classes.

        Raises:
+            AssertionError: When input and target (after the one-hot transform, if set)
+                have different shapes.
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].

        """
@@ -136,7 +138,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
            input = input[:, 1:]

        if target.shape != input.shape:
-            raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
+            raise AssertionError(f"ground truth has different shape ({target.shape}) from input ({input.shape})")

        # reducing only spatial dimensions (not batch nor channels)
        reduce_axis: List[int] = torch.arange(2, len(input.shape)).tolist()
diff --git a/monai/losses/focal_loss.py b/monai/losses/focal_loss.py
index 920661f76f..664e7673a4 100644
--- a/monai/losses/focal_loss.py
+++ b/monai/losses/focal_loss.py
@@ -9,16 +9,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-from typing import Optional, Union
+import warnings
+from typing import Optional, Sequence, Union

import torch
import torch.nn.functional as F
-from torch.nn.modules.loss import _WeightedLoss
+from torch.nn.modules.loss import _Loss

+from monai.networks import one_hot
from monai.utils import LossReduction


-class FocalLoss(_WeightedLoss):
+class FocalLoss(_Loss):
    """
    Reimplementation of the Focal Loss described in:

@@ -29,15 +31,21 @@ class FocalLoss(_WeightedLoss):

    def __init__(
        self,
+        include_background: bool = True,
+        to_onehot_y: bool = False,
        gamma: float = 2.0,
-        weight: Optional[torch.Tensor] = None,
+        weight: Optional[Union[Sequence[float], float, int, torch.Tensor]] = None,
        reduction: Union[LossReduction, str] = LossReduction.MEAN,
    ) -> None:
        """
        Args:
+            include_background: if False, channel index 0 (background category) is excluded from the calculation.
+            to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
            gamma: value of the exponent gamma in the definition of the Focal loss.
            weight: weights to apply to the voxels of each class. If None no weights are applied.
                This corresponds to the weights `\alpha` in [1].
+                The input can be a single value (same weight for all classes), a sequence of values (the length
+                of the sequence should be the same as the number of classes).
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.
@@ -57,80 +65,75 @@ def __init__(
            fl(pred, grnd)

        """
-        super(FocalLoss, self).__init__(weight=weight, reduction=LossReduction(reduction).value)
+        super(FocalLoss, self).__init__(reduction=LossReduction(reduction).value)
+        self.include_background = include_background
+        self.to_onehot_y = to_onehot_y
        self.gamma = gamma
-        self.weight: Optional[torch.Tensor] = None
+        self.weight: Optional[Union[Sequence[float], float, int, torch.Tensor]] = weight

-    def forward(self, logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
+    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
-            logits: the shape should be BCH[WD].
-                where C (greater than 1) is the number of classes.
-                Softmax over the logits is integrated in this module for improved numerical stability.
-            target: the shape should be B1H[WD] or BCH[WD].
-                If the target's shape is B1H[WD], the target that this loss expects should be a class index
-                in the range [0, C-1] where C is the number of classes.
+            input: the shape should be BNH[WD], where N is the number of classes.
+                The input should be the original logits since it will be transformed by
+                `F.log_softmax` in the forward function.
+            target: the shape should be BNH[WD] or B1H[WD], where N is the number of classes.

        Raises:
-            ValueError: When ``target`` ndim differs from ``logits``.
-            ValueError: When ``target`` channel is not 1 and ``target`` shape differs from ``logits``.
+            AssertionError: When input and target (after the one-hot transform, if set)
+                have different shapes.
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].

        """
-        i = logits
+        n_pred_ch = input.shape[1]
+
+        if self.to_onehot_y:
+            if n_pred_ch == 1:
+                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
+            else:
+                target = one_hot(target, num_classes=n_pred_ch)
+
+        if not self.include_background:
+            if n_pred_ch == 1:
+                warnings.warn("single channel prediction, `include_background=False` ignored.")
+            else:
+                # if skipping background, removing first channel
+                target = target[:, 1:]
+                input = input[:, 1:]
+
+        if target.shape != input.shape:
+            raise AssertionError(f"ground truth has different shape ({target.shape}) from input ({input.shape})")
+
+        i = input
        t = target

-        if i.ndim != t.ndim:
-            raise ValueError(f"logits and target ndim must match, got logits={i.ndim} target={t.ndim}.")
-
-        if t.shape[1] != 1 and t.shape[1] != i.shape[1]:
-            raise ValueError(
-                "target must have one channel or have the same shape as the logits. "
-                "If it has one channel, it should be a class index in the range [0, C-1] "
-                f"where C is the number of classes inferred from 'logits': C={i.shape[1]}. "
-            )
-        if i.shape[1] == 1:
-            raise NotImplementedError("Single-channel predictions not supported.")
-
-        # Change the shape of logits and target to
-        # num_batch x num_class x num_voxels.
-        if i.dim() > 2:
-            i = i.view(i.size(0), i.size(1), -1)  # N,C,H,W => N,C,H*W
-            t = t.view(t.size(0), t.size(1), -1)  # N,1,H,W => N,1,H*W or N,C,H*W
-        else:  # Compatibility with classification.
- i = i.unsqueeze(2) # N,C => N,C,1 - t = t.unsqueeze(2) # N,1 => N,1,1 or N,C,1 - - # Compute the log proba (more stable numerically than softmax). - logpt = F.log_softmax(i, dim=1) # N,C,H*W - # Keep only log proba values of the ground truth class for each voxel. - if target.shape[1] == 1: - logpt = logpt.gather(1, t.long()) # N,C,H*W => N,1,H*W - logpt = torch.squeeze(logpt, dim=1) # N,1,H*W => N,H*W + # Change the shape of input and target to B x N x num_voxels. + i = i.view(i.size(0), i.size(1), -1) + t = t.view(t.size(0), t.size(1), -1) + # Compute the log proba. + logpt = F.log_softmax(i, dim=1) # Get the proba - pt = torch.exp(logpt) # N,H*W or N,C,H*W + pt = torch.exp(logpt) # B,H*W or B,N,H*W if self.weight is not None: - self.weight = self.weight.to(i) + class_weight: Optional[torch.Tensor] = None + if isinstance(self.weight, (float, int)): + class_weight = torch.as_tensor([self.weight] * i.size(1)) + else: + class_weight = torch.as_tensor(self.weight) + class_weight = class_weight.to(i) # Convert the weight to a map in which each voxel # has the weight associated with the ground-truth label # associated with this voxel in target. - at = self.weight[None, :, None] # C => 1,C,1 - at = at.expand((t.size(0), -1, t.size(2))) # 1,C,1 => N,C,H*W - if target.shape[1] == 1: - at = at.gather(1, t.long()) # selection of the weights => N,1,H*W - at = torch.squeeze(at, dim=1) # N,1,H*W => N,H*W + at = class_weight[None, :, None] # N => 1,N,1 + at = at.expand((t.size(0), -1, t.size(2))) # 1,N,1 => B,N,H*W # Multiply the log proba by their weights. logpt = logpt * at # Compute the loss mini-batch. weight = torch.pow(-pt + 1.0, self.gamma) - if target.shape[1] == 1: - loss = torch.mean(-weight * logpt, dim=1) # N - else: - loss = torch.mean(-weight * t * logpt, dim=-1) # N,C - + loss = torch.mean(-weight * t * logpt, dim=-1) if self.reduction == LossReduction.SUM.value: return loss.sum() if self.reduction == LossReduction.NONE.value: diff --git a/tests/test_focal_loss.py b/tests/test_focal_loss.py index 2d1df602c7..4512dac4b9 100644 --- a/tests/test_focal_loss.py +++ b/tests/test_focal_loss.py @@ -16,13 +16,14 @@ import torch.nn.functional as F from monai.losses import FocalLoss +from monai.networks import one_hot from tests.utils import SkipIfBeforePyTorchVersion, test_script_save class TestFocalLoss(unittest.TestCase): def test_consistency_with_cross_entropy_2d(self): # For gamma=0 the focal loss reduces to the cross entropy loss - focal_loss = FocalLoss(gamma=0.0, reduction="mean") + focal_loss = FocalLoss(to_onehot_y=True, gamma=0.0, reduction="mean", weight=1.0) ce = nn.CrossEntropyLoss(reduction="mean") max_error = 0 class_num = 10 @@ -36,7 +37,30 @@ def test_consistency_with_cross_entropy_2d(self): x = x.cuda() l = l.cuda() output0 = focal_loss(x, l) - output1 = ce(x, l[:, 0]) + output1 = ce(x, l[:, 0]) / class_num + a = float(output0.cpu().detach()) + b = float(output1.cpu().detach()) + if abs(a - b) > max_error: + max_error = abs(a - b) + self.assertAlmostEqual(max_error, 0.0, places=3) + + def test_consistency_with_cross_entropy_2d_onehot_label(self): + # For gamma=0 the focal loss reduces to the cross entropy loss + focal_loss = FocalLoss(to_onehot_y=False, gamma=0.0, reduction="mean") + ce = nn.CrossEntropyLoss(reduction="mean") + max_error = 0 + class_num = 10 + batch_size = 128 + for _ in range(100): + # Create a random tensor of shape (batch_size, class_num, 8, 4) + x = torch.rand(batch_size, class_num, 8, 4, requires_grad=True) + # Create a random batch of classes + l = 
torch.randint(low=0, high=class_num, size=(batch_size, 1, 8, 4)) + if torch.cuda.is_available(): + x = x.cuda() + l = l.cuda() + output0 = focal_loss(x, one_hot(l, num_classes=class_num)) + output1 = ce(x, l[:, 0]) / class_num a = float(output0.cpu().detach()) b = float(output1.cpu().detach()) if abs(a - b) > max_error: @@ -45,7 +69,7 @@ def test_consistency_with_cross_entropy_2d(self): def test_consistency_with_cross_entropy_classification(self): # for gamma=0 the focal loss reduces to the cross entropy loss - focal_loss = FocalLoss(gamma=0.0, reduction="mean") + focal_loss = FocalLoss(to_onehot_y=True, gamma=0.0, reduction="mean") ce = nn.CrossEntropyLoss(reduction="mean") max_error = 0 class_num = 10 @@ -60,7 +84,7 @@ def test_consistency_with_cross_entropy_classification(self): x = x.cuda() l = l.cuda() output0 = focal_loss(x, l) - output1 = ce(x, l[:, 0]) + output1 = ce(x, l[:, 0]) / class_num a = float(output0.cpu().detach()) b = float(output1.cpu().detach()) if abs(a - b) > max_error: @@ -75,7 +99,7 @@ def test_bin_seg_2d(self): pred_very_good = 1000 * F.one_hot(target, num_classes=2).permute(0, 3, 1, 2).float() # initialize the mean dice loss - loss = FocalLoss() + loss = FocalLoss(to_onehot_y=True) # focal loss for pred_very_good should be close to 0 target = target.unsqueeze(1) # shape (1, 1, H, W) @@ -91,7 +115,7 @@ def test_empty_class_2d(self): pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float() # initialize the mean dice loss - loss = FocalLoss() + loss = FocalLoss(to_onehot_y=True) # focal loss for pred_very_good should be close to 0 target = target.unsqueeze(1) # shape (1, 1, H, W) @@ -106,7 +130,8 @@ def test_multi_class_seg_2d(self): target = target.unsqueeze(0) # shape (1, H, W) pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float() # initialize the mean dice loss - loss = FocalLoss() + loss = FocalLoss(to_onehot_y=True) + loss_onehot = FocalLoss(to_onehot_y=False) # focal loss for pred_very_good should be close to 0 target_one_hot = F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2) # test one hot @@ -115,7 +140,7 @@ def test_multi_class_seg_2d(self): focal_loss_good = float(loss(pred_very_good, target).cpu()) self.assertAlmostEqual(focal_loss_good, 0.0, places=3) - focal_loss_good = float(loss(pred_very_good, target_one_hot).cpu()) + focal_loss_good = float(loss_onehot(pred_very_good, target_one_hot).cpu()) self.assertAlmostEqual(focal_loss_good, 0.0, places=3) def test_bin_seg_3d(self): @@ -137,33 +162,30 @@ def test_bin_seg_3d(self): pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 4, 1, 2, 3).float() # initialize the mean dice loss - loss = FocalLoss() + loss = FocalLoss(to_onehot_y=True) + loss_onehot = FocalLoss(to_onehot_y=False) # focal loss for pred_very_good should be close to 0 target = target.unsqueeze(1) # shape (1, 1, H, W) focal_loss_good = float(loss(pred_very_good, target).cpu()) self.assertAlmostEqual(focal_loss_good, 0.0, places=3) - focal_loss_good = float(loss(pred_very_good, target_one_hot).cpu()) + focal_loss_good = float(loss_onehot(pred_very_good, target_one_hot).cpu()) self.assertAlmostEqual(focal_loss_good, 0.0, places=3) def test_ill_opts(self): chn_input = torch.ones((1, 2, 3)) - chn_target = torch.ones((1, 1, 3)) + chn_target = torch.ones((1, 2, 3)) with self.assertRaisesRegex(ValueError, ""): FocalLoss(reduction="unknown")(chn_input, chn_target) - with self.assertRaisesRegex(ValueError, ""): - 
FocalLoss(reduction=None)(chn_input, chn_target) + with self.assertRaisesRegex(TypeError, ""): + FocalLoss(other_act="tanh")(chn_input, chn_target) def test_ill_shape(self): chn_input = torch.ones((1, 2, 3)) chn_target = torch.ones((1, 3)) - with self.assertRaisesRegex(ValueError, ""): + with self.assertRaisesRegex(AssertionError, ""): FocalLoss(reduction="mean")(chn_input, chn_target) - chn_input = torch.ones((1, 1, 30)) - chn_target = torch.ones((1, 1, 30)) - with self.assertRaisesRegex(NotImplementedError, ""): - FocalLoss()(chn_input, chn_target) @SkipIfBeforePyTorchVersion((1, 7, 0)) def test_script(self): diff --git a/tests/test_seg_loss_integration.py b/tests/test_seg_loss_integration.py index 2103119342..d2f991f160 100644 --- a/tests/test_seg_loss_integration.py +++ b/tests/test_seg_loss_integration.py @@ -18,23 +18,29 @@ from parameterized import parameterized from monai.losses import DiceLoss, FocalLoss, GeneralizedDiceLoss, TverskyLoss +from monai.networks import one_hot TEST_CASES = [ [DiceLoss, {"to_onehot_y": True, "squared_pred": True, "smooth_nr": 1e-4, "smooth_dr": 1e-4}, {}], [DiceLoss, {"to_onehot_y": True, "squared_pred": True, "smooth_nr": 0, "smooth_dr": 1e-3}, {}], + [DiceLoss, {"to_onehot_y": False, "squared_pred": True, "smooth_nr": 0, "smooth_dr": 1e-3}, {}], [DiceLoss, {"to_onehot_y": True, "squared_pred": True, "batch": True}, {}], [DiceLoss, {"to_onehot_y": True, "sigmoid": True}, {}], [DiceLoss, {"to_onehot_y": True, "softmax": True}, {}], - [FocalLoss, {"gamma": 1.5, "weight": torch.tensor([1, 2])}, {}], - [FocalLoss, {"gamma": 1.5}, {}], + [FocalLoss, {"to_onehot_y": True, "gamma": 1.5, "weight": torch.tensor([1, 2])}, {}], + [FocalLoss, {"to_onehot_y": False, "gamma": 1.5, "weight": [1, 2]}, {}], + [FocalLoss, {"to_onehot_y": False, "gamma": 1.5, "weight": 1.0}, {}], + [FocalLoss, {"to_onehot_y": True, "gamma": 1.5}, {}], [GeneralizedDiceLoss, {"to_onehot_y": True, "softmax": True}, {}], [GeneralizedDiceLoss, {"to_onehot_y": True, "sigmoid": True}, {}], [GeneralizedDiceLoss, {"to_onehot_y": True, "sigmoid": True, "w_type": "simple"}, {}], [GeneralizedDiceLoss, {"to_onehot_y": True, "sigmoid": True, "w_type": "uniform"}, {}], [GeneralizedDiceLoss, {"to_onehot_y": True, "sigmoid": True, "w_type": "uniform", "batch": True}, {}], + [GeneralizedDiceLoss, {"to_onehot_y": False, "sigmoid": True, "w_type": "uniform", "batch": True}, {}], [TverskyLoss, {"to_onehot_y": True, "softmax": True, "alpha": 0.8, "beta": 0.2}, {}], [TverskyLoss, {"to_onehot_y": True, "softmax": True, "alpha": 0.8, "beta": 0.2, "batch": True}, {}], [TverskyLoss, {"to_onehot_y": True, "softmax": True, "alpha": 1.0, "beta": 0.0}, {}], + [TverskyLoss, {"to_onehot_y": False, "softmax": True, "alpha": 1.0, "beta": 0.0}, {}], ] @@ -80,6 +86,8 @@ def test_convergence(self, loss_type, loss_args, forward_args): num_classes = 2 num_voxels = 3 * 4 * 4 + target_onehot = one_hot(target_seg, num_classes=num_classes) + # define a one layer model class OnelayerNet(nn.Module): def __init__(self): @@ -118,7 +126,10 @@ def forward(self, x): if init_output is None: init_output = torch.argmax(output, 1).detach().cpu().numpy() - loss_val = loss(output, target_seg, **forward_args) + if loss_args["to_onehot_y"] is False: + loss_val = loss(output, target_onehot, **forward_args) + else: + loss_val = loss(output, target_seg, **forward_args) if iter_i % 10 == 0: pred = torch.argmax(output, 1).detach().cpu().numpy() From 46154ec78fb41680561fe275068c52dcd073faa5 Mon Sep 17 00:00:00 2001 From: Richard Brown 
<33289025+rijobro@users.noreply.github.com> Date: Wed, 17 Mar 2021 10:38:44 +0000 Subject: [PATCH 067/457] Inverse Resized (#1780) Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/transforms/spatial/array.py | 2 +- monai/transforms/spatial/dictionary.py | 17 ++++++++++++++++- tests/test_inverse.py | 5 +++++ 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 8dd2692c2d..891c811186 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -317,7 +317,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray: class Resize(Transform): """ - Resize the input image to given spatial size. + Resize the input image to given spatial size (with scaling, not cropping/padding). Implemented using :py:class:`torch.nn.functional.interpolate`. Args: diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index dd385bfc6e..9b76b4d18d 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -443,7 +443,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar return d -class Resized(MapTransform): +class Resized(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Resize`. @@ -481,9 +481,24 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key, mode, align_corners in self.key_iterator(d, self.mode, self.align_corners): + self.push_transform(d, key) d[key] = self.resizer(d[key], mode=mode, align_corners=align_corners) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key, mode, align_corners in self.key_iterator(d, self.mode, self.align_corners): + transform = self.get_most_recent_transform(d, key) + orig_size = transform[InverseKeys.ORIG_SIZE.value] + # Create inverse transform + inverse_transform = Resize(orig_size, mode, align_corners) + # Apply inverse transform + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + class Affined(RandomizableTransform, MapTransform): """ diff --git a/tests/test_inverse.py b/tests/test_inverse.py index 204e2da723..4a43cd729b 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -37,6 +37,7 @@ Randomizable, RandRotate90d, RandSpatialCropd, + Resized, ResizeWithPadOrCrop, ResizeWithPadOrCropd, Rotate90d, @@ -289,6 +290,10 @@ TESTS.append(("Spacingd 3d", "3D", 3e-2, Spacingd(KEYS, [0.5, 0.7, 0.9], diagonal=False))) +TESTS.append(("Resized 2d", "2D", 2e-1, Resized(KEYS, [50, 47]))) + +TESTS.append(("Resized 3d", "3D", 5e-2, Resized(KEYS, [201, 150, 78]))) + TESTS_COMPOSE_X2 = [(t[0] + " Compose", t[1], t[2], Compose(Compose(t[3:]))) for t in TESTS] TESTS = TESTS + TESTS_COMPOSE_X2 # type: ignore From cf3bd1a82869dfcde8db9c8f7be99a50abd81811 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 17 Mar 2021 12:43:10 +0000 Subject: [PATCH 068/457] Inverse Zoomd and RandZoomd (#1778) Zoomd and RandZoomd --- monai/transforms/spatial/dictionary.py | 58 ++++++++++++++++++++++++-- tests/test_inverse.py | 32 ++++++++++++++ 2 files changed, 87 insertions(+), 3 deletions(-) diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 9b76b4d18d..d9eb98302c 100644 --- 
a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -23,7 +23,7 @@ from monai.config import DtypeLike, KeysCollection from monai.networks.layers.simplelayers import GaussianFilter -from monai.transforms.croppad.array import CenterSpatialCrop +from monai.transforms.croppad.array import CenterSpatialCrop, SpatialPad from monai.transforms.inverse import InvertibleTransform from monai.transforms.spatial.array import ( Affine, @@ -1215,7 +1215,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d -class Zoomd(MapTransform): +class Zoomd(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Zoom`. @@ -1261,6 +1261,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda for key, mode, padding_mode, align_corners in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners ): + self.push_transform(d, key) d[key] = self.zoomer( d[key], mode=mode, @@ -1269,8 +1270,31 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda ) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key, mode, padding_mode, align_corners in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners + ): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + zoom = np.array(self.zoomer.zoom) + inverse_transform = Zoom(zoom=1 / zoom, keep_size=self.zoomer.keep_size) + # Apply inverse + d[key] = inverse_transform( + d[key], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) + # Size might be out by 1 voxel so pad + d[key] = SpatialPad(transform[InverseKeys.ORIG_SIZE.value])(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d -class RandZoomd(RandomizableTransform, MapTransform): + +class RandZoomd(RandomizableTransform, MapTransform, InvertibleTransform): """ Dict-based version :py:class:`monai.transforms.RandZoom`. 
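As the `Zoomd.inverse` hunk above shows, the inverse zooms by the reciprocal factor and then applies `SpatialPad` with the recorded original size, since rounding during interpolation can leave the result off by a voxel. A round-trip sketch under assumed inputs (the key and shape are illustrative):

```python
import numpy as np

from monai.transforms import Zoomd

data = {"image": np.random.rand(1, 64, 64).astype(np.float32)}

zoom = Zoomd(keys="image", zoom=1.3, keep_size=False)
zoomed = zoom(data)  # spatial size grows by the zoom factor
restored = zoom.inverse(zoomed)  # zooms by 1/1.3, then pads back to 64 x 64

# the shape round-trips exactly; voxel values match only up to interpolation error
assert restored["image"].shape == data["image"].shape
```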
@@ -1338,6 +1362,8 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda self.randomize() d = dict(data) if not self._do_transform: + for key in self.keys: + self.push_transform(d, key, extra_info={"zoom": self._zoom}) return d img_dims = data[self.keys[0]].ndim @@ -1351,6 +1377,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda for key, mode, padding_mode, align_corners in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners ): + self.push_transform(d, key, extra_info={"zoom": self._zoom}) d[key] = zoomer( d[key], mode=mode, @@ -1359,6 +1386,31 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda ) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key, mode, padding_mode, align_corners in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners + ): + transform = self.get_most_recent_transform(d, key) + # Check if random transform was actually performed (based on `prob`) + if transform[InverseKeys.DO_TRANSFORM.value]: + # Create inverse transform + zoom = np.array(transform[InverseKeys.EXTRA_INFO.value]["zoom"]) + inverse_transform = Zoom(zoom=1 / zoom, keep_size=self.keep_size) + # Apply inverse + d[key] = inverse_transform( + d[key], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) + # Size might be out by 1 voxel so pad + d[key] = SpatialPad(transform[InverseKeys.ORIG_SIZE.value])(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + + return d + SpacingD = SpacingDict = Spacingd OrientationD = OrientationDict = Orientationd diff --git a/tests/test_inverse.py b/tests/test_inverse.py index 4a43cd729b..b0f2b8187a 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -37,6 +37,7 @@ Randomizable, RandRotate90d, RandSpatialCropd, + RandZoomd, Resized, ResizeWithPadOrCrop, ResizeWithPadOrCropd, @@ -44,6 +45,7 @@ Spacingd, SpatialCropd, SpatialPadd, + Zoomd, allow_missing_keys_mode, ) from monai.utils import first, get_seed, optional_import, set_determinism @@ -294,6 +296,36 @@ TESTS.append(("Resized 3d", "3D", 5e-2, Resized(KEYS, [201, 150, 78]))) + +TESTS.append( + ( + "Zoomd 1d", + "1D odd", + 0, + Zoomd(KEYS, zoom=2, keep_size=False), + ) +) + +TESTS.append( + ( + "Zoomd 2d", + "2D", + 2e-1, + Zoomd(KEYS, zoom=0.9), + ) +) + +TESTS.append( + ( + "Zoomd 3d", + "3D", + 3e-2, + Zoomd(KEYS, zoom=[2.5, 1, 3], keep_size=False), + ) +) + +TESTS.append(("RandZoom 3d", "3D", 9e-2, RandZoomd(KEYS, 1, [0.5, 0.6, 0.9], [1.1, 1, 1.05], keep_size=True))) + TESTS_COMPOSE_X2 = [(t[0] + " Compose", t[1], t[2], Compose(Compose(t[3:]))) for t in TESTS] TESTS = TESTS + TESTS_COMPOSE_X2 # type: ignore From 592806a763f64dd67c05797288f9f0949276853c Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 17 Mar 2021 15:35:32 +0000 Subject: [PATCH 069/457] Inverse Rotated (#1782) Inverse Rotated and RandRotated --- monai/transforms/spatial/array.py | 8 ++- monai/transforms/spatial/dictionary.py | 74 ++++++++++++++++++++++++-- tests/test_inverse.py | 39 ++++++++++++++ 3 files changed, 117 insertions(+), 4 deletions(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 891c811186..17626e4582 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -421,6 +421,7 @@ def __init__( self.padding_mode: GridSamplePadMode = 
GridSamplePadMode(padding_mode) self.align_corners = align_corners self.dtype = dtype + self.rotation_matrix: Optional[np.ndarray] = None def __call__( self, @@ -482,8 +483,13 @@ def __call__( torch.as_tensor(np.ascontiguousarray(transform).astype(_dtype)), spatial_size=output_shape, ) + self.rotation_matrix = transform return np.asarray(output.squeeze(0).detach().cpu().numpy(), dtype=np.float32) + def get_rotation_matrix(self) -> Optional[np.ndarray]: + """Get the most recently applied rotation matrix""" + return self.rotation_matrix + class Zoom(Transform): """ @@ -743,7 +749,7 @@ def __call__( align_corners=self.align_corners if align_corners is None else align_corners, dtype=dtype or self.dtype or img.dtype, ) - return rotator(img) + return np.array(rotator(img)) class RandFlip(RandomizableTransform): diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index d9eb98302c..1c6b6a14bc 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -22,6 +22,7 @@ import torch from monai.config import DtypeLike, KeysCollection +from monai.networks.layers import AffineTransform from monai.networks.layers.simplelayers import GaussianFilter from monai.transforms.croppad.array import CenterSpatialCrop, SpatialPad from monai.transforms.inverse import InvertibleTransform @@ -1054,7 +1055,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar return d -class Rotated(MapTransform): +class Rotated(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Rotate`. @@ -1106,6 +1107,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda for key, mode, padding_mode, align_corners, dtype in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners, self.dtype ): + orig_size = d[key].shape[1:] d[key] = self.rotator( d[key], mode=mode, @@ -1113,10 +1115,40 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda align_corners=align_corners, dtype=dtype, ) + rot_mat = self.rotator.get_rotation_matrix() + self.push_transform(d, key, orig_size=orig_size, extra_info={"rot_mat": rot_mat}) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key, mode, padding_mode, align_corners, dtype in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners, self.dtype + ): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + fwd_rot_mat = transform[InverseKeys.EXTRA_INFO.value]["rot_mat"] + inv_rot_mat = np.linalg.inv(fwd_rot_mat) + + xform = AffineTransform( + normalized=False, + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + reverse_indexing=True, + ) + output = xform( + torch.as_tensor(np.ascontiguousarray(d[key]).astype(dtype)).unsqueeze(0), + torch.as_tensor(np.ascontiguousarray(inv_rot_mat).astype(dtype)), + spatial_size=transform[InverseKeys.ORIG_SIZE.value], + ) + d[key] = np.asarray(output.squeeze(0).detach().cpu().numpy(), dtype=np.float32) + # Remove the applied transform + self.pop_transform(d, key) -class RandRotated(RandomizableTransform, MapTransform): + return d + + +class RandRotated(RandomizableTransform, MapTransform, InvertibleTransform): """ Dictionary-based version :py:class:`monai.transforms.RandRotate` Randomly rotates the input arrays. 
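A usage sketch of the invertible Rotated (key, shape, and angle are illustrative assumptions): the forward call stores the rotation matrix, and inverse() resamples through its matrix inverse.

import numpy as np

from monai.transforms import Rotated

data = {"image": np.random.rand(1, 64, 64).astype(np.float32)}
rotator = Rotated(keys="image", angle=np.pi / 4, keep_size=True)
rotated = rotator(data)  # pushes {"rot_mat": ...} alongside the data
restored = rotator.inverse(rotated)  # applies AffineTransform with the inverted matrix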
@@ -1197,14 +1229,18 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda self.randomize() d = dict(data) if not self._do_transform: + for key in self.keys: + self.push_transform(d, key, extra_info={"rot_mat": np.eye(4)}) return d + angle: Union[Sequence[float], float] = self.x if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z) rotator = Rotate( - angle=self.x if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z), + angle=angle, keep_size=self.keep_size, ) for key, mode, padding_mode, align_corners, dtype in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners, self.dtype ): + orig_size = d[key].shape[1:] d[key] = rotator( d[key], mode=mode, @@ -1212,6 +1248,38 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda align_corners=align_corners, dtype=dtype, ) + rot_mat = rotator.get_rotation_matrix() + self.push_transform(d, key, orig_size=orig_size, extra_info={"rot_mat": rot_mat}) + return d + + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + for key, mode, padding_mode, align_corners, dtype in self.key_iterator( + d, self.mode, self.padding_mode, self.align_corners, self.dtype + ): + transform = self.get_most_recent_transform(d, key) + # Check if random transform was actually performed (based on `prob`) + if transform[InverseKeys.DO_TRANSFORM.value]: + # Create inverse transform + fwd_rot_mat = transform[InverseKeys.EXTRA_INFO.value]["rot_mat"] + inv_rot_mat = np.linalg.inv(fwd_rot_mat) + + xform = AffineTransform( + normalized=False, + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + reverse_indexing=True, + ) + output = xform( + torch.as_tensor(np.ascontiguousarray(d[key]).astype(dtype)).unsqueeze(0), + torch.as_tensor(np.ascontiguousarray(inv_rot_mat).astype(dtype)), + spatial_size=transform[InverseKeys.ORIG_SIZE.value], + ) + d[key] = np.asarray(output.squeeze(0).detach().cpu().numpy(), dtype=np.float32) + # Remove the applied transform + self.pop_transform(d, key) + return d diff --git a/tests/test_inverse.py b/tests/test_inverse.py index b0f2b8187a..03e1270ea3 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -9,6 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
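The inverse above relies on the stored homogeneous rotation matrix being exactly invertible; a quick standalone check of that assumption (the angle is arbitrary):

import numpy as np

theta = np.pi / 6
# 2D rotation expressed in homogeneous coordinates
fwd_rot_mat = np.array(
    [[np.cos(theta), -np.sin(theta), 0.0], [np.sin(theta), np.cos(theta), 0.0], [0.0, 0.0, 1.0]]
)
inv_rot_mat = np.linalg.inv(fwd_rot_mat)
assert np.allclose(fwd_rot_mat @ inv_rot_mat, np.eye(3))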
+import random import sys import unittest from functools import partial @@ -36,12 +37,14 @@ RandFlipd, Randomizable, RandRotate90d, + RandRotated, RandSpatialCropd, RandZoomd, Resized, ResizeWithPadOrCrop, ResizeWithPadOrCropd, Rotate90d, + Rotated, Spacingd, SpatialCropd, SpatialPadd, @@ -326,6 +329,42 @@ TESTS.append(("RandZoom 3d", "3D", 9e-2, RandZoomd(KEYS, 1, [0.5, 0.6, 0.9], [1.1, 1, 1.05], keep_size=True))) +TESTS.append( + ( + "RandRotated, prob 0", + "2D", + 0, + RandRotated(KEYS, prob=0), + ) +) + +TESTS.append( + ( + "Rotated 2d", + "2D", + 8e-2, + Rotated(KEYS, random.uniform(np.pi / 6, np.pi), keep_size=True, align_corners=False), + ) +) + +TESTS.append( + ( + "Rotated 3d", + "3D", + 1e-1, + Rotated(KEYS, [random.uniform(np.pi / 6, np.pi) for _ in range(3)], True), # type: ignore + ) +) + +TESTS.append( + ( + "RandRotated 3d", + "3D", + 1e-1, + RandRotated(KEYS, *[random.uniform(np.pi / 6, np.pi) for _ in range(3)], 1), # type: ignore + ) +) + TESTS_COMPOSE_X2 = [(t[0] + " Compose", t[1], t[2], Compose(Compose(t[3:]))) for t in TESTS] TESTS = TESTS + TESTS_COMPOSE_X2 # type: ignore From 7f0e789514554928c664cd02a64f7cef8fa901e5 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Wed, 17 Mar 2021 19:26:12 +0000 Subject: [PATCH 070/457] Inverse Affined and RandAffined (#1781) * inverse RandAffined Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/transforms/spatial/array.py | 58 +++++++++++++++++-------- monai/transforms/spatial/dictionary.py | 59 +++++++++++++++++++++++++- tests/test_inverse.py | 36 ++++++++++++++++ tests/test_rand_affined.py | 2 + 4 files changed, 135 insertions(+), 20 deletions(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 17626e4582..de9bba8e95 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -931,6 +931,9 @@ class AffineGrid(Transform): as_tensor_output: whether to output tensor instead of numpy array. defaults to True. device: device to store the output grid data. + affine: If applied, ignore the params (`rotate_params`, etc.) and use the + supplied matrix. Should be square with each side = num of image spatial + dimensions + 1. 
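A sketch of supplying the matrix directly to AffineGrid (the matrix values are illustrative, not from the patch):

import numpy as np

from monai.transforms import AffineGrid

# 2D image -> 3x3 homogeneous matrix (spatial dims + 1); here a pure translation
affine = np.array([[1.0, 0.0, 5.0], [0.0, 1.0, -3.0], [0.0, 0.0, 1.0]])
grid = AffineGrid(affine=affine)(spatial_size=(64, 64))  # rotate/shear/... params are ignored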
""" @@ -942,6 +945,7 @@ def __init__( scale_params: Optional[Union[Sequence[float], float]] = None, as_tensor_output: bool = True, device: Optional[torch.device] = None, + affine: Optional[Union[np.ndarray, torch.Tensor]] = None, ) -> None: self.rotate_params = rotate_params self.shear_params = shear_params @@ -951,8 +955,12 @@ def __init__( self.as_tensor_output = as_tensor_output self.device = device + self.affine = affine + def __call__( - self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[Union[np.ndarray, torch.Tensor]] = None + self, + spatial_size: Optional[Sequence[int]] = None, + grid: Optional[Union[np.ndarray, torch.Tensor]] = None, ) -> Union[np.ndarray, torch.Tensor]: """ Args: @@ -969,27 +977,32 @@ def __call__( else: raise ValueError("Incompatible values: grid=None and spatial_size=None.") - spatial_dims = len(grid.shape) - 1 - affine = np.eye(spatial_dims + 1) - if self.rotate_params: - affine = affine @ create_rotate(spatial_dims, self.rotate_params) - if self.shear_params: - affine = affine @ create_shear(spatial_dims, self.shear_params) - if self.translate_params: - affine = affine @ create_translate(spatial_dims, self.translate_params) - if self.scale_params: - affine = affine @ create_scale(spatial_dims, self.scale_params) - affine = torch.as_tensor(np.ascontiguousarray(affine), device=self.device) + if self.affine is None: + spatial_dims = len(grid.shape) - 1 + affine = np.eye(spatial_dims + 1) + if self.rotate_params: + affine = affine @ create_rotate(spatial_dims, self.rotate_params) + if self.shear_params: + affine = affine @ create_shear(spatial_dims, self.shear_params) + if self.translate_params: + affine = affine @ create_translate(spatial_dims, self.translate_params) + if self.scale_params: + affine = affine @ create_scale(spatial_dims, self.scale_params) + self.affine = affine + + self.affine = torch.as_tensor(np.ascontiguousarray(self.affine), device=self.device) grid = torch.tensor(grid) if not isinstance(grid, torch.Tensor) else grid.detach().clone() if self.device: grid = grid.to(self.device) - grid = (affine.float() @ grid.reshape((grid.shape[0], -1)).float()).reshape([-1] + list(grid.shape[1:])) + grid = (self.affine.float() @ grid.reshape((grid.shape[0], -1)).float()).reshape([-1] + list(grid.shape[1:])) if grid is None or not isinstance(grid, torch.Tensor): raise ValueError("Unknown grid.") - if self.as_tensor_output: - return grid - return np.asarray(grid.cpu().numpy()) + return grid if self.as_tensor_output else np.asarray(grid.cpu().numpy()) + + def get_transformation_matrix(self) -> Optional[Union[np.ndarray, torch.Tensor]]: + """Get the most recently applied transformation matrix""" + return self.affine class RandAffineGrid(RandomizableTransform): @@ -1040,6 +1053,7 @@ def __init__( self.as_tensor_output = as_tensor_output self.device = device + self.affine: Optional[Union[np.ndarray, torch.Tensor]] = None def _get_rand_param(self, param_range, add_scalar: float = 0.0): out_param = [] @@ -1059,7 +1073,9 @@ def randomize(self, data: Optional[Any] = None) -> None: self.scale_params = self._get_rand_param(self.scale_range, 1.0) def __call__( - self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[Union[np.ndarray, torch.Tensor]] = None + self, + spatial_size: Optional[Sequence[int]] = None, + grid: Optional[Union[np.ndarray, torch.Tensor]] = None, ) -> Union[np.ndarray, torch.Tensor]: """ Args: @@ -1078,7 +1094,13 @@ def __call__( as_tensor_output=self.as_tensor_output, device=self.device, ) - return 
affine_grid(spatial_size, grid) + grid = affine_grid(spatial_size, grid) + self.affine = affine_grid.get_transformation_matrix() + return grid + + def get_transformation_matrix(self) -> Optional[Union[np.ndarray, torch.Tensor]]: + """Get the most recently applied transformation matrix""" + return self.affine class RandDeformGrid(RandomizableTransform): diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 1c6b6a14bc..caa1a34e08 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -28,6 +28,7 @@ from monai.transforms.inverse import InvertibleTransform from monai.transforms.spatial.array import ( Affine, + AffineGrid, Flip, Orientation, Rand2DElastic, @@ -501,7 +502,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar return d -class Affined(RandomizableTransform, MapTransform): +class Affined(MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.Affine`. """ @@ -570,11 +571,38 @@ def __call__( ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: d = dict(data) for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + orig_size = d[key].shape[1:] d[key] = self.affine(d[key], mode=mode, padding_mode=padding_mode) + affine = self.affine.affine_grid.get_transformation_matrix() + self.push_transform(d, key, orig_size=orig_size, extra_info={"affine": affine}) + return d + + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + + for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + transform = self.get_most_recent_transform(d, key) + orig_size = transform[InverseKeys.ORIG_SIZE.value] + # Create inverse transform + fwd_affine = transform[InverseKeys.EXTRA_INFO.value]["affine"] + inv_affine = np.linalg.inv(fwd_affine) + + affine_grid = AffineGrid(affine=inv_affine) + grid: torch.Tensor = affine_grid(orig_size) # type: ignore + + # Apply inverse transform + out = self.affine.resampler(d[key], grid, mode, padding_mode) + + # Convert to numpy + d[key] = out if isinstance(out, np.ndarray) else out.cpu().numpy() + + # Remove the applied transform + self.pop_transform(d, key) + return d -class RandAffined(RandomizableTransform, MapTransform): +class RandAffined(RandomizableTransform, MapTransform, InvertibleTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.RandAffine`. 
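A forward/inverse sketch for the dictionary wrapper (spatial size, rotate range, and key are illustrative assumptions):

import numpy as np

from monai.transforms import RandAffined

data = {"image": np.random.rand(1, 64, 64, 48).astype(np.float32)}
rand_affine = RandAffined(
    keys="image",
    spatial_size=(64, 64, 48),
    prob=1.0,
    rotate_range=(np.pi / 6, np.pi / 6, np.pi / 6),
    padding_mode="zeros",
)
out = rand_affine(data)  # pushes {"affine": ...}; an identity matrix when prob skips the transform
restored = rand_affine.inverse(out)  # rebuilds the grid from np.linalg.inv(affine) and resamples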
""" @@ -667,13 +695,40 @@ def __call__( sp_size = fall_back_tuple(self.rand_affine.spatial_size, data[self.keys[0]].shape[1:]) if self._do_transform: grid = self.rand_affine.rand_affine_grid(spatial_size=sp_size) + affine = self.rand_affine.rand_affine_grid.get_transformation_matrix() else: grid = create_grid(spatial_size=sp_size) + affine = np.eye(len(sp_size) + 1) for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + self.push_transform(d, key, extra_info={"affine": affine}) d[key] = self.rand_affine.resampler(d[key], grid, mode=mode, padding_mode=padding_mode) return d + def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = deepcopy(dict(data)) + + for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + transform = self.get_most_recent_transform(d, key) + orig_size = transform[InverseKeys.ORIG_SIZE.value] + # Create inverse transform + fwd_affine = transform[InverseKeys.EXTRA_INFO.value]["affine"] + inv_affine = np.linalg.inv(fwd_affine) + + affine_grid = AffineGrid(affine=inv_affine) + grid: torch.Tensor = affine_grid(orig_size) # type: ignore + + # Apply inverse transform + out = self.rand_affine.resampler(d[key], grid, mode, padding_mode) + + # Convert to numpy + d[key] = out if isinstance(out, np.ndarray) else out.cpu().numpy() + + # Remove the applied transform + self.pop_transform(d, key) + + return d + class Rand2DElasticd(RandomizableTransform, MapTransform): """ diff --git a/tests/test_inverse.py b/tests/test_inverse.py index 03e1270ea3..c1225ea11c 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -24,6 +24,7 @@ from monai.networks.nets import UNet from monai.transforms import ( AddChanneld, + Affined, BorderPadd, CenterSpatialCropd, Compose, @@ -33,6 +34,7 @@ InvertibleTransform, LoadImaged, Orientationd, + RandAffined, RandAxisFlipd, RandFlipd, Randomizable, @@ -365,6 +367,40 @@ ) ) +TESTS.append( + ( + "Affine 3d", + "3D", + 1e-1, + Affined( + KEYS, + spatial_size=[155, 179, 192], + rotate_params=[np.pi / 6, -np.pi / 5, np.pi / 7], + shear_params=[0.5, 0.5], + translate_params=[10, 5, -4], + scale_params=[0.8, 1.3], + ), + ) +) + +TESTS.append( + ( + "RandAffine 3d", + "3D", + 1e-1, + RandAffined( + KEYS, + [155, 179, 192], + prob=1, + padding_mode="zeros", + rotate_range=[np.pi / 6, -np.pi / 5, np.pi / 7], + shear_range=[(0.5, 0.5)], + translate_range=[10, 5, -4], + scale_range=[(0.8, 1.2), (0.9, 1.3)], + ), + ) +) + TESTS_COMPOSE_X2 = [(t[0] + " Compose", t[1], t[2], Compose(Compose(t[3:]))) for t in TESTS] TESTS = TESTS + TESTS_COMPOSE_X2 # type: ignore diff --git a/tests/test_rand_affined.py b/tests/test_rand_affined.py index 54d71ad8f7..ae2adbe3b3 100644 --- a/tests/test_rand_affined.py +++ b/tests/test_rand_affined.py @@ -145,6 +145,8 @@ def test_rand_affined(self, input_param, input_data, expected_val): res = g(input_data) for key in res: result = res[key] + if "_transforms" in key: + continue expected = expected_val[key] if isinstance(expected_val, dict) else expected_val self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected, torch.Tensor)) if isinstance(result, torch.Tensor): From 053d0c8aab5bf038b65300bf2632fc0be7a10991 Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Thu, 18 Mar 2021 05:10:07 +0800 Subject: [PATCH 071/457] Fix type error (#1792) Signed-off-by: Yiheng Wang --- monai/transforms/intensity/array.py | 3 ++- monai/transforms/intensity/dictionary.py | 7 ++++--- 2 files 
changed, 6 insertions(+), 4 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 1bddc0137d..91407323b9 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -148,7 +148,8 @@ def __init__( Args: minv: minimum value of output data. maxv: maximum value of output data. - factor: factor scale by ``v = v * (1 + factor)``. + factor: factor scale by ``v = v * (1 + factor)``. In order to use + this parameter, please set `minv` and `maxv` to None. """ self.minv = minv self.maxv = maxv diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 4602d59379..7d4319eab0 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -221,8 +221,8 @@ class ScaleIntensityd(MapTransform): def __init__( self, keys: KeysCollection, - minv: float = 0.0, - maxv: float = 1.0, + minv: Optional[float] = 0.0, + maxv: Optional[float] = 1.0, factor: Optional[float] = None, allow_missing_keys: bool = False, ) -> None: @@ -232,7 +232,8 @@ def __init__( See also: :py:class:`monai.transforms.compose.MapTransform` minv: minimum value of output data. maxv: maximum value of output data. - factor: factor scale by ``v = v * (1 + factor)``. + factor: factor scale by ``v = v * (1 + factor)``. In order to use + this parameter, please set `minv` and `maxv` to None. allow_missing_keys: don't raise exception if key is missing. """ From f063a4791bd5cab9a9bf36f677e770dbf80cc6d0 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Wed, 17 Mar 2021 20:13:04 -0400 Subject: [PATCH 072/457] Update cucim to remove warning message (#1799) Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- docs/requirements.txt | 2 +- requirements-dev.txt | 2 +- setup.cfg | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index ae3ced2c05..f05bc5b9ca 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,7 +4,7 @@ pytorch-ignite==0.4.4 numpy>=1.17 itk>=5.0 nibabel -cucim==0.18.0 +cucim==0.18.1 openslide-python==1.1.2 parameterized scikit-image>=0.14.2 diff --git a/requirements-dev.txt b/requirements-dev.txt index 82dd695d47..dc4181b310 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -30,5 +30,5 @@ Sphinx==3.3.0 recommonmark==0.6.0 sphinx-autodoc-typehints==1.11.1 sphinx-rtd-theme==0.5.0 -cucim==0.18.0 +cucim==0.18.1 openslide-python==1.1.2 diff --git a/setup.cfg b/setup.cfg index 1222761801..15e6a6d127 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,7 +32,7 @@ all = torchvision itk>=5.0 tqdm>=4.47.0 - cucim==0.18.0 + cucim==0.18.1 openslide-python==1.1.2 nibabel = nibabel @@ -57,7 +57,7 @@ lmdb = psutil = psutil cucim = - cucim==0.18.0 + cucim==0.18.1 openslide = openslide-python==1.1.2 From 466a0bf629cbf98920935dd6cf4512dc5c908a8f Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Fri, 19 Mar 2021 01:14:51 +0800 Subject: [PATCH 073/457] Implement FROC metric (#1509) * Implement FROC metric Signed-off-by: yiheng-wang-nv * Fix doc error Signed-off-by: yiheng-wang-nv * Change param lowercase Signed-off-by: yiheng-wang-nv * Update calculate functions Signed-off-by: yiheng-wang-nv * fix mypy error Signed-off-by: yiheng-wang-nv * Update parameter names Signed-off-by: Yiheng Wang --- docs/source/metrics.rst | 4 ++ monai/metrics/__init__.py | 1 + monai/metrics/froc.py | 137
+++++++++++++++++++++++++++++++++++++ tests/test_compute_froc.py | 101 +++++++++++++++++++++++++++ 4 files changed, 243 insertions(+) create mode 100644 monai/metrics/froc.py create mode 100644 tests/test_compute_froc.py diff --git a/docs/source/metrics.rst b/docs/source/metrics.rst index 32a3faf380..7cfd10f196 100644 --- a/docs/source/metrics.rst +++ b/docs/source/metrics.rst @@ -6,6 +6,10 @@ Metrics ======= .. currentmodule:: monai.metrics +`FROC` +------ +.. autofunction:: compute_froc_score + `Mean Dice` ----------- .. autofunction:: compute_meandice diff --git a/monai/metrics/__init__.py b/monai/metrics/__init__.py index 818413c30d..35dea5f387 100644 --- a/monai/metrics/__init__.py +++ b/monai/metrics/__init__.py @@ -10,6 +10,7 @@ # limitations under the License. from .confusion_matrix import ConfusionMatrixMetric, compute_confusion_matrix_metric, get_confusion_matrix +from .froc import compute_fp_tp_probs, compute_froc_curve_data, compute_froc_score from .hausdorff_distance import HausdorffDistanceMetric, compute_hausdorff_distance, compute_percent_hausdorff_distance from .meandice import DiceMetric, compute_meandice from .rocauc import compute_roc_auc diff --git a/monai/metrics/froc.py b/monai/metrics/froc.py new file mode 100644 index 0000000000..ec349967c6 --- /dev/null +++ b/monai/metrics/froc.py @@ -0,0 +1,137 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + + +def compute_fp_tp_probs( + probs: Union[np.ndarray, torch.Tensor], + y_coord: Union[np.ndarray, torch.Tensor], + x_coord: Union[np.ndarray, torch.Tensor], + evaluation_mask: Union[np.ndarray, torch.Tensor], + labels_to_exclude: Optional[List] = None, + resolution_level: int = 0, +): + """ + This function is modified from the official evaluation code of + `CAMELYON 16 Challenge `_, and used to distinguish + true positive and false positive predictions. A true positive prediction is defined when + the detection point is within the annotated ground truth region. + + Args: + probs: an array with shape (n,) that represents the probabilities of the detections. + Where, n is the number of predicted detections. + y_coord: an array with shape (n,) that represents the Y-coordinates of the detections. + x_coord: an array with shape (n,) that represents the X-coordinates of the detections. + evaluation_mask: the ground truth mask for evaluation. + labels_to_exclude: labels in this list will not be counted for metric calculation. + resolution_level: the level at which the evaluation mask is made. + + Returns: + fp_probs: an array that contains the probabilities of the false positive detections. + tp_probs: an array that contains the probabilities of the True positive detections. + num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation. 
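The values below reproduce one of the accompanying test cases, so the expected outputs are known:

import numpy as np
import torch

from monai.metrics import compute_fp_tp_probs

probs = torch.tensor([1.0, 0.6, 0.8])
y_coord = torch.tensor([0, 2, 3])
x_coord = torch.tensor([3, 0, 1])
evaluation_mask = np.array([[0, 0, 1, 1], [2, 2, 0, 0], [0, 3, 3, 0], [0, 3, 3, 3]])
fp_probs, tp_probs, num_targets = compute_fp_tp_probs(probs, y_coord, x_coord, evaluation_mask)
# fp_probs == [0.6], tp_probs == [1.0, 0.0, 0.8], num_targets == 3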
+ + """ + assert ( + probs.shape == y_coord.shape == x_coord.shape + ), "the shapes for coordinates and probabilities should be the same." + + if isinstance(probs, torch.Tensor): + probs = probs.detach().cpu().numpy() + if isinstance(y_coord, torch.Tensor): + y_coord = y_coord.detach().cpu().numpy() + if isinstance(x_coord, torch.Tensor): + x_coord = x_coord.detach().cpu().numpy() + if isinstance(evaluation_mask, torch.Tensor): + evaluation_mask = evaluation_mask.detach().cpu().numpy() + + if labels_to_exclude is None: + labels_to_exclude = [] + + max_label = np.max(evaluation_mask) + tp_probs = np.zeros((max_label,), dtype=np.float32) + + y_coord = (y_coord / pow(2, resolution_level)).astype(int) + x_coord = (x_coord / pow(2, resolution_level)).astype(int) + + hittedlabel = evaluation_mask[y_coord, x_coord] + fp_probs = probs[np.where(hittedlabel == 0)] + for i in range(1, max_label + 1): + if i not in labels_to_exclude and i in hittedlabel: + tp_probs[i - 1] = probs[np.where(hittedlabel == i)].max() + + num_targets = max_label - len(labels_to_exclude) + return fp_probs, tp_probs, num_targets + + +def compute_froc_curve_data( + fp_probs: Union[np.ndarray, torch.Tensor], + tp_probs: Union[np.ndarray, torch.Tensor], + num_targets: int, + num_images: int, +): + """ + This function is modified from the official evaluation code of + `CAMELYON 16 Challenge `_, and used to compute + the required data for plotting the Free Response Operating Characteristic (FROC) curve. + + Args: + fp_probs: an array that contains the probabilities of the false positive detections for all + images under evaluation. + tp_probs: an array that contains the probabilities of the True positive detections for all + images under evaluation. + num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation. + num_images: the number of images under evaluation. + + """ + assert type(fp_probs) == type(tp_probs), "fp and tp probs should have same type." + if isinstance(fp_probs, torch.Tensor): + fp_probs = fp_probs.detach().cpu().numpy() + if isinstance(tp_probs, torch.Tensor): + tp_probs = tp_probs.detach().cpu().numpy() + + total_fps, total_tps = [], [] + all_probs = sorted(set(list(fp_probs) + list(tp_probs))) + for thresh in all_probs[1:]: + total_fps.append((fp_probs >= thresh).sum()) + total_tps.append((tp_probs >= thresh).sum()) + total_fps.append(0) + total_tps.append(0) + fps_per_image = np.asarray(total_fps) / float(num_images) + total_sensitivity = np.asarray(total_tps) / float(num_targets) + return fps_per_image, total_sensitivity + + +def compute_froc_score( + fps_per_image: np.ndarray, + total_sensitivity: np.ndarray, + eval_thresholds: Tuple = (0.25, 0.5, 1, 2, 4, 8), +): + """ + This function is modified from the official evaluation code of + `CAMELYON 16 Challenge `_, and used to compute + the challenge's second evaluation metric, which is defined as the average sensitivity at + the predefined false positive rates per whole slide image. + + Args: + fps_per_image: the average number of false positives per image for different thresholds. + total_sensitivity: sensitivities (true positive rates) for different thresholds. + eval_thresholds: the false positive rates for calculating the average sensitivity. Defaults + to (0.25, 0.5, 1, 2, 4, 8) which is the same as the CAMELYON 16 Challenge. 
+ + """ + interp_sens = np.interp(eval_thresholds, fps_per_image[::-1], total_sensitivity[::-1]) + return np.mean(interp_sens) diff --git a/tests/test_compute_froc.py b/tests/test_compute_froc.py new file mode 100644 index 0000000000..70de836dd9 --- /dev/null +++ b/tests/test_compute_froc.py @@ -0,0 +1,101 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.metrics import compute_fp_tp_probs, compute_froc_curve_data, compute_froc_score + +TEST_CASE_1 = [ + { + "probs": torch.tensor([1, 0.6, 0.8]), + "y_coord": torch.tensor([0, 2, 3]), + "x_coord": torch.tensor([3, 0, 1]), + "evaluation_mask": np.array([[0, 0, 1, 1], [2, 2, 0, 0], [0, 3, 3, 0], [0, 3, 3, 3]]), + "labels_to_exclude": [2], + "resolution_level": 0, + }, + np.array([0.6]), + np.array([1, 0, 0.8]), + 2, +] + +TEST_CASE_2 = [ + { + "probs": torch.tensor([1, 0.6, 0.8]), + "y_coord": torch.tensor([0, 2, 3]), + "x_coord": torch.tensor([3, 0, 1]), + "evaluation_mask": np.array([[0, 0, 1, 1], [2, 2, 0, 0], [0, 3, 3, 0], [0, 3, 3, 3]]), + "resolution_level": 0, + }, + np.array([0.6]), + np.array([1, 0, 0.8]), + 3, +] + +TEST_CASE_3 = [ + { + "probs": torch.tensor([1, 0.6, 0.8]), + "y_coord": torch.tensor([0, 4, 6]), + "x_coord": torch.tensor([6, 0, 2]), + "evaluation_mask": np.array([[0, 0, 1, 1], [2, 2, 0, 0], [0, 3, 3, 0], [0, 3, 3, 3]]), + "resolution_level": 1, + }, + np.array([0.6]), + np.array([1, 0, 0.8]), + 3, +] + +TEST_CASE_4 = [ + { + "fp_probs": np.array([0.8, 0.6]), + "tp_probs": np.array([1, 1, 0, 0, 0.8, 0.8, 0]), + "num_targets": 4, + "num_images": 2, + }, + (0.25, 0.5, 1, 2, 4, 8), + 0.95833333, +] + +TEST_CASE_5 = [ + { + "fp_probs": torch.tensor([0.8, 0.6]), + "tp_probs": torch.tensor([1, 1, 0, 0, 0.8, 0.8, 0]), + "num_targets": 4, + "num_images": 2, + }, + (0.25), + 0.75, +] + + +class TestComputeFpTp(unittest.TestCase): + @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) + def test_value(self, input_data, expected_fp, expected_tp, expected_num): + fp_probs, tp_probs, num_tumors = compute_fp_tp_probs(**input_data) + np.testing.assert_allclose(fp_probs, expected_fp, rtol=1e-5) + np.testing.assert_allclose(tp_probs, expected_tp, rtol=1e-5) + np.testing.assert_equal(num_tumors, expected_num) + + +class TestComputeFrocScore(unittest.TestCase): + @parameterized.expand([TEST_CASE_4, TEST_CASE_5]) + def test_value(self, input_data, thresholds, expected_score): + fps_per_image, total_sensitivity = compute_froc_curve_data(**input_data) + score = compute_froc_score(fps_per_image, total_sensitivity, thresholds) + np.testing.assert_allclose(score, expected_score, rtol=1e-5) + + +if __name__ == "__main__": + unittest.main() From 86cbf05a19271a0c320030435a90d7c6d23a8b06 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Thu, 18 Mar 2021 19:21:17 +0000 Subject: [PATCH 074/457] [1798] fix RandZoomd collation (#1801) * fix RandZoomd 
collation Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * dont check class ID on windows Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * if elif change Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * code review Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * remove extra import Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * win32 change Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * changes Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * update RandAxisFlipd Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * help message more general Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * more changes Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * use torch Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * wyli changes Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * more changes Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * skip test if win32 Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * get default mp Signed-off-by: Wenqi Li * dedicated fail test Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * requires reason Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> Co-authored-by: Wenqi Li --- monai/data/utils.py | 2 +- monai/transforms/inverse.py | 12 +++- monai/transforms/spatial/dictionary.py | 19 +++--- tests/test_inverse.py | 18 +++-- tests/test_inverse_collation.py | 93 ++++++++++++++++++++++++++ 5 files changed, 125 insertions(+), 19 deletions(-) create mode 100644 tests/test_inverse_collation.py diff --git a/monai/data/utils.py b/monai/data/utils.py index 1db2f6676f..ae0180f4b5 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -257,7 +257,7 @@ def list_data_collate(batch: Sequence): return default_collate(data) except RuntimeError as re: re_str = str(re) - if "stack expects each tensor to be equal size" in re_str: + if "equal size" in re_str: re_str += ( "\nMONAI hint: if your transforms intentionally create images of different shapes, creating your " + "`DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem (check its " diff --git a/monai/transforms/inverse.py b/monai/transforms/inverse.py index f9de8746ca..9708f103e6 100644 --- a/monai/transforms/inverse.py +++ b/monai/transforms/inverse.py @@ -12,6 +12,7 @@ from typing import Dict, Hashable, Optional, Tuple import numpy as np +import torch from monai.transforms.transform import RandomizableTransform, Transform from monai.utils.enums import InverseKeys @@ -89,8 +90,15 @@ def push_transform( def check_transforms_match(self, transform: dict) -> None: """Check transforms are of same instance.""" - if transform[InverseKeys.ID.value] != id(self): - raise RuntimeError("Should inverse most recently applied invertible transform first") + if transform[InverseKeys.ID.value] == id(self): + return + # basic check if multiprocessing uses 'spawn' (objects get recreated so don't have same ID) + if ( + torch.multiprocessing.get_start_method(allow_none=False) == "spawn" + and transform[InverseKeys.CLASS_NAME.value] == self.__class__.__name__ + ): + return + raise RuntimeError("Should inverse most recently applied invertible transform first") def get_most_recent_transform(self, data: dict, key: Hashable) 
-> dict: """Get most recent transform.""" diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index caa1a34e08..32327ec302 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -1090,7 +1090,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda for key in self.key_iterator(d): if self._do_transform: d[key] = flipper(d[key]) - self.push_transform(d, key, extra_info={"axis": self._axis}) + self.push_transform(d, key, extra_info={"axis": self._axis}) return d def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: @@ -1484,10 +1484,6 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda # match the spatial dim of first item self.randomize() d = dict(data) - if not self._do_transform: - for key in self.keys: - self.push_transform(d, key, extra_info={"zoom": self._zoom}) - return d img_dims = data[self.keys[0]].ndim if len(self._zoom) == 1: @@ -1501,12 +1497,13 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda d, self.mode, self.padding_mode, self.align_corners ): self.push_transform(d, key, extra_info={"zoom": self._zoom}) - d[key] = zoomer( - d[key], - mode=mode, - padding_mode=padding_mode, - align_corners=align_corners, - ) + if self._do_transform: + d[key] = zoomer( + d[key], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) return d def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: diff --git a/tests/test_inverse.py b/tests/test_inverse.py index c1225ea11c..8ce4e3bbf3 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -14,6 +14,7 @@ import unittest from functools import partial from typing import TYPE_CHECKING, List, Tuple +from unittest.case import skipUnless import numpy as np import torch @@ -512,11 +513,6 @@ def test_inverse(self, _, data_name, acceptable_diff, *transforms): t.set_random_state(seed=get_seed()) forwards.append(t(forwards[-1])) - # Check that error is thrown when inverse are used out of order. - t = SpatialPadd("image", [10, 5]) - with self.assertRaises(RuntimeError): - t.inverse(forwards[-1]) - # Apply inverses fwd_bck = forwards[-1].copy() for i, t in enumerate(reversed(transforms)): @@ -524,6 +520,18 @@ def test_inverse(self, _, data_name, acceptable_diff, *transforms): fwd_bck = t.inverse(fwd_bck) self.check_inverse(name, data.keys(), forwards[-i - 2], fwd_bck, forwards[-1], acceptable_diff) + # skip this test if multiprocessing uses 'spawn', as the check is only basic anyway + @skipUnless(torch.multiprocessing.get_start_method(allow_none=False) == "spawn", "requires spawn") + def test_fail(self): + + t1 = SpatialPadd("image", [10, 5]) + data = t1(self.all_data["2D"]) + + # Check that error is thrown when inverse are used out of order. + t2 = ResizeWithPadOrCropd("image", [10, 5]) + with self.assertRaises(RuntimeError): + t2.inverse(data) + def test_inverse_inferred_seg(self): test_data = [] diff --git a/tests/test_inverse_collation.py b/tests/test_inverse_collation.py new file mode 100644 index 0000000000..c5d77fb8f2 --- /dev/null +++ b/tests/test_inverse_collation.py @@ -0,0 +1,93 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest +from typing import TYPE_CHECKING + +import numpy as np +from parameterized import parameterized + +from monai.data import CacheDataset, DataLoader, create_test_image_3d, pad_list_data_collate +from monai.transforms import ( + AddChanneld, + Compose, + LoadImaged, + RandAffined, + RandAxisFlipd, + RandFlipd, + RandRotate90d, + RandRotated, + RandZoomd, + ResizeWithPadOrCropd, +) +from monai.utils import optional_import, set_determinism +from tests.utils import make_nifti_image + +if TYPE_CHECKING: + + has_nib = True +else: + _, has_nib = optional_import("nibabel") + +KEYS = ["image", "label"] + +TESTS = [ + (t.__class__.__name__ + (" pad_list_data_collate" if collate_fn else " default_collate"), t, collate_fn) + for collate_fn in [None, pad_list_data_collate] + for t in [ + RandFlipd(keys=KEYS, spatial_axis=[1, 2]), + RandAxisFlipd(keys=KEYS), + RandRotate90d(keys=KEYS, spatial_axes=(1, 2)), + RandZoomd(keys=KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True), + RandRotated(keys=KEYS, range_x=np.pi), + RandAffined(keys=KEYS, rotate_range=np.pi), + ] +] + + +class TestInverseCollation(unittest.TestCase): + """Test collation for of random transformations with prob == 0 and 1.""" + + def setUp(self): + if not has_nib: + self.skipTest("nibabel required for test_inverse") + + set_determinism(seed=0) + + im_fname, seg_fname = [make_nifti_image(i) for i in create_test_image_3d(101, 100, 107)] + load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)]) + self.batch_size = 10 + self.data = [load_ims({"image": im_fname, "label": seg_fname}) for _ in range(self.batch_size)] + + def tearDown(self): + set_determinism(seed=None) + + @parameterized.expand(TESTS) + def test_collation(self, _, transform, collate_fn): + + if collate_fn: + modified_transform = transform + else: + modified_transform = Compose([transform, ResizeWithPadOrCropd(KEYS, [100, 100, 100])]) + + # num workers = 0 for mac + num_workers = 2 if sys.platform != "darwin" else 0 + + dataset = CacheDataset(self.data, transform=modified_transform, progress=False) + loader = DataLoader(dataset, num_workers, batch_size=self.batch_size, collate_fn=collate_fn) + + for _ in loader: + pass + + +if __name__ == "__main__": + unittest.main() From 4411758c4eacb3d017f156969870166024934e1c Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Thu, 18 Mar 2021 18:18:53 -0400 Subject: [PATCH 075/457] Remove WSIReader from LoadImage (#1796) * Remove WSIReader from LoadImage Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Remove WSIReader from LoadImaged Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/transforms/io/array.py | 7 +++---- monai/transforms/io/dictionary.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 60437307be..164c7b0e76 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -19,7 +19,7 @@ import torch from monai.config import DtypeLike -from monai.data.image_reader import ImageReader, ITKReader, NibabelReader, 
NumpyReader, PILReader, WSIReader +from monai.data.image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader from monai.data.nifti_saver import NiftiSaver from monai.data.png_saver import PNGSaver from monai.transforms.transform import Transform @@ -78,7 +78,7 @@ def __init__( reader: register reader to load image file and meta data, if None, still can register readers at runtime or use the default readers. If a string of reader name provided, will construct a reader object with the `*args` and `**kwargs` parameters, supported reader name: "NibabelReader", - "PILReader", "ITKReader", "NumpyReader", "WSIReader". + "PILReader", "ITKReader", "NumpyReader". image_only: if True return only the image volume, otherwise return image data array and header dict. dtype: if not None convert the loaded image to this data type. args: additional parameters for reader if providing a reader name. @@ -90,7 +90,7 @@ def __init__( """ # set predefined readers as default - self.readers: List[ImageReader] = [ITKReader(), NumpyReader(), PILReader(), NibabelReader(), WSIReader()] + self.readers: List[ImageReader] = [ITKReader(), NumpyReader(), PILReader(), NibabelReader()] if reader is not None: if isinstance(reader, str): supported_readers = { @@ -98,7 +98,6 @@ def __init__( "pilreader": PILReader, "itkreader": ITKReader, "numpyreader": NumpyReader, - "wsireader": WSIReader, } reader = reader.lower() if reader not in supported_readers: diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index 7f663ea303..79f8561d5e 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -71,7 +71,7 @@ def __init__( reader: register reader to load image file and meta data, if None, still can register readers at runtime or use the default readers. If a string of reader name provided, will construct a reader object with the `*args` and `**kwargs` parameters, supported reader name: "NibabelReader", - "PILReader", "ITKReader", "NumpyReader", "WSIReader". + "PILReader", "ITKReader", "NumpyReader". dtype: if not None convert the loaded image data to this data type. meta_key_postfix: use `key_{postfix}` to store the metadata of the nifti image, default is `meta_dict`. The meta data is a dictionary object. From 7d257434f6164e267bd03eddc45e9415d35e3f40 Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Fri, 19 Mar 2021 17:26:17 +0800 Subject: [PATCH 076/457] 1804 add pretrain options (#1805) * add pretrain options Signed-off-by: Yiheng Wang --- monai/networks/nets/densenet.py | 113 ++++++++++++++--------- monai/networks/nets/senet.py | 153 ++++++++++++++++++-------------- tests/test_densenet.py | 18 +++- tests/test_senet.py | 26 +++++- 4 files changed, 195 insertions(+), 115 deletions(-) diff --git a/monai/networks/nets/densenet.py b/monai/networks/nets/densenet.py index a59ab99e68..4b4f2cc6a4 100644 --- a/monai/networks/nets/densenet.py +++ b/monai/networks/nets/densenet.py @@ -115,6 +115,11 @@ class DenseNet(nn.Module): bn_size: multiplicative factor for number of bottle neck layers. (i.e. bn_size * k features in the bottleneck layer) dropout_prob: dropout rate after each dense layer. + pretrained: whether to load ImageNet pretrained weights when `spatial_dims == 2`. + In order to load weights correctly, Please ensure that the `block_config` + is consistent with the corresponding arch. + pretrained_arch: the arch name for pretrained weights. 
+ progress: If True, displays a progress bar of the download to stderr. """ def __init__( @@ -127,6 +132,9 @@ def __init__( block_config: Sequence[int] = (6, 12, 24, 16), bn_size: int = 4, dropout_prob: float = 0.0, + pretrained: bool = False, + pretrained_arch: str = "densenet121", + progress: bool = True, ) -> None: super(DenseNet, self).__init__() @@ -190,43 +198,49 @@ def __init__( elif isinstance(m, nn.Linear): nn.init.constant_(torch.as_tensor(m.bias), 0) + if pretrained: + self._load_state_dict(pretrained_arch, progress) + def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.features(x) x = self.class_layers(x) return x + def _load_state_dict(self, arch, progress): + """ + This function is used to load pretrained models. + Adapted from `PyTorch Hub 2D version + `_ + """ + model_urls = { + "densenet121": "https://download.pytorch.org/models/densenet121-a639ec97.pth", + "densenet169": "https://download.pytorch.org/models/densenet169-b2777c0a.pth", + "densenet201": "https://download.pytorch.org/models/densenet201-c1103571.pth", + } + if arch in model_urls.keys(): + model_url = model_urls[arch] + else: + raise ValueError( + "only 'densenet121', 'densenet169' and 'densenet201' are supported to load pretrained weights." + ) + pattern = re.compile( + r"^(.*denselayer\d+)(\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" + ) -model_urls = { - "densenet121": "https://download.pytorch.org/models/densenet121-a639ec97.pth", - "densenet169": "https://download.pytorch.org/models/densenet169-b2777c0a.pth", - "densenet201": "https://download.pytorch.org/models/densenet201-c1103571.pth", -} - - -def _load_state_dict(model, model_url, progress): - """ - This function is used to load pretrained models. - Adapted from `PyTorch Hub 2D version - `_ - """ - pattern = re.compile( - r"^(.*denselayer\d+)(\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" - ) - - state_dict = load_state_dict_from_url(model_url, progress=progress) - for key in list(state_dict.keys()): - res = pattern.match(key) - if res: - new_key = res.group(1) + ".layers" + res.group(2) + res.group(3) - state_dict[new_key] = state_dict[key] - del state_dict[key] + state_dict = load_state_dict_from_url(model_url, progress=progress) + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + ".layers" + res.group(2) + res.group(3) + state_dict[new_key] = state_dict[key] + del state_dict[key] - model_dict = model.state_dict() - state_dict = { - k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape) - } - model_dict.update(state_dict) - model.load_state_dict(model_dict) + model_dict = self.state_dict() + state_dict = { + k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape) + } + model_dict.update(state_dict) + self.load_state_dict(model_dict) def densenet121(pretrained: bool = False, progress: bool = True, **kwargs) -> DenseNet: @@ -235,10 +249,15 @@ def densenet121(pretrained: bool = False, progress: bool = True, **kwargs) -> De from `PyTorch Hub 2D version `_ """ - model = DenseNet(init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), **kwargs) - if pretrained: - arch = "densenet121" - _load_state_dict(model, model_urls[arch], progress) + model = DenseNet( + init_features=64, + growth_rate=32, + block_config=(6, 12, 24, 16), + pretrained=pretrained, + pretrained_arch="densenet121", + progress=progress, + **kwargs, + ) return 
model @@ -248,10 +267,15 @@ def densenet169(pretrained: bool = False, progress: bool = True, **kwargs) -> De from `PyTorch Hub 2D version `_ """ - model = DenseNet(init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), **kwargs) - if pretrained: - arch = "densenet169" - _load_state_dict(model, model_urls[arch], progress) + model = DenseNet( + init_features=64, + growth_rate=32, + block_config=(6, 12, 32, 32), + pretrained=pretrained, + pretrained_arch="densenet169", + progress=progress, + **kwargs, + ) return model @@ -261,10 +285,15 @@ def densenet201(pretrained: bool = False, progress: bool = True, **kwargs) -> De from `PyTorch Hub 2D version `_ """ - model = DenseNet(init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), **kwargs) - if pretrained: - arch = "densenet201" - _load_state_dict(model, model_urls[arch], progress) + model = DenseNet( + init_features=64, + growth_rate=32, + block_config=(6, 12, 48, 32), + pretrained=pretrained, + pretrained_arch="densenet201", + progress=progress, + **kwargs, + ) return model diff --git a/monai/networks/nets/senet.py b/monai/networks/nets/senet.py index ef67f853d6..333a3b1159 100644 --- a/monai/networks/nets/senet.py +++ b/monai/networks/nets/senet.py @@ -66,7 +66,11 @@ class SENet(nn.Module): - For SE-ResNeXt models: False num_classes: number of outputs in `last_linear` layer. for all models: 1000 - + pretrained: whether to load ImageNet pretrained weights when `spatial_dims == 2`. + In order to load weights correctly, Please ensure that the `block_config` + is consistent with the corresponding arch. + pretrained_arch: the arch name for pretrained weights. + progress: If True, displays a progress bar of the download to stderr. """ def __init__( @@ -83,6 +87,9 @@ def __init__( downsample_kernel_size: int = 3, input_3x3: bool = True, num_classes: int = 1000, + pretrained: bool = False, + pretrained_arch: str = "se_resnet50", + progress: bool = True, ) -> None: super(SENet, self).__init__() @@ -176,6 +183,64 @@ def __init__( elif isinstance(m, nn.Linear): nn.init.constant_(torch.as_tensor(m.bias), 0) + if pretrained: + self._load_state_dict(pretrained_arch, progress) + + def _load_state_dict(self, arch, progress): + """ + This function is used to load pretrained models. + """ + model_urls = { + "senet154": "http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth", + "se_resnet50": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth", + "se_resnet101": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth", + "se_resnet152": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth", + "se_resnext50_32x4d": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth", + "se_resnext101_32x4d": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth", + } + if arch in model_urls.keys(): + model_url = model_urls[arch] + else: + raise ValueError( + "only 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', \ + and se_resnext101_32x4d are supported to load pretrained weights." 
+ ) + + pattern_conv = re.compile(r"^(layer[1-4]\.\d\.(?:conv)\d\.)(\w*)$") + pattern_bn = re.compile(r"^(layer[1-4]\.\d\.)(?:bn)(\d\.)(\w*)$") + pattern_se = re.compile(r"^(layer[1-4]\.\d\.)(?:se_module.fc1.)(\w*)$") + pattern_se2 = re.compile(r"^(layer[1-4]\.\d\.)(?:se_module.fc2.)(\w*)$") + pattern_down_conv = re.compile(r"^(layer[1-4]\.\d\.)(?:downsample.0.)(\w*)$") + pattern_down_bn = re.compile(r"^(layer[1-4]\.\d\.)(?:downsample.1.)(\w*)$") + + state_dict = load_state_dict_from_url(model_url, progress=progress) + for key in list(state_dict.keys()): + new_key = None + if pattern_conv.match(key): + new_key = re.sub(pattern_conv, r"\1conv.\2", key) + elif pattern_bn.match(key): + new_key = re.sub(pattern_bn, r"\1conv\2adn.N.\3", key) + elif pattern_se.match(key): + state_dict[key] = state_dict[key].squeeze() + new_key = re.sub(pattern_se, r"\1se_layer.fc.0.\2", key) + elif pattern_se2.match(key): + state_dict[key] = state_dict[key].squeeze() + new_key = re.sub(pattern_se2, r"\1se_layer.fc.2.\2", key) + elif pattern_down_conv.match(key): + new_key = re.sub(pattern_down_conv, r"\1project.conv.\2", key) + elif pattern_down_bn.match(key): + new_key = re.sub(pattern_down_bn, r"\1project.adn.N.\2", key) + if new_key: + state_dict[new_key] = state_dict[key] + del state_dict[key] + + model_dict = self.state_dict() + state_dict = { + k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape) + } + model_dict.update(state_dict) + self.load_state_dict(model_dict) + def _make_layer( self, block: Type[Union[SEBottleneck, SEResNetBottleneck, SEResNeXtBottleneck]], @@ -248,56 +313,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return x -model_urls = { - "senet154": "http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth", - "se_resnet50": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth", - "se_resnet101": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth", - "se_resnet152": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth", - "se_resnext50_32x4d": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth", - "se_resnext101_32x4d": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth", -} - - -def _load_state_dict(model, model_url, progress): - """ - This function is used to load pretrained models. 
- """ - pattern_conv = re.compile(r"^(layer[1-4]\.\d\.(?:conv)\d\.)(\w*)$") - pattern_bn = re.compile(r"^(layer[1-4]\.\d\.)(?:bn)(\d\.)(\w*)$") - pattern_se = re.compile(r"^(layer[1-4]\.\d\.)(?:se_module.fc1.)(\w*)$") - pattern_se2 = re.compile(r"^(layer[1-4]\.\d\.)(?:se_module.fc2.)(\w*)$") - pattern_down_conv = re.compile(r"^(layer[1-4]\.\d\.)(?:downsample.0.)(\w*)$") - pattern_down_bn = re.compile(r"^(layer[1-4]\.\d\.)(?:downsample.1.)(\w*)$") - - state_dict = load_state_dict_from_url(model_url, progress=progress) - for key in list(state_dict.keys()): - new_key = None - if pattern_conv.match(key): - new_key = re.sub(pattern_conv, r"\1conv.\2", key) - elif pattern_bn.match(key): - new_key = re.sub(pattern_bn, r"\1conv\2adn.N.\3", key) - elif pattern_se.match(key): - state_dict[key] = state_dict[key].squeeze() - new_key = re.sub(pattern_se, r"\1se_layer.fc.0.\2", key) - elif pattern_se2.match(key): - state_dict[key] = state_dict[key].squeeze() - new_key = re.sub(pattern_se2, r"\1se_layer.fc.2.\2", key) - elif pattern_down_conv.match(key): - new_key = re.sub(pattern_down_conv, r"\1project.conv.\2", key) - elif pattern_down_bn.match(key): - new_key = re.sub(pattern_down_bn, r"\1project.adn.N.\2", key) - if new_key: - state_dict[new_key] = state_dict[key] - del state_dict[key] - - model_dict = model.state_dict() - state_dict = { - k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape) - } - model_dict.update(state_dict) - model.load_state_dict(model_dict) - - def senet154( spatial_dims: int, in_channels: int, @@ -320,10 +335,10 @@ def senet154( dropout_prob=0.2, dropout_dim=1, num_classes=num_classes, + pretrained=pretrained, + pretrained_arch="senet154", + progress=progress, ) - if pretrained: - arch = "senet154" - _load_state_dict(model, model_urls[arch], progress) return model @@ -347,10 +362,10 @@ def se_resnet50( input_3x3=False, downsample_kernel_size=1, num_classes=num_classes, + pretrained=pretrained, + pretrained_arch="se_resnet50", + progress=progress, ) - if pretrained: - arch = "se_resnet50" - _load_state_dict(model, model_urls[arch], progress) return model @@ -375,10 +390,10 @@ def se_resnet101( input_3x3=False, downsample_kernel_size=1, num_classes=num_classes, + pretrained=pretrained, + pretrained_arch="se_resnet101", + progress=progress, ) - if pretrained: - arch = "se_resnet101" - _load_state_dict(model, model_urls[arch], progress) return model @@ -403,10 +418,10 @@ def se_resnet152( input_3x3=False, downsample_kernel_size=1, num_classes=num_classes, + pretrained=pretrained, + pretrained_arch="se_resnet152", + progress=progress, ) - if pretrained: - arch = "se_resnet152" - _load_state_dict(model, model_urls[arch], progress) return model @@ -430,10 +445,10 @@ def se_resnext50_32x4d( input_3x3=False, downsample_kernel_size=1, num_classes=num_classes, + pretrained=pretrained, + pretrained_arch="se_resnext50_32x4d", + progress=progress, ) - if pretrained: - arch = "se_resnext50_32x4d" - _load_state_dict(model, model_urls[arch], progress) return model @@ -457,8 +472,8 @@ def se_resnext101_32x4d( input_3x3=False, downsample_kernel_size=1, num_classes=num_classes, + pretrained=pretrained, + pretrained_arch="se_resnext101_32x4d", + progress=progress, ) - if pretrained: - arch = "se_resnext101_32x4d" - _load_state_dict(model, model_urls[arch], progress) return model diff --git a/tests/test_densenet.py b/tests/test_densenet.py index 41b5fbf7d6..5ead5f5818 100644 --- a/tests/test_densenet.py +++ b/tests/test_densenet.py @@ -17,7 +17,7 
@@ from parameterized import parameterized from monai.networks import eval_mode -from monai.networks.nets import densenet121, densenet169, densenet201, densenet264 +from monai.networks.nets import DenseNet, densenet121, densenet169, densenet201, densenet264 from monai.utils import optional_import from tests.utils import skip_if_quick, test_pretrained_networks, test_script_save @@ -78,6 +78,17 @@ (1, 3, 32, 32), ] +TEST_PRETRAINED_2D_CASE_4 = [ + { + "pretrained": True, + "pretrained_arch": "densenet264", + "progress": False, + "spatial_dims": 2, + "in_channels": 3, + "out_channels": 1, + }, +] + class TestPretrainedDENSENET(unittest.TestCase): @parameterized.expand([TEST_PRETRAINED_2D_CASE_1, TEST_PRETRAINED_2D_CASE_2]) @@ -100,6 +111,11 @@ def test_pretrain_consistency(self, model, input_param, input_shape): expected_result = torchvision_net.features.forward(example) self.assertTrue(torch.all(result == expected_result)) + @parameterized.expand([TEST_PRETRAINED_2D_CASE_4]) + def test_ill_pretrain(self, input_param): + with self.assertRaisesRegex(ValueError, ""): + net = DenseNet(**input_param) + class TestDENSENET(unittest.TestCase): @parameterized.expand(TEST_CASES) diff --git a/tests/test_senet.py b/tests/test_senet.py index c1327ceb7d..a2d96e1f18 100644 --- a/tests/test_senet.py +++ b/tests/test_senet.py @@ -17,7 +17,9 @@ from parameterized import parameterized from monai.networks import eval_mode +from monai.networks.blocks.squeeze_and_excitation import SEBottleneck from monai.networks.nets import ( + SENet, se_resnet50, se_resnet101, se_resnet152, @@ -46,7 +48,20 @@ TEST_CASE_5 = [se_resnext50_32x4d, NET_ARGS] TEST_CASE_6 = [se_resnext101_32x4d, NET_ARGS] -TEST_CASE_PRETRAINED = [se_resnet50, {"spatial_dims": 2, "in_channels": 3, "num_classes": 2, "pretrained": True}] +TEST_CASE_PRETRAINED_1 = [se_resnet50, {"spatial_dims": 2, "in_channels": 3, "num_classes": 2, "pretrained": True}] +TEST_CASE_PRETRAINED_2 = [ + { + "spatial_dims": 2, + "in_channels": 3, + "block": SEBottleneck, + "layers": [3, 8, 36, 3], + "groups": 64, + "reduction": 16, + "num_classes": 2, + "pretrained": True, + "pretrained_arch": "resnet50", + } +] class TestSENET(unittest.TestCase): @@ -67,7 +82,7 @@ def test_script(self, net, net_args): class TestPretrainedSENET(unittest.TestCase): - @parameterized.expand([TEST_CASE_PRETRAINED]) + @parameterized.expand([TEST_CASE_PRETRAINED_1]) def test_senet_shape(self, model, input_param): net = test_pretrained_networks(model, input_param, device) input_data = torch.randn(3, 3, 64, 64).to(device) @@ -77,7 +92,7 @@ def test_senet_shape(self, model, input_param): result = net(input_data) self.assertEqual(result.shape, expected_shape) - @parameterized.expand([TEST_CASE_PRETRAINED]) + @parameterized.expand([TEST_CASE_PRETRAINED_1]) @skipUnless(has_cadene_pretrain, "Requires `pretrainedmodels` package.") def test_pretrain_consistency(self, model, input_param): input_data = torch.randn(1, 3, 64, 64).to(device) @@ -92,6 +107,11 @@ def test_pretrain_consistency(self, model, input_param): # a conv layer with kernel size equals to 1. It may bring a little difference. 
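The regex remapping in `SENet._load_state_dict` above is the crux of the pretrained-weight support: Cadene-style parameter names are rewritten into MONAI's fused conv+ADN layout before loading. A minimal sketch of one of those rules, using hypothetical keys rather than names from a real checkpoint:

    import re

    # The batch-norm rule from `_load_state_dict`: "layerX.Y.bnN.param" becomes
    # "layerX.Y.convN.adn.N.param", matching MONAI's Convolution + ADN naming.
    pattern_bn = re.compile(r"^(layer[1-4]\.\d\.)(?:bn)(\d\.)(\w*)$")

    for key in ("layer1.0.bn1.weight", "layer4.2.bn3.running_mean"):  # hypothetical keys
        print(re.sub(pattern_bn, r"\1conv\2adn.N.\3", key))
    # layer1.0.conv1.adn.N.weight
    # layer4.2.conv3.adn.N.running_mean

Keys whose shapes do not match the target model are then filtered out before `load_state_dict`, so a partially compatible checkpoint loads without error.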
self.assertTrue(torch.allclose(result, expected_result, rtol=1e-5, atol=1e-5))

+    @parameterized.expand([TEST_CASE_PRETRAINED_2])
+    def test_ill_pretrain(self, input_param):
+        with self.assertRaisesRegex(ValueError, ""):
+            net = SENet(**input_param)
+

 if __name__ == "__main__":
     unittest.main()

From 70388fa298384f698dfb3ba790e3bedb1ffcb07b Mon Sep 17 00:00:00 2001
From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com>
Date: Sat, 20 Mar 2021 02:06:17 +0800
Subject: [PATCH 077/457] 1809 add std shift intensity transform (#1810)

* add pretrain options

Signed-off-by: Yiheng Wang
---
 docs/source/transforms.rst               | 24 +++++++
 monai/transforms/__init__.py             |  8 +++
 monai/transforms/intensity/array.py      | 92 +++++++++++++++++++++-
 monai/transforms/intensity/dictionary.py | 88 +++++++++++++++++++++++
 tests/test_normalize_intensity.py        |  2 +-
 tests/test_rand_std_shift_intensity.py   | 32 +++++++++
 tests/test_rand_std_shift_intensityd.py  | 33 +++++++++
 tests/test_std_shift_intensity.py        | 57 +++++++++++++++
 tests/test_std_shift_intensityd.py       | 61 ++++++++++++++++
 9 files changed, 395 insertions(+), 2 deletions(-)
 create mode 100644 tests/test_rand_std_shift_intensity.py
 create mode 100644 tests/test_rand_std_shift_intensityd.py
 create mode 100644 tests/test_std_shift_intensity.py
 create mode 100644 tests/test_std_shift_intensityd.py

diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst
index dcdeab1ac8..768c0665a2 100644
--- a/docs/source/transforms.rst
+++ b/docs/source/transforms.rst
@@ -143,6 +143,18 @@ Intensity
    :members:
    :special-members: __call__

+`StdShiftIntensity`
+"""""""""""""""""""
+.. autoclass:: StdShiftIntensity
+    :members:
+    :special-members: __call__
+
+`RandStdShiftIntensity`
+"""""""""""""""""""""""
+.. autoclass:: RandStdShiftIntensity
+    :members:
+    :special-members: __call__
+
 `ScaleIntensity`
 """"""""""""""""
 .. autoclass:: ScaleIntensity
@@ -638,6 +650,18 @@ Intensity (Dict)
    :members:
    :special-members: __call__

+`StdShiftIntensityd`
+""""""""""""""""""""
+.. autoclass:: StdShiftIntensityd
+    :members:
+    :special-members: __call__
+
+`RandStdShiftIntensityd`
+""""""""""""""""""""""""
+.. autoclass:: RandStdShiftIntensityd
+    :members:
+    :special-members: __call__
+
 `ScaleIntensityd`
 """""""""""""""""
 ..
autoclass:: ScaleIntensityd diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 5b12da4d21..0ce09e69d2 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -78,11 +78,13 @@ RandHistogramShift, RandScaleIntensity, RandShiftIntensity, + RandStdShiftIntensity, SavitzkyGolaySmooth, ScaleIntensity, ScaleIntensityRange, ScaleIntensityRangePercentiles, ShiftIntensity, + StdShiftIntensity, ThresholdIntensity, ) from .intensity.dictionary import ( @@ -122,6 +124,9 @@ RandShiftIntensityd, RandShiftIntensityD, RandShiftIntensityDict, + RandStdShiftIntensityd, + RandStdShiftIntensityD, + RandStdShiftIntensityDict, ScaleIntensityd, ScaleIntensityD, ScaleIntensityDict, @@ -134,6 +139,9 @@ ShiftIntensityd, ShiftIntensityD, ShiftIntensityDict, + StdShiftIntensityd, + StdShiftIntensityD, + StdShiftIntensityDict, ThresholdIntensityd, ThresholdIntensityD, ThresholdIntensityDict, diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 91407323b9..abd7de151f 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -30,6 +30,8 @@ "RandGaussianNoise", "ShiftIntensity", "RandShiftIntensity", + "StdShiftIntensity", + "RandStdShiftIntensity", "ScaleIntensity", "RandScaleIntensity", "NormalizeIntensity", @@ -135,6 +137,94 @@ def __call__(self, img: np.ndarray) -> np.ndarray: return shifter(img) +class StdShiftIntensity(Transform): + """ + Shift intensity for the image with a factor and the standard deviation of the image + by: ``v = v + factor * std(v)``. + This transform can focus on only non-zero values or the entire image, + and can also calculate the std on each channel separately. + + Args: + factor: factor shift by ``v = v + factor * std(v)``. + nonzero: whether only count non-zero values. + channel_wise: if True, calculate on each channel separately. Please ensure + that the first dimension represents the channel of the image if True. + """ + + def __init__(self, factor: float, nonzero: bool = False, channel_wise: bool = False) -> None: + self.factor = factor + self.nonzero = nonzero + self.channel_wise = channel_wise + + def _stdshift(self, img: np.ndarray) -> np.ndarray: + slices = (img != 0) if self.nonzero else np.ones(img.shape, dtype=bool) + if not np.any(slices): + return img + offset = self.factor * np.std(img[slices]) + img[slices] = img[slices] + offset + return img + + def __call__(self, img: np.ndarray) -> np.ndarray: + """ + Apply the transform to `img`. + """ + if img.dtype != float: + img = img.astype(float) + if self.channel_wise: + for i, d in enumerate(img): + img[i] = self._stdshift(d) + else: + img = self._stdshift(img) + return img + + +class RandStdShiftIntensity(RandomizableTransform): + """ + Shift intensity for the image with a factor and the standard deviation of the image + by: ``v = v + factor * std(v)`` where the `factor` is randomly picked. + """ + + def __init__( + self, + factors: Union[Tuple[float, float], float], + prob: float = 0.1, + nonzero: bool = False, + channel_wise: bool = False, + ) -> None: + """ + Args: + factors: if tuple, the randomly picked range is (min(factors), max(factors)). + If single number, the range is (-factors, factors). + prob: probability of std shift. + nonzero: whether only count non-zero values. + channel_wise: if True, calculate on each channel separately. 
+ + """ + RandomizableTransform.__init__(self, prob) + if isinstance(factors, (int, float)): + self.factors = (min(-factors, factors), max(-factors, factors)) + else: + if len(factors) != 2: + raise AssertionError("factors should be a number or pair of numbers.") + self.factors = (min(factors), max(factors)) + self.nonzero = nonzero + self.channel_wise = channel_wise + + def randomize(self, data: Optional[Any] = None) -> None: + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) + super().randomize(None) + + def __call__(self, img: np.ndarray) -> np.ndarray: + """ + Apply the transform to `img`. + """ + self.randomize() + if not self._do_transform: + return img + shifter = StdShiftIntensity(factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise) + return shifter(img) + + class ScaleIntensity(Transform): """ Scale the intensity of input image to the given value range (minv, maxv). @@ -173,7 +263,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray: class RandScaleIntensity(RandomizableTransform): """ Randomly scale the intensity of input image by ``v = v * (1 + factor)`` where the `factor` - is randomly picked from (-factors[0], factors[0]). + is randomly picked. """ def __init__(self, factors: Union[Tuple[float, float], float], prob: float = 0.1) -> None: diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 7d4319eab0..881a0d3dc9 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -32,6 +32,7 @@ ScaleIntensityRange, ScaleIntensityRangePercentiles, ShiftIntensity, + StdShiftIntensity, ThresholdIntensity, ) from monai.transforms.transform import MapTransform, RandomizableTransform @@ -211,6 +212,91 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d +class StdShiftIntensityd(MapTransform): + """ + Dictionary-based wrapper of :py:class:`monai.transforms.StdShiftIntensity`. + """ + + def __init__( + self, + keys: KeysCollection, + factor: float, + nonzero: bool = False, + channel_wise: bool = False, + allow_missing_keys: bool = False, + ) -> None: + """ + Args: + keys: keys of the corresponding items to be transformed. + See also: :py:class:`monai.transforms.compose.MapTransform` + factor: factor shift by ``v = v + factor * std(v)``. + nonzero: whether only count non-zero values. + channel_wise: if True, calculate on each channel separately. Please ensure + that the first dimension represents the channel of the image if True. + allow_missing_keys: don't raise exception if key is missing. + """ + super().__init__(keys, allow_missing_keys) + self.shifter = StdShiftIntensity(factor, nonzero, channel_wise) + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = dict(data) + for key in self.key_iterator(d): + d[key] = self.shifter(d[key]) + return d + + +class RandStdShiftIntensityd(RandomizableTransform, MapTransform): + """ + Dictionary-based version :py:class:`monai.transforms.RandStdShiftIntensity`. + """ + + def __init__( + self, + keys: KeysCollection, + factors: Union[Tuple[float, float], float], + prob: float = 0.1, + nonzero: bool = False, + channel_wise: bool = False, + allow_missing_keys: bool = False, + ) -> None: + """ + Args: + keys: keys of the corresponding items to be transformed. + See also: :py:class:`monai.transforms.compose.MapTransform` + factors: if tuple, the randomly picked range is (min(factors), max(factors)). 
+ If single number, the range is (-factors, factors). + prob: probability of std shift. + nonzero: whether only count non-zero values. + channel_wise: if True, calculate on each channel separately. + allow_missing_keys: don't raise exception if key is missing. + """ + MapTransform.__init__(self, keys, allow_missing_keys) + RandomizableTransform.__init__(self, prob) + + if isinstance(factors, (int, float)): + self.factors = (min(-factors, factors), max(-factors, factors)) + else: + if len(factors) != 2: + raise AssertionError("factors should be a number or pair of numbers.") + self.factors = (min(factors), max(factors)) + self.nonzero = nonzero + self.channel_wise = channel_wise + + def randomize(self, data: Optional[Any] = None) -> None: + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) + super().randomize(None) + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = dict(data) + self.randomize() + if not self._do_transform: + return d + shifter = StdShiftIntensity(self.factor, self.nonzero, self.channel_wise) + for key in self.key_iterator(d): + d[key] = shifter(d[key]) + return d + + class ScaleIntensityd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.ScaleIntensity`. @@ -812,6 +898,8 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda RandGaussianNoiseD = RandGaussianNoiseDict = RandGaussianNoised ShiftIntensityD = ShiftIntensityDict = ShiftIntensityd RandShiftIntensityD = RandShiftIntensityDict = RandShiftIntensityd +StdShiftIntensityD = StdShiftIntensityDict = StdShiftIntensityd +RandStdShiftIntensityD = RandStdShiftIntensityDict = RandStdShiftIntensityd ScaleIntensityD = ScaleIntensityDict = ScaleIntensityd RandScaleIntensityD = RandScaleIntensityDict = RandScaleIntensityd NormalizeIntensityD = NormalizeIntensityDict = NormalizeIntensityd diff --git a/tests/test_normalize_intensity.py b/tests/test_normalize_intensity.py index ecf162e12f..dfb0de18fa 100644 --- a/tests/test_normalize_intensity.py +++ b/tests/test_normalize_intensity.py @@ -58,7 +58,7 @@ class TestNormalizeIntensity(NumpyImageTestCase2D): def test_default(self): normalizer = NormalizeIntensity() - normalized = normalizer(self.imt) + normalized = normalizer(self.imt.copy()) self.assertTrue(normalized.dtype == np.float32) expected = (self.imt - np.mean(self.imt)) / np.std(self.imt) np.testing.assert_allclose(normalized, expected, rtol=1e-5) diff --git a/tests/test_rand_std_shift_intensity.py b/tests/test_rand_std_shift_intensity.py new file mode 100644 index 0000000000..9aff50ab66 --- /dev/null +++ b/tests/test_rand_std_shift_intensity.py @@ -0,0 +1,32 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
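Before the new tests, a quick sketch of the contract the array transforms added above actually implement, assuming only the definitions in this patch (`v = v + factor * std(v)`, with a scalar `factors` expanded to the symmetric range `(-factors, factors)`):

    import numpy as np

    from monai.transforms import RandStdShiftIntensity, StdShiftIntensity

    img = np.arange(6, dtype=float).reshape(2, 3)

    # Deterministic form: every voxel shifts by factor * std over the whole image.
    out = StdShiftIntensity(factor=0.5)(img.copy())  # __call__ shifts in place, hence copy()
    assert np.allclose(out, img + 0.5 * np.std(img))

    # Random form: factors=1.0 expands to the range (-1.0, 1.0); prob=1.0 means the
    # shift always fires, with only the factor itself randomly sampled.
    shifter = RandStdShiftIntensity(factors=1.0, prob=1.0)
    shifter.set_random_state(seed=0)
    out = shifter(img.copy())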
+ +import unittest + +import numpy as np + +from monai.transforms import RandStdShiftIntensity +from tests.utils import NumpyImageTestCase2D + + +class TestRandStdShiftIntensity(NumpyImageTestCase2D): + def test_value(self): + shifter = RandStdShiftIntensity(factors=1.0, prob=1.0) + shifter.set_random_state(seed=0) + result = shifter(self.imt) + np.random.seed(0) + factor = np.random.uniform(low=-1.0, high=1.0) + expected = self.imt + factor * np.std(self.imt) + np.testing.assert_allclose(result, expected, rtol=1e-5) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_rand_std_shift_intensityd.py b/tests/test_rand_std_shift_intensityd.py new file mode 100644 index 0000000000..0cb6bd66be --- /dev/null +++ b/tests/test_rand_std_shift_intensityd.py @@ -0,0 +1,33 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np + +from monai.transforms import RandStdShiftIntensityd +from tests.utils import NumpyImageTestCase2D + + +class TestRandStdShiftIntensityd(NumpyImageTestCase2D): + def test_value(self): + key = "img" + shifter = RandStdShiftIntensityd(keys=[key], factors=1.0, prob=1.0) + shifter.set_random_state(seed=0) + result = shifter({key: self.imt}) + np.random.seed(0) + factor = np.random.uniform(low=-1.0, high=1.0) + expected = self.imt + factor * np.std(self.imt) + np.testing.assert_allclose(result[key], expected, rtol=1e-5) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_std_shift_intensity.py b/tests/test_std_shift_intensity.py new file mode 100644 index 0000000000..a0a3b3ff0f --- /dev/null +++ b/tests/test_std_shift_intensity.py @@ -0,0 +1,57 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np + +from monai.transforms import ShiftIntensity, StdShiftIntensity +from tests.utils import NumpyImageTestCase2D + + +class TestStdShiftIntensity(NumpyImageTestCase2D): + def test_value(self): + factor = np.random.rand() + offset = np.std(self.imt) * factor + shifter = ShiftIntensity(offset=offset) + expected = shifter(self.imt) + std_shifter = StdShiftIntensity(factor=factor) + result = std_shifter(self.imt) + np.testing.assert_allclose(result, expected, rtol=1e-5) + + def test_zerostd(self): + image = np.ones([2, 3, 3]) + for nonzero in [True, False]: + for channel_wise in [True, False]: + factor = np.random.rand() + std_shifter = StdShiftIntensity(factor=factor, nonzero=nonzero, channel_wise=channel_wise) + result = std_shifter(image) + np.testing.assert_equal(result, image) + + def test_nonzero(self): + image = np.asarray([[4.0, 0.0, 2.0], [0, 2, 4]]) # std = 1 + factor = np.random.rand() + std_shifter = StdShiftIntensity(factor=factor, nonzero=True) + result = std_shifter(image) + expected = np.asarray([[4 + factor, 0, 2 + factor], [0, 2 + factor, 4 + factor]]) + np.testing.assert_equal(result, expected) + + def test_channel_wise(self): + image = np.stack((np.asarray([1.0, 2.0]), np.asarray([1.0, 1.0]))) # std: 0.5, 0 + factor = np.random.rand() + std_shifter = StdShiftIntensity(factor=factor, channel_wise=True) + result = std_shifter(image) + expected = np.stack((np.asarray([1 + 0.5 * factor, 2 + 0.5 * factor]), np.asarray([1, 1]))) + np.testing.assert_equal(result, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_std_shift_intensityd.py b/tests/test_std_shift_intensityd.py new file mode 100644 index 0000000000..f5c2dd650c --- /dev/null +++ b/tests/test_std_shift_intensityd.py @@ -0,0 +1,61 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
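The `nonzero` and `channel_wise` flags reuse the same formula but change where the std is taken; the arithmetic behind the array tests above, spelled out with the same values:

    import numpy as np

    from monai.transforms import StdShiftIntensity

    # nonzero=True: std is computed over, and applied to, non-zero voxels only.
    # The non-zero values {4, 2, 2, 4} have std 1, so each shifts by exactly `factor`.
    img = np.asarray([[4.0, 0.0, 2.0], [0.0, 2.0, 4.0]])
    print(StdShiftIntensity(factor=0.5, nonzero=True)(img.copy()))
    # [[4.5 0.  2.5]
    #  [0.  2.5 4.5]]

    # channel_wise=True: dimension 0 is treated as channels, each using its own std,
    # so a constant channel (std 0) is left untouched.
    chans = np.stack([np.asarray([1.0, 2.0]), np.asarray([1.0, 1.0])])  # stds 0.5 and 0
    print(StdShiftIntensity(factor=1.0, channel_wise=True)(chans.copy()))
    # [[1.5 2.5]
    #  [1.  1. ]]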
+ +import unittest + +import numpy as np + +from monai.transforms import ShiftIntensityd, StdShiftIntensityd +from tests.utils import NumpyImageTestCase2D + + +class TestStdShiftIntensityd(NumpyImageTestCase2D): + def test_value(self): + key = "img" + factor = np.random.rand() + offset = np.std(self.imt) * factor + shifter = ShiftIntensityd(keys=[key], offset=offset) + expected = shifter({key: self.imt}) + std_shifter = StdShiftIntensityd(keys=[key], factor=factor) + result = std_shifter({key: self.imt}) + np.testing.assert_allclose(result[key], expected[key], rtol=1e-5) + + def test_zerostd(self): + key = "img" + image = np.ones([2, 3, 3]) + for nonzero in [True, False]: + for channel_wise in [True, False]: + factor = np.random.rand() + std_shifter = StdShiftIntensityd(keys=[key], factor=factor, nonzero=nonzero, channel_wise=channel_wise) + result = std_shifter({key: image}) + np.testing.assert_equal(result[key], image) + + def test_nonzero(self): + key = "img" + image = np.asarray([[4.0, 0.0, 2.0], [0, 2, 4]]) # std = 1 + factor = np.random.rand() + std_shifter = StdShiftIntensityd(keys=[key], factor=factor, nonzero=True) + result = std_shifter({key: image}) + expected = np.asarray([[4 + factor, 0, 2 + factor], [0, 2 + factor, 4 + factor]]) + np.testing.assert_equal(result[key], expected) + + def test_channel_wise(self): + key = "img" + image = np.stack((np.asarray([1.0, 2.0]), np.asarray([1.0, 1.0]))) # std: 0.5, 0 + factor = np.random.rand() + std_shifter = StdShiftIntensityd(keys=[key], factor=factor, channel_wise=True) + result = std_shifter({key: image}) + expected = np.stack((np.asarray([1 + 0.5 * factor, 2 + 0.5 * factor]), np.asarray([1, 1]))) + np.testing.assert_equal(result[key], expected) + + +if __name__ == "__main__": + unittest.main() From 3e313d242f7b534c9f683a6cfb37e4930039ddfb Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Fri, 19 Mar 2021 21:44:34 +0000 Subject: [PATCH 078/457] PadListDataCollate transform (#1813) Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/data/utils.py | 59 +++---------- monai/transforms/__init__.py | 1 + monai/transforms/croppad/batch.py | 129 ++++++++++++++++++++++++++++ monai/transforms/post/dictionary.py | 1 + tests/test_decollate.py | 35 +++++--- tests/test_pad_collation.py | 33 ++++--- 6 files changed, 181 insertions(+), 77 deletions(-) create mode 100644 monai/transforms/croppad/batch.py diff --git a/monai/data/utils.py b/monai/data/utils.py index ae0180f4b5..bdbfa5c636 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -337,64 +337,25 @@ def pad_list_data_collate( mode: Union[NumpyPadMode, str] = NumpyPadMode.CONSTANT, ): """ + Function version of :py:class:`monai.transforms.croppad.batch.PadListDataCollate`. + Same as MONAI's ``list_data_collate``, except any tensors are centrally padded to match the shape of the biggest - tensor in each dimension. + tensor in each dimension. This transform is useful if some of the applied transforms generate batch data of + different sizes. - Note: - Need to use this collate if apply some transforms that can generate batch data. + This can be used on both list and dictionary data. In the case of the dictionary data, this transform will be added + to the list of invertible transforms. + + The inverse can be called using the static method: `monai.transforms.croppad.batch.PadListDataCollate.inverse`. 
Args: batch: batch of data to pad-collate method: padding method (see :py:class:`monai.transforms.SpatialPad`) mode: padding mode (see :py:class:`monai.transforms.SpatialPad`) """ - list_of_dicts = isinstance(batch[0], dict) - for key_or_idx in batch[0].keys() if list_of_dicts else range(len(batch[0])): - max_shapes = [] - for elem in batch: - if not isinstance(elem[key_or_idx], (torch.Tensor, np.ndarray)): - break - max_shapes.append(elem[key_or_idx].shape[1:]) - # len > 0 if objects were arrays - if len(max_shapes) == 0: - continue - max_shape = np.array(max_shapes).max(axis=0) - # If all same size, skip - if np.all(np.array(max_shapes).min(axis=0) == max_shape): - continue - # Do we need to convert output to Tensor? - output_to_tensor = isinstance(batch[0][key_or_idx], torch.Tensor) - - # Use `SpatialPadd` or `SpatialPad` to match sizes - # Default params are central padding, padding with 0's - # If input is dictionary, use the dictionary version so that the transformation is recorded - padder: Union[SpatialPadd, SpatialPad] - if list_of_dicts: - from monai.transforms.croppad.dictionary import SpatialPadd # needs to be here to avoid circular import + from monai.transforms.croppad.batch import PadListDataCollate # needs to be here to avoid circular import - padder = SpatialPadd(key_or_idx, max_shape, method, mode) # type: ignore - - else: - from monai.transforms.croppad.array import SpatialPad # needs to be here to avoid circular import - - padder = SpatialPad(max_shape, method, mode) # type: ignore - - for idx in range(len(batch)): - padded = padder(batch[idx])[key_or_idx] if list_of_dicts else padder(batch[idx][key_or_idx]) - # since tuple is immutable we'll have to recreate - if isinstance(batch[idx], tuple): - batch[idx] = list(batch[idx]) # type: ignore - batch[idx][key_or_idx] = padded - batch[idx] = tuple(batch[idx]) # type: ignore - # else, replace - else: - batch[idx][key_or_idx] = padder(batch[idx])[key_or_idx] - - if output_to_tensor: - batch[idx][key_or_idx] = torch.Tensor(batch[idx][key_or_idx]) - - # After padding, use default list collator - return list_data_collate(batch) + return PadListDataCollate(method, mode)(batch) def worker_init_fn(worker_id: int) -> None: diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 0ce09e69d2..22311cdca6 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -25,6 +25,7 @@ SpatialCrop, SpatialPad, ) +from .croppad.batch import PadListDataCollate from .croppad.dictionary import ( BorderPadd, BorderPadD, diff --git a/monai/transforms/croppad/batch.py b/monai/transforms/croppad/batch.py new file mode 100644 index 0000000000..7cbf39597c --- /dev/null +++ b/monai/transforms/croppad/batch.py @@ -0,0 +1,129 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
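Before the new module itself, a sketch of how the collate class is meant to be wired up, mirroring the tests later in this patch; the "image" key and the shapes are illustrative only:

    import numpy as np
    from torch.utils.data import DataLoader

    from monai.data.utils import decollate_batch
    from monai.transforms import PadListDataCollate

    # Hypothetical items whose spatial sizes differ; the channel dim is excluded from padding.
    data = [{"image": np.zeros((1, 8, 7))}, {"image": np.zeros((1, 10, 9))}]
    loader = DataLoader(data, batch_size=2, collate_fn=PadListDataCollate())

    batch = next(iter(loader))
    print(batch["image"].shape)  # torch.Size([2, 1, 10, 9]), padded up to the largest item

    # Dictionary items record the pad operation, so each decollated item can be cropped back.
    restored = [PadListDataCollate.inverse(d) for d in decollate_batch(batch)]
    print(restored[0]["image"].shape)  # torch.Size([1, 8, 7])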
+""" +A collection of "vanilla" transforms for crop and pad operations acting on batches of data +https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design +""" + +from copy import deepcopy +from typing import Any, Dict, Hashable, Union + +import numpy as np +import torch + +from monai.data.utils import list_data_collate +from monai.transforms.compose import Compose +from monai.transforms.croppad.array import CenterSpatialCrop, SpatialPad +from monai.transforms.inverse import InvertibleTransform +from monai.transforms.utility.array import ToTensor +from monai.utils.enums import InverseKeys, Method, NumpyPadMode + +__all__ = [ + "PadListDataCollate", +] + + +def replace_element(to_replace, batch, idx, key_or_idx): + # since tuple is immutable we'll have to recreate + if isinstance(batch[idx], tuple): + batch_idx_list = list(batch[idx]) + batch_idx_list[key_or_idx] = to_replace + batch[idx] = tuple(batch_idx_list) + # else, replace + else: + batch[idx][key_or_idx] = to_replace + return batch + + +class PadListDataCollate(InvertibleTransform): + """ + Same as MONAI's ``list_data_collate``, except any tensors are centrally padded to match the shape of the biggest + tensor in each dimension. This transform is useful if some of the applied transforms generate batch data of + different sizes. + + This can be used on both list and dictionary data. In the case of the dictionary data, this transform will be added + to the list of invertible transforms. + + Note that normally, a user won't explicitly use the `__call__` method. Rather this would be passed to the `DataLoader`. + This means that `__call__` handles data as it comes out of a `DataLoader`, containing batch dimension. However, the + `inverse` operates on dictionaries containing images of shape `C,H,W,[D]`. This asymmetry is necessary so that we can + pass the inverse through multiprocessing. + + Args: + batch: batch of data to pad-collate + method: padding method (see :py:class:`monai.transforms.SpatialPad`) + mode: padding mode (see :py:class:`monai.transforms.SpatialPad`) + """ + + def __init__( + self, + method: Union[Method, str] = Method.SYMMETRIC, + mode: Union[NumpyPadMode, str] = NumpyPadMode.CONSTANT, + ) -> None: + self.method = method + self.mode = mode + + def __call__(self, batch: Any): + # data is either list of dicts or list of lists + is_list_of_dicts = isinstance(batch[0], dict) + # loop over items inside of each element in a batch + for key_or_idx in batch[0].keys() if is_list_of_dicts else range(len(batch[0])): + # calculate max size of each dimension + max_shapes = [] + for elem in batch: + if not isinstance(elem[key_or_idx], (torch.Tensor, np.ndarray)): + break + max_shapes.append(elem[key_or_idx].shape[1:]) + # len > 0 if objects were arrays, else skip as no padding to be done + if len(max_shapes) == 0: + continue + max_shape = np.array(max_shapes).max(axis=0) + # If all same size, skip + if np.all(np.array(max_shapes).min(axis=0) == max_shape): + continue + # Do we need to convert output to Tensor? 
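+            # (Editor's aside, hypothetical shapes: for max_shapes = [(8, 7), (10, 5), (9, 9)],
+            # np.array(max_shapes).max(axis=0) gives [10, 9], the size every item is padded to,
+            # while .min(axis=0) gives [8, 5]; padding is skipped only when min equals max,
+            # i.e. when all items already share one spatial shape.)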
+ output_to_tensor = isinstance(batch[0][key_or_idx], torch.Tensor) + + # Use `SpatialPadd` or `SpatialPad` to match sizes + # Default params are central padding, padding with 0's + # If input is dictionary, use the dictionary version so that the transformation is recorded + + padder = SpatialPad(max_shape, self.method, self.mode) # type: ignore + transform = padder if not output_to_tensor else Compose([padder, ToTensor()]) + + for idx in range(len(batch)): + im = batch[idx][key_or_idx] + orig_size = im.shape[1:] + padded = transform(batch[idx][key_or_idx]) + batch = replace_element(padded, batch, idx, key_or_idx) + + # If we have a dictionary of data, append to list + if is_list_of_dicts: + self.push_transform(batch[idx], key_or_idx, orig_size=orig_size) + + # After padding, use default list collator + return list_data_collate(batch) + + @staticmethod + def inverse(data: dict) -> Dict[Hashable, np.ndarray]: + if not isinstance(data, dict): + raise RuntimeError("Inverse can only currently be applied on dictionaries.") + + d = deepcopy(data) + for key in d.keys(): + transform_key = str(key) + InverseKeys.KEY_SUFFIX.value + if transform_key in d.keys(): + transform = d[transform_key][-1] + if transform[InverseKeys.CLASS_NAME.value] == PadListDataCollate.__name__: + d[key] = CenterSpatialCrop(transform["orig_size"])(d[key]) + # remove transform + d[transform_key].pop() + return d diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py index 42796e2412..6d28f780d4 100644 --- a/monai/transforms/post/dictionary.py +++ b/monai/transforms/post/dictionary.py @@ -333,6 +333,7 @@ class Decollated(MapTransform): """ def __init__(self, batch_size: Optional[int] = None) -> None: + super().__init__(None) self.batch_size = batch_size def __call__(self, data: dict) -> List[dict]: diff --git a/tests/test_decollate.py b/tests/test_decollate.py index 4ed8de6bbb..4dc5a217a7 100644 --- a/tests/test_decollate.py +++ b/tests/test_decollate.py @@ -12,25 +12,38 @@ import sys import unittest from enum import Enum +from typing import List, Tuple import numpy as np import torch +from parameterized import parameterized from monai.data import CacheDataset, DataLoader, create_test_image_2d from monai.data.utils import decollate_batch from monai.transforms import AddChanneld, Compose, LoadImaged, RandFlipd, SpatialPadd, ToTensord from monai.transforms.post.dictionary import Decollated +from monai.transforms.spatial.dictionary import RandAffined, RandRotate90d from monai.utils import optional_import, set_determinism from monai.utils.enums import InverseKeys from tests.utils import make_nifti_image _, has_nib = optional_import("nibabel") +KEYS = ["image"] + +TESTS: List[Tuple] = [] +TESTS.append((SpatialPadd(KEYS, 150), RandFlipd(KEYS, prob=1.0, spatial_axis=1))) +TESTS.append((RandRotate90d(KEYS, prob=0.0, max_k=1),)) +TESTS.append((RandAffined(KEYS, prob=0.0, translate_range=10),)) + class TestDeCollate(unittest.TestCase): def setUp(self) -> None: set_determinism(seed=0) + im = create_test_image_2d(100, 101)[0] + self.data = [{"image": make_nifti_image(im) if has_nib else im} for _ in range(6)] + def tearDown(self) -> None: set_determinism(None) @@ -55,24 +68,18 @@ def check_match(self, in1, in2): else: raise RuntimeError(f"Not sure how to compare types. 
type(in1): {type(in1)}, type(in2): {type(in2)}") - def test_decollation(self, batch_size=2, num_workers=2): + @parameterized.expand(TESTS) + def test_decollation(self, *transforms): - im = create_test_image_2d(100, 101)[0] - data = [{"image": make_nifti_image(im) if has_nib else im} for _ in range(6)] - - transforms = Compose( - [ - AddChanneld("image"), - SpatialPadd("image", 150), - RandFlipd("image", prob=1.0, spatial_axis=1), - ToTensord("image"), - ] - ) + batch_size = 2 + num_workers = 2 + + t_compose = Compose([AddChanneld(KEYS), Compose(transforms), ToTensord(KEYS)]) # If nibabel present, read from disk if has_nib: - transforms = Compose([LoadImaged("image"), transforms]) + t_compose = Compose([LoadImaged("image"), t_compose]) - dataset = CacheDataset(data, transforms, progress=False) + dataset = CacheDataset(self.data, t_compose, progress=False) loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) for b, batch_data in enumerate(loader): diff --git a/tests/test_pad_collation.py b/tests/test_pad_collation.py index 156d2649e0..3835dc8895 100644 --- a/tests/test_pad_collation.py +++ b/tests/test_pad_collation.py @@ -18,8 +18,9 @@ from parameterized import parameterized from monai.data import CacheDataset, DataLoader -from monai.data.utils import pad_list_data_collate +from monai.data.utils import decollate_batch, pad_list_data_collate from monai.transforms import ( + PadListDataCollate, RandRotate, RandRotate90, RandRotate90d, @@ -33,16 +34,16 @@ TESTS: List[Tuple] = [] +for pad_collate in [pad_list_data_collate, PadListDataCollate()]: + TESTS.append((dict, pad_collate, RandSpatialCropd("image", roi_size=[8, 7], random_size=True))) + TESTS.append((dict, pad_collate, RandRotated("image", prob=1, range_x=np.pi, keep_size=False))) + TESTS.append((dict, pad_collate, RandZoomd("image", prob=1, min_zoom=1.1, max_zoom=2.0, keep_size=False))) + TESTS.append((dict, pad_collate, RandRotate90d("image", prob=1, max_k=2))) -TESTS.append((dict, RandSpatialCropd("image", roi_size=[8, 7], random_size=True))) -TESTS.append((dict, RandRotated("image", prob=1, range_x=np.pi, keep_size=False))) -TESTS.append((dict, RandZoomd("image", prob=1, min_zoom=1.1, max_zoom=2.0, keep_size=False))) -TESTS.append((dict, RandRotate90d("image", prob=1, max_k=2))) - -TESTS.append((list, RandSpatialCrop(roi_size=[8, 7], random_size=True))) -TESTS.append((list, RandRotate(prob=1, range_x=np.pi, keep_size=False))) -TESTS.append((list, RandZoom(prob=1, min_zoom=1.1, max_zoom=2.0, keep_size=False))) -TESTS.append((list, RandRotate90(prob=1, max_k=2))) + TESTS.append((list, pad_collate, RandSpatialCrop(roi_size=[8, 7], random_size=True))) + TESTS.append((list, pad_collate, RandRotate(prob=1, range_x=np.pi, keep_size=False))) + TESTS.append((list, pad_collate, RandZoom(prob=1, min_zoom=1.1, max_zoom=2.0, keep_size=False))) + TESTS.append((list, pad_collate, RandRotate90(prob=1, max_k=2))) class _Dataset(torch.utils.data.Dataset): @@ -72,7 +73,7 @@ def tearDown(self) -> None: set_determinism(None) @parameterized.expand(TESTS) - def test_pad_collation(self, t_type, transform): + def test_pad_collation(self, t_type, collate_method, transform): if t_type == dict: dataset = CacheDataset(self.dict_data, transform, progress=False) @@ -86,9 +87,13 @@ def test_pad_collation(self, t_type, transform): pass # Padded collation shouldn't - loader = DataLoader(dataset, batch_size=2, collate_fn=pad_list_data_collate) - for _ in loader: - pass + loader = DataLoader(dataset, batch_size=10, 
collate_fn=collate_method) + # check collation in forward direction + for data in loader: + if t_type == dict: + decollated_data = decollate_batch(data) + for d in decollated_data: + PadListDataCollate.inverse(d) if __name__ == "__main__": From fdf26fbd08da035d1aab0c0eaf5a8f3713d1ee94 Mon Sep 17 00:00:00 2001 From: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Date: Fri, 19 Mar 2021 23:40:32 +0000 Subject: [PATCH 079/457] Jupyter Utilities (#1797) * Jupyter and other additions Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot * Update Signed-off-by: Eric Kerfoot Co-authored-by: Nic Ma Co-authored-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- docs/source/data.rst | 6 + monai/data/__init__.py | 3 +- monai/data/dataset.py | 55 ++++- monai/data/thread_buffer.py | 19 ++ monai/handlers/__init__.py | 2 +- monai/handlers/metric_logger.py | 79 ++++++- monai/utils/__init__.py | 1 + monai/utils/jupyter_utils.py | 351 +++++++++++++++++++++++++++++++ tests/test_npzdictitemdataset.py | 55 +++++ tests/test_thread_buffer.py | 12 +- tests/test_threadcontainer.py | 58 +++++ 11 files changed, 626 insertions(+), 15 deletions(-) create mode 100644 monai/utils/jupyter_utils.py create mode 100644 tests/test_npzdictitemdataset.py create mode 100644 tests/test_threadcontainer.py diff --git a/docs/source/data.rst b/docs/source/data.rst index c95659bc6e..6ed6be9702 100644 --- a/docs/source/data.rst +++ b/docs/source/data.rst @@ -68,6 +68,12 @@ Generic Interfaces .. autoclass:: ImageDataset :members: :special-members: __getitem__ + +`NPZDictItemDataset` +~~~~~~~~~~~~~~~~~~~~ +.. 
autoclass:: NPZDictItemDataset + :members: + :special-members: __getitem__ Patch-based dataset ------------------- diff --git a/monai/data/__init__.py b/monai/data/__init__.py index 54beb53e3f..2a7647e527 100644 --- a/monai/data/__init__.py +++ b/monai/data/__init__.py @@ -17,6 +17,7 @@ CacheNTransDataset, Dataset, LMDBDataset, + NPZDictItemDataset, PersistentDataset, SmartCacheDataset, ZipDataset, @@ -32,7 +33,7 @@ from .png_writer import write_png from .samplers import DistributedSampler, DistributedWeightedRandomSampler from .synthetic import create_test_image_2d, create_test_image_3d -from .thread_buffer import ThreadBuffer +from .thread_buffer import ThreadBuffer, ThreadDataLoader from .utils import ( compute_importance_map, compute_shape_offset, diff --git a/monai/data/dataset.py b/monai/data/dataset.py index c032e65af6..c10c500bf8 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -19,12 +19,13 @@ from copy import deepcopy from multiprocessing.pool import ThreadPool from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union +from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union +import numpy as np import torch from torch.utils.data import Dataset as _TorchDataset -from monai.data.utils import pickle_hashing +from monai.data.utils import first, pickle_hashing from monai.transforms import Compose, Randomizable, Transform, apply_transform from monai.transforms.transform import RandomizableTransform from monai.utils import MAX_SEED, get_seed, min_version, optional_import @@ -931,3 +932,53 @@ def __getitem__(self, index: int): if isinstance(transform, RandomizableTransform): transform.set_random_state(seed=self._seed) return self.dataset[index] + + +class NPZDictItemDataset(Dataset): + """ + Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and + stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts + mapping names to an item extracted from the loaded arrays. 
+ + Args: + npzfile: Path to .npz file or stream containing .npz file data + keys: Maps keys to load from file to name to store in dataset + transform: Transform to apply to batch dict + other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__ + """ + + def __init__( + self, + npzfile: Union[str, IO], + keys: Dict[str, str], + transform: Optional[Callable] = None, + other_keys: Optional[Sequence[str]] = (), + ): + self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else "STREAM" + self.keys: Dict[str, str] = dict(keys) + dat = np.load(npzfile) + + self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()} + self.length = self.arrays[first(self.keys.values())].shape[0] + + self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys} + + for k, v in self.arrays.items(): + if v.shape[0] != self.length: + raise ValueError( + "All loaded arrays must have the same first dimension " + f"size {self.length}, array `{k}` has size {v.shape[0]}" + ) + + super().__init__([], transform) + + def __len__(self): + return self.length + + def __getitem__(self, index: int): + data = {k: v[index] for k, v in self.arrays.items()} + + if self.transform is not None: + data = apply_transform(self.transform, data) + + return data diff --git a/monai/data/thread_buffer.py b/monai/data/thread_buffer.py index 252fdd6a21..da5f864900 100644 --- a/monai/data/thread_buffer.py +++ b/monai/data/thread_buffer.py @@ -13,6 +13,8 @@ from queue import Empty, Full, Queue from threading import Thread +from monai.data import DataLoader, Dataset + class ThreadBuffer: """ @@ -73,3 +75,20 @@ def __iter__(self): pass # queue was empty this time, try again finally: self.stop() # ensure thread completion + + +class ThreadDataLoader(DataLoader): + """ + Subclass of `DataLoader` using a `ThreadBuffer` object to implement `__iter__` method asynchronously. This will + iterate over data from the loader as expected however the data is generated on a separate thread. Use this class + where a `DataLoader` instance is required and not just an iterable object. + """ + + def __init__(self, dataset: Dataset, num_workers: int = 0, **kwargs): + super().__init__(dataset, num_workers, **kwargs) + + # ThreadBuffer will use the inherited __iter__ instead of the one defined below + self.buffer = ThreadBuffer(super()) + + def __iter__(self): + yield from self.buffer diff --git a/monai/handlers/__init__.py b/monai/handlers/__init__.py index 8f73f7f2fd..5669e8a9ee 100644 --- a/monai/handlers/__init__.py +++ b/monai/handlers/__init__.py @@ -17,7 +17,7 @@ from .iteration_metric import IterationMetric from .lr_schedule_handler import LrScheduleHandler from .mean_dice import MeanDice -from .metric_logger import MetricLogger +from .metric_logger import MetricLogger, MetricLoggerKeys from .metrics_saver import MetricsSaver from .roc_auc import ROCAUC from .segmentation_saver import SegmentationSaver diff --git a/monai/handlers/metric_logger.py b/monai/handlers/metric_logger.py index 758276d03d..c749d4bbab 100644 --- a/monai/handlers/metric_logger.py +++ b/monai/handlers/metric_logger.py @@ -10,8 +10,11 @@ # limitations under the License. 
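For the `NPZDictItemDataset` added earlier in this patch, a self-contained sketch of the intended usage; the array names and item keys here are made up for illustration:

    import io

    import numpy as np

    from monai.data import NPZDictItemDataset

    # Build an in-memory .npz whose arrays share the same first (batch) dimension.
    stream = io.BytesIO()
    np.savez(stream, images=np.random.rand(10, 1, 8, 8), labels=np.random.randint(0, 2, 10))
    stream.seek(0)

    # `keys` maps names inside the file to the keys each item dict will carry.
    ds = NPZDictItemDataset(stream, keys={"images": "img", "labels": "seg"})
    print(len(ds))             # 10
    print(sorted(ds[0]))       # ['img', 'seg']
    print(ds[0]["img"].shape)  # (1, 8, 8)

Arrays with mismatched first dimensions raise a ValueError at construction time, which is why the loaded arrays are validated up front.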
from collections import defaultdict -from typing import TYPE_CHECKING, Callable, DefaultDict, List +from enum import Enum +from threading import RLock +from typing import TYPE_CHECKING, Callable, DefaultDict, List, Optional +from monai.engines.utils import CommonKeys from monai.utils import exact_version, optional_import Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") @@ -21,12 +24,43 @@ Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") +def _get_loss_from_output(output, loss_key: str = CommonKeys.LOSS): + return output[loss_key].item() + + +class MetricLoggerKeys(Enum): + METRICS = "Metrics" + LOSS = "Loss" + + class MetricLogger: - def __init__(self, loss_transform: Callable = lambda x: x, metric_transform: Callable = lambda x: x) -> None: + """ + Collect per-iteration metrics and loss value from the attached trainer. This will also collect metric values from + a given evaluator object which is expected to perform evaluation at the end of training epochs. This class is + useful for collecting loss and metric values in one place for storage with checkpoint savers (`state_dict` and + `load_state_dict` methods provided as expected by Pytorch and Ignite) and for graphing during training. + + Args: + loss_transform: Converts the `output` value from the trainer's state into a loss value + metric_transform: Converts the metric value coming from the trainer/evaluator's state into a storable value + evaluator: Optional evaluator to consume metric results from at the end of its evaluation run + """ + + def __init__( + self, + loss_transform: Callable = _get_loss_from_output, + metric_transform: Callable = lambda x: x, + evaluator: Optional[Engine] = None, + ) -> None: self.loss_transform = loss_transform self.metric_transform = metric_transform self.loss: List = [] self.metrics: DefaultDict = defaultdict(list) + self.iteration = 0 + self.lock = RLock() + + if evaluator is not None: + self.attach_evaluator(evaluator) def attach(self, engine: Engine) -> None: """ @@ -35,21 +69,46 @@ def attach(self, engine: Engine) -> None: """ engine.add_event_handler(Events.ITERATION_COMPLETED, self) + def attach_evaluator(self, evaluator: Engine) -> None: + """ + Attach event handlers to the given evaluator to log metric values from it. + + Args: + evaluator: Ignite Engine implementing network evaluation + """ + evaluator.add_event_handler(Events.COMPLETED, self.log_metrics) + def __call__(self, engine: Engine) -> None: """ Args: engine: Ignite Engine, it can be a trainer, validator or evaluator. """ - self.loss.append(self.loss_transform(engine.state.output)) + with self.lock: + self.iteration = engine.state.iteration + lossval = self.loss_transform(engine.state.output) + + self.loss.append((self.iteration, lossval)) + self.log_metrics(engine) + + def log_metrics(self, engine: Engine) -> None: + """ + Log metrics from the given Engine's state member. 
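The reworked handler is also meant to round-trip through checkpointing; a minimal sketch with a toy ignite engine standing in for a real trainer (the engine and its data are hypothetical):

    import torch
    from ignite.engine import Engine

    from monai.handlers import MetricLogger

    # A toy engine whose output dict carries "loss", as the default loss_transform expects.
    trainer = Engine(lambda engine, batch: {"loss": torch.tensor(float(batch))})

    logger = MetricLogger()
    logger.attach(trainer)
    trainer.run(data=[1, 2, 3, 4], max_epochs=2)

    print(logger.loss[:2])  # [(1, 1.0), (2, 2.0)] -- (iteration, value) pairs

    # The history survives state_dict()/load_state_dict(), so it can be saved
    # alongside a checkpoint and restored on resume.
    state = logger.state_dict()
    logger.load_state_dict(state)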
+ + Args: + engine: Ignite Engine to log from + """ + with self.lock: + for m, v in engine.state.metrics.items(): + v = self.metric_transform(v) + self.metrics[m].append((self.iteration, v)) - for m, v in engine.state.metrics.items(): - v = self.metric_transform(v) - # # metrics may not be added on the first timestep, pad the list if this is the case - # # so that each metric list is the same length as self.loss - # if len(self.metrics[m])==0: - # self.metrics[m].append([v[0]]*len(self.loss)) + def state_dict(self): + return {MetricLoggerKeys.LOSS: self.loss, MetricLoggerKeys.METRICS: self.metrics} - self.metrics[m].append(v) + def load_state_dict(self, state_dict): + self.loss[:] = state_dict[MetricLoggerKeys.LOSS] + self.metrics.clear() + self.metrics.update(state_dict[MetricLoggerKeys.METRICS]) metriclogger = MetricLogger diff --git a/monai/utils/__init__.py b/monai/utils/__init__.py index 3c1e7efe24..4d272ac6ff 100644 --- a/monai/utils/__init__.py +++ b/monai/utils/__init__.py @@ -31,6 +31,7 @@ UpsampleMode, Weight, ) +from .jupyter_utils import StatusMembers, ThreadContainer from .misc import ( MAX_SEED, ImageMetaKey, diff --git a/monai/utils/jupyter_utils.py b/monai/utils/jupyter_utils.py new file mode 100644 index 0000000000..a7e712619e --- /dev/null +++ b/monai/utils/jupyter_utils.py @@ -0,0 +1,351 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This set of utility function is meant to make using Jupyter notebooks easier with MONAI. Plotting functions using +Matplotlib produce common plots for metrics and images. +""" + +from enum import Enum +from threading import RLock, Thread +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch + +# from monai.utils import exact_version, optional_import + +# if TYPE_CHECKING: +# import matplotlib.pyplot as plt +# from ignite.engine import Engine, Events + +# Figure = plt.Figure +# Axes = plt.Axes +# has_matplotlib = True +# else: +# Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") +# Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") +# plt, has_matplotlib = optional_import("matplotlib.pyplot") +# Figure, _ = optional_import("matplotlib.pyplot", name="Figure") +# Axes, _ = optional_import("matplotlib.pyplot", name="Axes") + +try: + import matplotlib.pyplot as plt + + has_matplotlib = True +except ImportError: + has_matplotlib = False + +try: + from ignite.engine import Engine, Events + + has_ignite = True +except ImportError: + Engine = object + Events = object + has_ignite = False + +LOSS_NAME = "loss" + + +def plot_metric_graph( + ax, + title: str, + graphmap: Dict[str, Union[List[float], Tuple[List[float], List[float]]]], + yscale: str = "log", + avg_keys: Tuple[str] = (LOSS_NAME,), + window_fraction: int = 20, +): + """ + Plot metrics on a single graph with running averages plotted for selected keys. 
The values in `graphmap`
+    should be lists of (timepoint, value) pairs as stored in MetricLogger objects.
+
+    Args:
+        ax: Axes object to plot into
+        title: graph title
+        graphmap: dictionary of named graph values, which are lists of values or (index, value) pairs
+        yscale: scale for y-axis compatible with `Axes.set_yscale`
+        avg_keys: tuple of keys in `graphmap` to provide running average plots for
+        window_fraction: what fraction of the graph value length to use as the running average window
+    """
+    from matplotlib.ticker import MaxNLocator
+
+    for n, v in graphmap.items():
+        if len(v) > 0:
+            if isinstance(v[0], (tuple, list)):  # values are (x,y) pairs
+                inds, vals = zip(*v)  # separate values into list of indices in X dimension and values
+            else:
+                inds, vals = tuple(range(len(v))), tuple(v)  # values are without indices, make indices for them
+
+            ax.plot(inds, vals, label=f"{n} = {vals[-1]:.5g}")
+
+            # if requested compute and plot a running average for the values using a fractional window size
+            if n in avg_keys and len(v) > window_fraction:
+                window = len(v) // window_fraction
+                kernel = np.ones((window,)) / window
+                ra = np.convolve((vals[0],) * (window - 1) + vals, kernel, mode="valid")
+
+                ax.plot(inds, ra, label=f"{n} Avg = {ra[-1]:.5g}")
+
+    ax.set_title(title)
+    ax.set_yscale(yscale)
+    ax.axis("on")
+    ax.legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0.0)
+    ax.grid(True, "both", "both")
+    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
+
+
+def plot_metric_images(
+    fig,
+    title: str,
+    graphmap: Dict[str, Union[List[float], Tuple[List[float], List[float]]]],
+    imagemap: Dict[str, np.ndarray],
+    yscale: str = "log",
+    avg_keys: Tuple[str] = (LOSS_NAME,),
+    window_fraction: int = 20,
+) -> List:
+    """
+    Plot metric graph data with images below into figure `fig`. The intended use is for the graph data to be
+    metrics from a training run and the images to be the batch and output from the last iteration. This uses
+    `plot_metric_graph` to plot the metric graph.
+
+    Args:
+        fig: Figure object to plot into, reuse from previous plotting for flicker-free refreshing
+        title: graph title
+        graphmap: dictionary of named graph values, which are lists of values or (index, value) pairs
+        imagemap: dictionary of named images to show with metric plot
+        yscale: for metric plot, scale for y-axis compatible with `Axes.set_yscale`
+        avg_keys: for metric plot, tuple of keys in `graphmap` to provide running average plots for
+        window_fraction: for metric plot, what fraction of the graph value length to use as the running average window
+
+    Returns:
+        list of Axes objects for graph followed by images
+    """
+    gridshape = (4, max(1, len(imagemap)))
+
+    graph = plt.subplot2grid(gridshape, (0, 0), colspan=gridshape[1], fig=fig)
+
+    plot_metric_graph(graph, title, graphmap, yscale, avg_keys, window_fraction)
+
+    axes = [graph]
+    for i, n in enumerate(imagemap):
+        im = plt.subplot2grid(gridshape, (1, i), rowspan=2, fig=fig)
+
+        if imagemap[n].shape[0] == 3:
+            im.imshow(imagemap[n].transpose([1, 2, 0]))
+        else:
+            im.imshow(np.squeeze(imagemap[n]), cmap="gray")
+
+        im.set_title("%s\n%.3g -> %.3g" % (n, imagemap[n].min(), imagemap[n].max()))
+        im.axis("off")
+        axes.append(im)
+
+    return axes
+
+
+def tensor_to_images(name: str, tensor: torch.Tensor):
+    """
+    Return a tuple of images derived from the given tensor. The `name` value indicates which key from the
+    output or batch value the tensor was stored as, or is "Batch" or "Output" if these were single tensors
+    instead of dictionaries.
+
+
+def tensor_to_images(name: str, tensor: torch.Tensor):
+    """
+    Return a tuple of images derived from the given tensor. The `name` value indicates which key from the
+    output or batch value the tensor was stored as, or is "Batch" or "Output" if these were single tensors
+    instead of dictionaries. Returns a tuple of 2D images of shape HW, or 3D images of shape CHW where C is
+    color channels RGB or RGBA. This allows multiple images to be created from a single tensor, i.e. to show
+    each channel separately.
+    """
+    if tensor.ndim == 4 and tensor.shape[2] > 2 and tensor.shape[3] > 2:
+        return tuple(tensor[0].cpu().data.numpy())
+    elif tensor.ndim == 5 and tensor.shape[3] > 2 and tensor.shape[4] > 2:
+        dmid = tensor.shape[2] // 2
+        return tuple(tensor[0, :, dmid].cpu().data.numpy())
+
+    return ()
+
+
+def plot_engine_status(
+    engine: Engine,
+    logger,
+    title: str = "Training Log",
+    yscale: str = "log",
+    avg_keys: Tuple[str, ...] = (LOSS_NAME,),
+    window_fraction: int = 20,
+    image_fn: Optional[Callable] = tensor_to_images,
+    fig=None,
+) -> Tuple:
+    """
+    Plot the status of the given Engine with its logger. The plot will consist of a graph of loss values and metrics
+    taken from the logger, and images taken from the `output` and `batch` members of `engine.state`. The images are
+    converted to Numpy arrays suitable for input to `Axes.imshow` using `image_fn`, if this is None then no image
+    plotting is done.
+
+    Args:
+        engine: Engine to extract images from
+        logger: MetricLogger to extract loss and metric data from
+        title: graph title
+        yscale: for metric plot, scale for y-axis compatible with `Axes.set_yscale`
+        avg_keys: for metric plot, tuple of keys in `graphmap` to provide running average plots for
+        window_fraction: for metric plot, what fraction of the graph value length to use as the running average window
+        image_fn: callable converting tensors keyed to a name in the Engine to a tuple of images to plot
+        fig: Figure object to plot into, reuse from previous plotting for flicker-free refreshing
+
+    Returns:
+        Figure object (or `fig` if given), list of Axes objects for graph and images
+    """
+    if fig is not None:
+        fig.clf()
+    else:
+        fig = plt.Figure(figsize=(20, 10), tight_layout=True, facecolor="white")
+
+    graphmap = {LOSS_NAME: logger.loss}
+    graphmap.update(logger.metrics)
+
+    imagemap = {}
+
+    if image_fn is not None and engine.state is not None and engine.state.batch is not None:
+        for src in (engine.state.batch, engine.state.output):
+            if isinstance(src, dict):
+                for k, v in src.items():
+                    images = image_fn(k, v)
+
+                    for i, im in enumerate(images):
+                        imagemap[f"{k}_{i}"] = im
+            else:
+                label = "Batch" if src is engine.state.batch else "Output"
+                images = image_fn(label, src)
+
+                for i, im in enumerate(images):
+                    imagemap[f"{label}_{i}"] = im
+
+    axes = plot_metric_images(fig, title, graphmap, imagemap, yscale, avg_keys, window_fraction)
+
+    axes[0].axhline(logger.loss[-1][1], c="k", ls=":")  # draw dotted horizontal line at last loss value
+
+    return fig, axes
+
+
+def _get_loss_from_output(output: Union[Dict[str, torch.Tensor], torch.Tensor]) -> float:
+    """Returns a single value from the network output, which is a dict or tensor."""
+    if isinstance(output, dict):
+        return output["loss"].item()
+    else:
+        return output.item()
+
+
+class StatusMembers(Enum):
+    """
+    Named members of the status dictionary, others may be present for named metric values.
+    """
+
+    STATUS = "Status"
+    EPOCHS = "Epochs"
+    ITERS = "Iters"
+    LOSS = "Loss"
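A small sketch of how the loss value is extracted and how the status dictionary is keyed; `_get_loss_from_output` is a private helper imported here only for illustration, and the numbers are arbitrary:

    import torch

    from monai.utils.jupyter_utils import StatusMembers, _get_loss_from_output

    print(_get_loss_from_output({"loss": torch.tensor(0.25)}))  # dict-style engine output -> 0.25
    print(_get_loss_from_output(torch.tensor(0.5)))  # bare tensor output -> 0.5

    # status dictionaries use the enum values as keys, plus one entry per named metric
    status = {StatusMembers.STATUS.value: "Running", StatusMembers.LOSS.value: 0.25}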
+
+
+class ThreadContainer(Thread):
+    """
+    Contains a running `Engine` object within a separate thread from the main thread in a Jupyter notebook. This
+    allows an engine to begin a run in the background and allows the starting notebook cell to complete. A
+    user can thus start a run and then navigate away from the notebook without concern for losing connection
+    with the running cell. All output is acquired through methods which synchronize with the running engine
+    using an internal `lock` member; acquiring this lock allows the engine to be inspected while it's prevented
+    from starting the next iteration.
+
+    Args:
+        engine: wrapped `Engine` object, when the container is started its `run` method is called
+        loss_transform: callable to convert an output dict into a single numeric value
+        metric_transform: callable to convert a named metric value into a single numeric value
+    """
+
+    def __init__(
+        self,
+        engine: Engine,
+        loss_transform: Callable = _get_loss_from_output,
+        metric_transform: Callable = lambda name, value: value,
+    ):
+        super().__init__()
+        self.lock = RLock()
+        self.engine = engine
+        self._status_dict: Dict[str, Any] = {}
+        self.loss_transform = loss_transform
+        self.metric_transform = metric_transform
+        self.fig = None
+
+        self.engine.add_event_handler(Events.ITERATION_COMPLETED, self._update_status)
+
+    def run(self):
+        """Calls the `run` method of the wrapped engine."""
+        self.engine.run()
+
+    def stop(self):
+        """Stop the engine and join the thread."""
+        self.engine.terminate()
+        self.join()
+
+    def _update_status(self):
+        """Called as an event, updates the internal status dict at the end of iterations."""
+        with self.lock:
+            state = self.engine.state
+            stats = {
+                StatusMembers.EPOCHS.value: 0,
+                StatusMembers.ITERS.value: 0,
+                StatusMembers.LOSS.value: float("nan"),
+            }
+
+            if state is not None:
+                if state.max_epochs >= 1:
+                    epoch = f"{state.epoch}/{state.max_epochs}"
+                else:
+                    epoch = str(state.epoch)
+
+                if state.epoch_length is not None:
+                    iters = f"{state.iteration % state.epoch_length}/{state.epoch_length}"
+                else:
+                    iters = str(state.iteration)
+
+                stats[StatusMembers.EPOCHS.value] = epoch
+                stats[StatusMembers.ITERS.value] = iters
+                stats[StatusMembers.LOSS.value] = self.loss_transform(state.output)
+
+                metrics = state.metrics or {}
+                for m, v in metrics.items():
+                    v = self.metric_transform(m, v)
+                    if v is not None:
+                        stats[m] = v  # store the scalar directly, `stats` holds no lists
+
+            self._status_dict.update(stats)
+
+    @property
+    def status_dict(self) -> Dict[str, str]:
+        """A dictionary containing status information, current loss, and current metric values."""
+        with self.lock:
+            stats = {StatusMembers.STATUS.value: "Running" if self.is_alive() else "Stopped"}
+            stats.update(self._status_dict)
+            return stats
+
+    def status(self) -> str:
+        """Returns a status string for the current state of the engine."""
+        stats = self.status_dict
+
+        msgs = [stats.pop(StatusMembers.STATUS.value), "Iters: " + str(stats.pop(StatusMembers.ITERS.value))]
+        msgs += ["%s: %s" % kv for kv in stats.items()]
+
+        return ", ".join(msgs)
+
+    def plot_status(self, logger, plot_func: Callable = plot_engine_status):
+        """
+        Generate a plot of the current status of the contained engine whose loss and metrics were tracked by `logger`.
+        The function `plot_func` must accept arguments `title`, `engine`, `logger`, and `fig` which are the plot title,
+        `self.engine`, `logger`, and `self.fig` respectively. The return value must be a figure object (stored in
+        `self.fig`) and a list of Axes objects for the plots in the figure. Only the figure is returned by this method,
+        which holds the internal lock during the plot generation.
+        """
+        with self.lock:
+            self.fig, axes = plot_func(title=self.status(), engine=self.engine, logger=logger, fig=self.fig)
+            return self.fig
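The intended notebook workflow pairs the container with a `MetricLogger` attached to the same engine. A sketch of that loop, where `trainer` stands in for any configured ignite `Engine` and the printed status is illustrative:

    from monai.handlers import MetricLogger
    from monai.utils import ThreadContainer

    logger = MetricLogger()
    logger.attach(trainer)  # `trainer` built elsewhere, e.g. a SupervisedTrainer

    con = ThreadContainer(trainer)
    con.start()  # training proceeds in a background thread

    print(con.status())  # e.g. "Running, Iters: 10/50, Epochs: 1/4, Loss: 0.42"
    fig = con.plot_status(logger)  # refreshable status figure

    con.stop()  # terminate the engine and join the thread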
diff --git a/tests/test_npzdictitemdataset.py b/tests/test_npzdictitemdataset.py
new file mode 100644
index 0000000000..5ec52f45a2
--- /dev/null
+++ b/tests/test_npzdictitemdataset.py
@@ -0,0 +1,55 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tempfile
+import unittest
+from io import BytesIO
+
+import numpy as np
+
+from monai.data import NPZDictItemDataset
+
+
+class TestNPZDictItemDataset(unittest.TestCase):
+    def test_load_stream(self):
+        dat0 = np.random.rand(10, 1, 4, 4)
+        dat1 = np.random.rand(10, 1, 4, 4)
+
+        npzfile = BytesIO()
+        np.savez_compressed(npzfile, dat0=dat0, dat1=dat1)
+        npzfile.seek(0)
+
+        npzds = NPZDictItemDataset(npzfile, {"dat0": "images", "dat1": "seg"})
+
+        item = npzds[0]
+
+        np.testing.assert_allclose(item["images"].shape, (1, 4, 4))
+        np.testing.assert_allclose(item["seg"].shape, (1, 4, 4))
+
+    def test_load_file(self):
+        dat0 = np.random.rand(10, 1, 4, 4)
+        dat1 = np.random.rand(10, 1, 4, 4)
+
+        with tempfile.TemporaryDirectory() as tempdir:
+            npzfile = f"{tempdir}/test.npz"
+
+            np.savez_compressed(npzfile, dat0=dat0, dat1=dat1)
+
+            npzds = NPZDictItemDataset(npzfile, {"dat0": "images", "dat1": "seg"})
+
+            item = npzds[0]
+
+            np.testing.assert_allclose(item["images"].shape, (1, 4, 4))
+            np.testing.assert_allclose(item["seg"].shape, (1, 4, 4))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/test_thread_buffer.py b/tests/test_thread_buffer.py
index d139b44c85..1b3ebb910d 100644
--- a/tests/test_thread_buffer.py
+++ b/tests/test_thread_buffer.py
@@ -13,7 +13,7 @@
 import time
 import unittest
 
-from monai.data import DataLoader, Dataset, ThreadBuffer
+from monai.data import DataLoader, Dataset, ThreadBuffer, ThreadDataLoader
 from monai.transforms import Compose, SimulateDelayd
 from monai.utils import PerfContext
 
@@ -41,6 +41,16 @@ def test_values(self):
         self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
         self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
 
+    def test_dataloader(self):
+        dataset = Dataset(data=self.datalist, transform=self.transform)
+        dataloader = ThreadDataLoader(dataset=dataset, batch_size=2, num_workers=0)
+
+        for d in dataloader:
+            self.assertEqual(d["image"][0], "spleen_19.nii.gz")
+            self.assertEqual(d["image"][1], "spleen_31.nii.gz")
+            self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
+            self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
+
     def test_time(self):
         dataset = Dataset(data=self.datalist * 2, transform=self.transform)  # contains data for 2 batches
         dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)
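`ThreadDataLoader`, exercised by the new test above, accepts the usual `DataLoader` arguments but yields batches generated in a background thread so loading can overlap computation. A minimal sketch:

    from monai.data import Dataset, ThreadDataLoader

    ds = Dataset(data=[{"val": i} for i in range(8)])
    loader = ThreadDataLoader(dataset=ds, batch_size=2, num_workers=0)

    for batch in loader:
        print(batch["val"])  # tensors of shape (2,), produced ahead of time by the buffer thread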
diff --git a/tests/test_threadcontainer.py b/tests/test_threadcontainer.py
new file mode 100644
index 0000000000..92a50a15aa
--- /dev/null
+++ b/tests/test_threadcontainer.py
@@ -0,0 +1,58 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import unittest
+
+import torch
+
+from monai.utils import optional_import
+
+try:
+    _, has_ignite = optional_import("ignite")
+
+    from monai.engines import CommonKeys, SupervisedTrainer
+    from monai.utils import ThreadContainer
+except ImportError:
+    has_ignite = False
+
+from monai.data import DataLoader
+
+
+class TestThreadContainer(unittest.TestCase):
+    @unittest.skipIf(not has_ignite, "Ignite needed for this test")
+    def test_container(self):
+        net = torch.nn.Conv2d(1, 1, 3, padding=1)
+
+        opt = torch.optim.Adam(net.parameters())
+
+        img = torch.rand(1, 16, 16)
+        data = {CommonKeys.IMAGE: img, CommonKeys.LABEL: img}
+        loader = DataLoader([data for _ in range(10)])
+
+        trainer = SupervisedTrainer(
+            device=torch.device("cpu"),
+            max_epochs=1,
+            train_data_loader=loader,
+            network=net,
+            optimizer=opt,
+            loss_function=torch.nn.L1Loss(),
+        )
+
+        con = ThreadContainer(trainer)
+        con.start()
+        time.sleep(1)  # wait for trainer to start
+
+        self.assertTrue(con.is_alive())
+        self.assertIsNotNone(con.status())
+        self.assertTrue(len(con.status_dict) > 0)
+
+        con.join()
From a2aeb4040bd4b8c927b04550eef40425a651e126 Mon Sep 17 00:00:00 2001
From: Richard Brown <33289025+rijobro@users.noreply.github.com>
Date: Sat, 20 Mar 2021 09:51:35 +0000
Subject: [PATCH 080/457] Batch inverse (#1795)

batch inverse
---
 docs/source/data.rst                  |  5 ++
 monai/data/__init__.py                |  1 +
 monai/data/inverse_batch_transform.py | 84 +++++++++++++++++++++++++++
 tests/test_inverse.py                 | 11 ++++
 4 files changed, 101 insertions(+)
 create mode 100644 monai/data/inverse_batch_transform.py

diff --git a/docs/source/data.rst b/docs/source/data.rst
index 6ed6be9702..8071bb1585 100644
--- a/docs/source/data.rst
+++ b/docs/source/data.rst
@@ -183,3 +183,8 @@ DataLoader
 ThreadBuffer
 ~~~~~~~~~~~~
 .. autoclass:: monai.data.ThreadBuffer
+
+
+BatchInverseTransform
+~~~~~~~~~~~~~~~~~~~~~
+.. autoclass:: monai.data.BatchInverseTransform
diff --git a/monai/data/__init__.py b/monai/data/__init__.py
index 2a7647e527..2001ccfc8f 100644
--- a/monai/data/__init__.py
+++ b/monai/data/__init__.py
@@ -26,6 +26,7 @@
 from .grid_dataset import GridPatchDataset, PatchDataset, PatchIter
 from .image_dataset import ImageDataset
 from .image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader, WSIReader
+from .inverse_batch_transform import BatchInverseTransform
 from .iterable_dataset import IterableDataset
 from .nifti_saver import NiftiSaver
 from .nifti_writer import write_nifti
diff --git a/monai/data/inverse_batch_transform.py b/monai/data/inverse_batch_transform.py
new file mode 100644
index 0000000000..fbc42c6ce1
--- /dev/null
+++ b/monai/data/inverse_batch_transform.py
@@ -0,0 +1,84 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, Dict, Hashable, Optional, Sequence + +import numpy as np +from torch.utils.data.dataloader import DataLoader as TorchDataLoader + +from monai.data.dataloader import DataLoader +from monai.data.dataset import Dataset +from monai.data.utils import decollate_batch, pad_list_data_collate +from monai.transforms.croppad.batch import PadListDataCollate +from monai.transforms.inverse import InvertibleTransform +from monai.transforms.transform import Transform +from monai.utils import first + +__all__ = ["BatchInverseTransform"] + + +class _BatchInverseDataset(Dataset): + def __init__( + self, + data: Sequence[Any], + transform: InvertibleTransform, + pad_collation_used: bool, + ) -> None: + super().__init__(data, transform) + self.invertible_transform = transform + self.pad_collation_used = pad_collation_used + + def __getitem__(self, index: int) -> Dict[Hashable, np.ndarray]: + data = dict(self.data[index]) + # If pad collation was used, then we need to undo this first + if self.pad_collation_used: + data = PadListDataCollate.inverse(data) + + return self.invertible_transform.inverse(data) + + +def no_collation(x): + return x + + +class BatchInverseTransform(Transform): + """Perform inverse on a batch of data. This is useful if you have inferred a batch of images and want to invert them all.""" + + def __init__( + self, transform: InvertibleTransform, loader: TorchDataLoader, collate_fn: Optional[Callable] = no_collation + ) -> None: + """ + Args: + transform: a callable data transform on input data. + loader: data loader used to generate the batch of data. + collate_fn: how to collate data after inverse transformations. Default won't do any collation, so the output will be a + list of size batch size. + """ + self.transform = transform + self.batch_size = loader.batch_size + self.num_workers = loader.num_workers + self.collate_fn = collate_fn + self.pad_collation_used = loader.collate_fn == pad_list_data_collate + + def __call__(self, data: Dict[str, Any]) -> Any: + + decollated_data = decollate_batch(data) + inv_ds = _BatchInverseDataset(decollated_data, self.transform, self.pad_collation_used) + inv_loader = DataLoader( + inv_ds, batch_size=self.batch_size, num_workers=self.num_workers, collate_fn=self.collate_fn + ) + try: + return first(inv_loader) + except RuntimeError as re: + re_str = str(re) + if "equal size" in re_str: + re_str += "\nMONAI hint: try creating `BatchInverseTransform` with `collate_fn=lambda x: x`." 
+ raise RuntimeError(re_str) diff --git a/tests/test_inverse.py b/tests/test_inverse.py index 8ce4e3bbf3..f548b53f11 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -21,6 +21,7 @@ from parameterized import parameterized from monai.data import CacheDataset, DataLoader, create_test_image_2d, create_test_image_3d +from monai.data.inverse_batch_transform import BatchInverseTransform from monai.data.utils import decollate_batch from monai.networks.nets import UNet from monai.transforms import ( @@ -407,6 +408,10 @@ TESTS = TESTS + TESTS_COMPOSE_X2 # type: ignore +def no_collation(x): + return x + + class TestInverse(unittest.TestCase): """Test inverse methods. @@ -573,6 +578,12 @@ def test_inverse_inferred_seg(self): self.assertEqual(len(seg_dict["label_transforms"]), num_invertible_transforms) self.assertEqual(inv_seg.shape[1:], test_data[0]["label"].shape) + # Inverse of batch + batch_inverter = BatchInverseTransform(transforms, loader, collate_fn=no_collation) + with allow_missing_keys_mode(transforms): + inv_batch = batch_inverter(segs_dict) + self.assertEqual(inv_batch[0]["label"].shape[1:], test_data[0]["label"].shape) + if __name__ == "__main__": unittest.main() From b6f213e57cea3e7b335b3a1f4f214cb43fc6d087 Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Mon, 22 Mar 2021 14:58:37 +0800 Subject: [PATCH 081/457] 1804 add pretrain for densenet senet by using subclass (#1812) * add pretrain options Signed-off-by: Yiheng Wang * rewrite error message add test cases Signed-off-by: Yiheng Wang --- docs/source/networks.rst | 10 - monai/networks/nets/__init__.py | 4 +- monai/networks/nets/densenet.py | 198 +++++---- monai/networks/nets/senet.py | 452 ++++++++++---------- monai/visualize/class_activation_maps.py | 8 +- monai/visualize/occlusion_sensitivity.py | 4 +- tests/test_densenet.py | 34 +- tests/test_integration_classification_2d.py | 6 +- tests/test_occlusion_sensitivity.py | 4 +- tests/test_senet.py | 45 +- tests/test_vis_cam.py | 8 +- tests/test_vis_gradcam.py | 8 +- tests/test_vis_gradcampp.py | 8 +- 13 files changed, 370 insertions(+), 419 deletions(-) diff --git a/docs/source/networks.rst b/docs/source/networks.rst index 036ba2aff7..f5d498a363 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -286,10 +286,6 @@ Nets ~~~~~~~~~~ .. autoclass:: DenseNet :members: -.. autofunction:: densenet121 -.. autofunction:: densenet169 -.. autofunction:: densenet201 -.. autofunction:: densenet264 `SegResNet` ~~~~~~~~~~~ @@ -305,12 +301,6 @@ Nets ~~~~~~~ .. autoclass:: SENet :members: -.. autofunction:: senet154 -.. autofunction:: se_resnet50 -.. autofunction:: se_resnet101 -.. autofunction:: se_resnet152 -.. autofunction:: se_resnext50_32x4d -.. 
autofunction:: se_resnext101_32x4d `HighResNet` ~~~~~~~~~~~~ diff --git a/monai/networks/nets/__init__.py b/monai/networks/nets/__init__.py index f3def30736..cd00ea1aa1 100644 --- a/monai/networks/nets/__init__.py +++ b/monai/networks/nets/__init__.py @@ -13,7 +13,7 @@ from .autoencoder import AutoEncoder from .basic_unet import BasicUNet, BasicUnet, Basicunet from .classifier import Classifier, Critic, Discriminator -from .densenet import DenseNet, densenet121, densenet169, densenet201, densenet264 +from .densenet import DenseNet, DenseNet121, DenseNet169, DenseNet201, DenseNet264 from .dynunet import DynUNet, DynUnet, Dynunet from .fullyconnectednet import FullyConnectedNet, VarFullyConnectedNet from .generator import Generator @@ -21,7 +21,7 @@ from .regressor import Regressor from .regunet import GlobalNet, LocalNet, RegUNet from .segresnet import SegResNet, SegResNetVAE -from .senet import SENet, se_resnet50, se_resnet101, se_resnet152, se_resnext50_32x4d, se_resnext101_32x4d, senet154 +from .senet import SENet, SENet154, SEResNet50, SEResNet101, SEResNet152, SEResNext50, SEResNext101 from .unet import UNet, Unet, unet from .varautoencoder import VarAutoEncoder from .vnet import VNet diff --git a/monai/networks/nets/densenet.py b/monai/networks/nets/densenet.py index 4b4f2cc6a4..280bc6b0cb 100644 --- a/monai/networks/nets/densenet.py +++ b/monai/networks/nets/densenet.py @@ -115,11 +115,6 @@ class DenseNet(nn.Module): bn_size: multiplicative factor for number of bottle neck layers. (i.e. bn_size * k features in the bottleneck layer) dropout_prob: dropout rate after each dense layer. - pretrained: whether to load ImageNet pretrained weights when `spatial_dims == 2`. - In order to load weights correctly, Please ensure that the `block_config` - is consistent with the corresponding arch. - pretrained_arch: the arch name for pretrained weights. - progress: If True, displays a progress bar of the download to stderr. """ def __init__( @@ -132,9 +127,6 @@ def __init__( block_config: Sequence[int] = (6, 12, 24, 16), bn_size: int = 4, dropout_prob: float = 0.0, - pretrained: bool = False, - pretrained_arch: str = "densenet121", - progress: bool = True, ) -> None: super(DenseNet, self).__init__() @@ -198,107 +190,127 @@ def __init__( elif isinstance(m, nn.Linear): nn.init.constant_(torch.as_tensor(m.bias), 0) - if pretrained: - self._load_state_dict(pretrained_arch, progress) - def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.features(x) x = self.class_layers(x) return x - def _load_state_dict(self, arch, progress): - """ - This function is used to load pretrained models. - Adapted from `PyTorch Hub 2D version - `_ - """ - model_urls = { - "densenet121": "https://download.pytorch.org/models/densenet121-a639ec97.pth", - "densenet169": "https://download.pytorch.org/models/densenet169-b2777c0a.pth", - "densenet201": "https://download.pytorch.org/models/densenet201-c1103571.pth", - } - if arch in model_urls.keys(): - model_url = model_urls[arch] - else: - raise ValueError( - "only 'densenet121', 'densenet169' and 'densenet201' are supported to load pretrained weights." - ) - pattern = re.compile( - r"^(.*denselayer\d+)(\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" + +def _load_state_dict(model, arch, progress): + """ + This function is used to load pretrained models. 
+    Adapted from `PyTorch Hub 2D version
+    `_
+    """
+    model_urls = {
+        "densenet121": "https://download.pytorch.org/models/densenet121-a639ec97.pth",
+        "densenet169": "https://download.pytorch.org/models/densenet169-b2777c0a.pth",
+        "densenet201": "https://download.pytorch.org/models/densenet201-c1103571.pth",
+    }
+    if arch in model_urls:
+        model_url = model_urls[arch]
+    else:
+        raise ValueError(
+            "only 'densenet121', 'densenet169' and 'densenet201' are supported to load pretrained weights."
         )
+    pattern = re.compile(
+        r"^(.*denselayer\d+)(\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$"
+    )
 
-        state_dict = load_state_dict_from_url(model_url, progress=progress)
-        for key in list(state_dict.keys()):
-            res = pattern.match(key)
-            if res:
-                new_key = res.group(1) + ".layers" + res.group(2) + res.group(3)
-                state_dict[new_key] = state_dict[key]
-                del state_dict[key]
+    state_dict = load_state_dict_from_url(model_url, progress=progress)
+    for key in list(state_dict.keys()):
+        res = pattern.match(key)
+        if res:
+            new_key = res.group(1) + ".layers" + res.group(2) + res.group(3)
+            state_dict[new_key] = state_dict[key]
+            del state_dict[key]
 
-        model_dict = self.state_dict()
-        state_dict = {
-            k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape)
-        }
-        model_dict.update(state_dict)
-        self.load_state_dict(model_dict)
+    model_dict = model.state_dict()
+    state_dict = {
+        k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape)
+    }
+    model_dict.update(state_dict)
+    model.load_state_dict(model_dict)
 
 
-def densenet121(pretrained: bool = False, progress: bool = True, **kwargs) -> DenseNet:
-    """
-    when `spatial_dims = 2`, specify `pretrained = True` can load Imagenet pretrained weights achieved
-    from `PyTorch Hub 2D version
-    `_
-    """
-    model = DenseNet(
-        init_features=64,
-        growth_rate=32,
-        block_config=(6, 12, 24, 16),
-        pretrained=pretrained,
-        pretrained_arch="densenet121",
-        progress=progress,
+class DenseNet121(DenseNet):
+    def __init__(
+        self,
+        init_features: int = 64,
+        growth_rate: int = 32,
+        block_config: Sequence[int] = (6, 12, 24, 16),
+        pretrained: bool = False,
+        progress: bool = True,
         **kwargs,
-    )
-    return model
+    ) -> None:
+        super(DenseNet121, self).__init__(
+            init_features=init_features,
+            growth_rate=growth_rate,
+            block_config=block_config,
+            **kwargs,
+        )
+        if pretrained:
+            # it only works when `spatial_dims` is 2
+            _load_state_dict(self, "densenet121", progress)
 
 
-def densenet169(pretrained: bool = False, progress: bool = True, **kwargs) -> DenseNet:
-    """
-    when `spatial_dims = 2`, specify `pretrained = True` can load Imagenet pretrained weights achieved
-    from `PyTorch Hub 2D version
-    `_
-    """
-    model = DenseNet(
-        init_features=64,
-        growth_rate=32,
-        block_config=(6, 12, 32, 32),
-        pretrained=pretrained,
-        pretrained_arch="densenet169",
-        progress=progress,
+class DenseNet169(DenseNet):
+    def __init__(
+        self,
+        init_features: int = 64,
+        growth_rate: int = 32,
+        block_config: Sequence[int] = (6, 12, 32, 32),
+        pretrained: bool = False,
+        progress: bool = True,
         **kwargs,
-    )
-    return model
+    ) -> None:
+        super(DenseNet169, self).__init__(
+            init_features=init_features,
+            growth_rate=growth_rate,
+            block_config=block_config,
+            **kwargs,
+        )
+        if pretrained:
+            # it only works when `spatial_dims` is 2
+            _load_state_dict(self, "densenet169", progress)
 
 
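With the subclass API, `pretrained` is a plain constructor flag rather than a factory argument. A short sketch of loading the 2D ImageNet weights; shapes are illustrative and pretrained loading only applies to `spatial_dims=2`:

    import torch

    from monai.networks.nets import DenseNet121

    net = DenseNet121(spatial_dims=2, in_channels=3, out_channels=10, pretrained=True)
    with torch.no_grad():
        logits = net(torch.rand(1, 3, 64, 64))  # -> shape (1, 10)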
-def densenet201(pretrained: bool = False, progress: bool = True, **kwargs) -> DenseNet:
-    """
-    when `spatial_dims = 2`, specify `pretrained = True` can load Imagenet pretrained weights achieved
-    from `PyTorch Hub 2D version
-    `_
-    """
-    model = DenseNet(
-        init_features=64,
-        growth_rate=32,
-        block_config=(6, 12, 48, 32),
-        pretrained=pretrained,
-        pretrained_arch="densenet201",
-        progress=progress,
+class DenseNet201(DenseNet):
+    def __init__(
+        self,
+        init_features: int = 64,
+        growth_rate: int = 32,
+        block_config: Sequence[int] = (6, 12, 48, 32),
+        pretrained: bool = False,
+        progress: bool = True,
         **kwargs,
-    )
-    return model
+    ) -> None:
+        super(DenseNet201, self).__init__(
+            init_features=init_features,
+            growth_rate=growth_rate,
+            block_config=block_config,
+            **kwargs,
+        )
+        if pretrained:
+            # it only works when `spatial_dims` is 2
+            _load_state_dict(self, "densenet201", progress)
 
 
-def densenet264(pretrained: bool = False, progress: bool = True, **kwargs) -> DenseNet:
-    model = DenseNet(init_features=64, growth_rate=32, block_config=(6, 12, 64, 48), **kwargs)
-    if pretrained:
-        print("Currently PyTorch Hub does not provide densenet264 pretrained models.")
-    return model
+class DenseNet264(DenseNet):
+    def __init__(
+        self,
+        init_features: int = 64,
+        growth_rate: int = 32,
+        block_config: Sequence[int] = (6, 12, 64, 48),
+        pretrained: bool = False,
+        progress: bool = True,
+        **kwargs,
+    ) -> None:
+        super(DenseNet264, self).__init__(
+            init_features=init_features,
+            growth_rate=growth_rate,
+            block_config=block_config,
+            **kwargs,
+        )
+        if pretrained:
+            print("Currently PyTorch Hub does not provide densenet264 pretrained models.")
diff --git a/monai/networks/nets/senet.py b/monai/networks/nets/senet.py
index 333a3b1159..f5738edeeb 100644
--- a/monai/networks/nets/senet.py
+++ b/monai/networks/nets/senet.py
@@ -11,7 +11,7 @@
 
 import re
 from collections import OrderedDict
-from typing import Any, List, Optional, Tuple, Type, Union
+from typing import Any, List, Optional, Sequence, Tuple, Type, Union
 
 import torch
 import torch.nn as nn
@@ -66,11 +66,6 @@ class SENet(nn.Module):
             - For SE-ResNeXt models: False
         num_classes: number of outputs in `last_linear` layer.
             for all models: 1000
-        pretrained: whether to load ImageNet pretrained weights when `spatial_dims == 2`.
-            In order to load weights correctly, Please ensure that the `block_config`
-            is consistent with the corresponding arch.
-        pretrained_arch: the arch name for pretrained weights.
-        progress: If True, displays a progress bar of the download to stderr.
     """
 
     def __init__(
@@ -78,7 +73,7 @@ def __init__(
         spatial_dims: int,
         in_channels: int,
         block: Type[Union[SEBottleneck, SEResNetBottleneck, SEResNeXtBottleneck]],
-        layers: List[int],
+        layers: Sequence[int],
         groups: int,
         reduction: int,
         dropout_prob: Optional[float] = 0.2,
@@ -87,9 +82,6 @@ def __init__(
         downsample_kernel_size: int = 3,
         input_3x3: bool = True,
         num_classes: int = 1000,
-        pretrained: bool = False,
-        pretrained_arch: str = "se_resnet50",
-        progress: bool = True,
     ) -> None:
         super(SENet, self).__init__()
 
@@ -183,64 +175,6 @@ def __init__(
             elif isinstance(m, nn.Linear):
                 nn.init.constant_(torch.as_tensor(m.bias), 0)
 
-        if pretrained:
-            self._load_state_dict(pretrained_arch, progress)
-
-    def _load_state_dict(self, arch, progress):
-        """
-        This function is used to load pretrained models.
- """ - model_urls = { - "senet154": "http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth", - "se_resnet50": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth", - "se_resnet101": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth", - "se_resnet152": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth", - "se_resnext50_32x4d": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth", - "se_resnext101_32x4d": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth", - } - if arch in model_urls.keys(): - model_url = model_urls[arch] - else: - raise ValueError( - "only 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', \ - and se_resnext101_32x4d are supported to load pretrained weights." - ) - - pattern_conv = re.compile(r"^(layer[1-4]\.\d\.(?:conv)\d\.)(\w*)$") - pattern_bn = re.compile(r"^(layer[1-4]\.\d\.)(?:bn)(\d\.)(\w*)$") - pattern_se = re.compile(r"^(layer[1-4]\.\d\.)(?:se_module.fc1.)(\w*)$") - pattern_se2 = re.compile(r"^(layer[1-4]\.\d\.)(?:se_module.fc2.)(\w*)$") - pattern_down_conv = re.compile(r"^(layer[1-4]\.\d\.)(?:downsample.0.)(\w*)$") - pattern_down_bn = re.compile(r"^(layer[1-4]\.\d\.)(?:downsample.1.)(\w*)$") - - state_dict = load_state_dict_from_url(model_url, progress=progress) - for key in list(state_dict.keys()): - new_key = None - if pattern_conv.match(key): - new_key = re.sub(pattern_conv, r"\1conv.\2", key) - elif pattern_bn.match(key): - new_key = re.sub(pattern_bn, r"\1conv\2adn.N.\3", key) - elif pattern_se.match(key): - state_dict[key] = state_dict[key].squeeze() - new_key = re.sub(pattern_se, r"\1se_layer.fc.0.\2", key) - elif pattern_se2.match(key): - state_dict[key] = state_dict[key].squeeze() - new_key = re.sub(pattern_se2, r"\1se_layer.fc.2.\2", key) - elif pattern_down_conv.match(key): - new_key = re.sub(pattern_down_conv, r"\1project.conv.\2", key) - elif pattern_down_bn.match(key): - new_key = re.sub(pattern_down_bn, r"\1project.adn.N.\2", key) - if new_key: - state_dict[new_key] = state_dict[key] - del state_dict[key] - - model_dict = self.state_dict() - state_dict = { - k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape) - } - model_dict.update(state_dict) - self.load_state_dict(model_dict) - def _make_layer( self, block: Type[Union[SEBottleneck, SEResNetBottleneck, SEResNeXtBottleneck]], @@ -313,167 +247,225 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return x -def senet154( - spatial_dims: int, - in_channels: int, - num_classes: int, - pretrained: bool = False, - progress: bool = True, -) -> SENet: +def _load_state_dict(model, arch, progress): """ - when `spatial_dims = 2`, specify `pretrained = True` can load Imagenet pretrained weights achieved - from `Cadene Hub 2D version - `_. - """ - model = SENet( - spatial_dims=spatial_dims, - in_channels=in_channels, - block=SEBottleneck, - layers=[3, 8, 36, 3], - groups=64, - reduction=16, - dropout_prob=0.2, - dropout_dim=1, - num_classes=num_classes, - pretrained=pretrained, - pretrained_arch="senet154", - progress=progress, - ) - return model - - -def se_resnet50( - spatial_dims: int, in_channels: int, num_classes: int, pretrained: bool = False, progress: bool = True -) -> SENet: - """ - when `spatial_dims = 2`, specify `pretrained = True` can load Imagenet pretrained weights achieved - from `Cadene Hub 2D version - `_. 
- """ - model = SENet( - spatial_dims=spatial_dims, - in_channels=in_channels, - block=SEResNetBottleneck, - layers=[3, 4, 6, 3], - groups=1, - reduction=16, - dropout_prob=None, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - num_classes=num_classes, - pretrained=pretrained, - pretrained_arch="se_resnet50", - progress=progress, - ) - return model - - -def se_resnet101( - spatial_dims: int, in_channels: int, num_classes: int, pretrained: bool = False, progress: bool = True -) -> SENet: + This function is used to load pretrained models. """ - when `spatial_dims = 2`, specify `pretrained = True` can load Imagenet pretrained weights achieved - from `Cadene Hub 2D version - `_. - """ - model = SENet( - spatial_dims=spatial_dims, - in_channels=in_channels, - block=SEResNetBottleneck, - layers=[3, 4, 23, 3], - groups=1, - reduction=16, - dropout_prob=0.2, - dropout_dim=1, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - num_classes=num_classes, - pretrained=pretrained, - pretrained_arch="se_resnet101", - progress=progress, - ) - return model - - -def se_resnet152( - spatial_dims: int, in_channels: int, num_classes: int, pretrained: bool = False, progress: bool = True -) -> SENet: - """ - when `spatial_dims = 2`, specify `pretrained = True` can load Imagenet pretrained weights achieved - from `Cadene Hub 2D version - `_. - """ - model = SENet( - spatial_dims=spatial_dims, - in_channels=in_channels, - block=SEResNetBottleneck, - layers=[3, 8, 36, 3], - groups=1, - reduction=16, - dropout_prob=0.2, - dropout_dim=1, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - num_classes=num_classes, - pretrained=pretrained, - pretrained_arch="se_resnet152", - progress=progress, - ) - return model - - -def se_resnext50_32x4d( - spatial_dims: int, in_channels: int, num_classes: int, pretrained: bool = False, progress: bool = True -) -> SENet: - """ - when `spatial_dims = 2`, specify `pretrained = True` can load Imagenet pretrained weights achieved - from `Cadene Hub 2D version - `_. - """ - model = SENet( - spatial_dims=spatial_dims, - in_channels=in_channels, - block=SEResNeXtBottleneck, - layers=[3, 4, 6, 3], - groups=32, - reduction=16, - dropout_prob=None, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - num_classes=num_classes, - pretrained=pretrained, - pretrained_arch="se_resnext50_32x4d", - progress=progress, - ) - return model - - -def se_resnext101_32x4d( - spatial_dims: int, in_channels: int, num_classes: int, pretrained: bool = False, progress: bool = True -) -> SENet: - """ - when `spatial_dims = 2`, specify `pretrained = True` can load Imagenet pretrained weights achieved - from `Cadene Hub 2D version - `_. 
- """ - model = SENet( - spatial_dims=spatial_dims, - in_channels=in_channels, - block=SEResNeXtBottleneck, - layers=[3, 4, 23, 3], - groups=32, - reduction=16, - dropout_prob=None, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - num_classes=num_classes, - pretrained=pretrained, - pretrained_arch="se_resnext101_32x4d", - progress=progress, - ) - return model + model_urls = { + "senet154": "http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth", + "se_resnet50": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth", + "se_resnet101": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth", + "se_resnet152": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth", + "se_resnext50_32x4d": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth", + "se_resnext101_32x4d": "http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth", + } + if arch in model_urls: + model_url = model_urls[arch] + else: + raise ValueError( + "only 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', \ + and se_resnext101_32x4d are supported to load pretrained weights." + ) + + pattern_conv = re.compile(r"^(layer[1-4]\.\d\.(?:conv)\d\.)(\w*)$") + pattern_bn = re.compile(r"^(layer[1-4]\.\d\.)(?:bn)(\d\.)(\w*)$") + pattern_se = re.compile(r"^(layer[1-4]\.\d\.)(?:se_module.fc1.)(\w*)$") + pattern_se2 = re.compile(r"^(layer[1-4]\.\d\.)(?:se_module.fc2.)(\w*)$") + pattern_down_conv = re.compile(r"^(layer[1-4]\.\d\.)(?:downsample.0.)(\w*)$") + pattern_down_bn = re.compile(r"^(layer[1-4]\.\d\.)(?:downsample.1.)(\w*)$") + + state_dict = load_state_dict_from_url(model_url, progress=progress) + for key in list(state_dict.keys()): + new_key = None + if pattern_conv.match(key): + new_key = re.sub(pattern_conv, r"\1conv.\2", key) + elif pattern_bn.match(key): + new_key = re.sub(pattern_bn, r"\1conv\2adn.N.\3", key) + elif pattern_se.match(key): + state_dict[key] = state_dict[key].squeeze() + new_key = re.sub(pattern_se, r"\1se_layer.fc.0.\2", key) + elif pattern_se2.match(key): + state_dict[key] = state_dict[key].squeeze() + new_key = re.sub(pattern_se2, r"\1se_layer.fc.2.\2", key) + elif pattern_down_conv.match(key): + new_key = re.sub(pattern_down_conv, r"\1project.conv.\2", key) + elif pattern_down_bn.match(key): + new_key = re.sub(pattern_down_bn, r"\1project.adn.N.\2", key) + if new_key: + state_dict[new_key] = state_dict[key] + del state_dict[key] + + model_dict = model.state_dict() + state_dict = { + k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape) + } + model_dict.update(state_dict) + model.load_state_dict(model_dict) + + +class SENet154(SENet): + def __init__( + self, + layers: Sequence[int] = (3, 8, 36, 3), + groups: int = 64, + reduction: int = 16, + pretrained: bool = False, + progress: bool = True, + **kwargs, + ) -> None: + super(SENet154, self).__init__( + block=SEBottleneck, + layers=layers, + groups=groups, + reduction=reduction, + **kwargs, + ) + if pretrained: + # it only worked when `spatial_dims` is 2 + _load_state_dict(self, "senet154", progress) + + +class SEResNet50(SENet): + def __init__( + self, + layers: Sequence[int] = (3, 4, 6, 3), + groups: int = 1, + reduction: int = 16, + dropout_prob: Optional[float] = None, + inplanes: int = 64, + downsample_kernel_size: int = 1, + input_3x3: bool = False, + pretrained: bool = False, + progress: bool = True, + **kwargs, + ) -> None: + super(SEResNet50, 
self).__init__(
+            block=SEResNetBottleneck,
+            layers=layers,
+            groups=groups,
+            reduction=reduction,
+            dropout_prob=dropout_prob,
+            inplanes=inplanes,
+            downsample_kernel_size=downsample_kernel_size,
+            input_3x3=input_3x3,
+            **kwargs,
+        )
+        if pretrained:
+            # it only works when `spatial_dims` is 2
+            _load_state_dict(self, "se_resnet50", progress)
+
+
+class SEResNet101(SENet):
+    def __init__(
+        self,
+        layers: Sequence[int] = (3, 4, 23, 3),
+        groups: int = 1,
+        reduction: int = 16,
+        inplanes: int = 64,
+        downsample_kernel_size: int = 1,
+        input_3x3: bool = False,
+        pretrained: bool = False,
+        progress: bool = True,
+        **kwargs,
+    ) -> None:
+        super(SEResNet101, self).__init__(
+            block=SEResNetBottleneck,
+            layers=layers,
+            groups=groups,
+            reduction=reduction,
+            inplanes=inplanes,
+            downsample_kernel_size=downsample_kernel_size,
+            input_3x3=input_3x3,
+            **kwargs,
+        )
+        if pretrained:
+            # it only works when `spatial_dims` is 2
+            _load_state_dict(self, "se_resnet101", progress)
+
+
+class SEResNet152(SENet):
+    def __init__(
+        self,
+        layers: Sequence[int] = (3, 8, 36, 3),
+        groups: int = 1,
+        reduction: int = 16,
+        inplanes: int = 64,
+        downsample_kernel_size: int = 1,
+        input_3x3: bool = False,
+        pretrained: bool = False,
+        progress: bool = True,
+        **kwargs,
+    ) -> None:
+        super(SEResNet152, self).__init__(
+            block=SEResNetBottleneck,
+            layers=layers,
+            groups=groups,
+            reduction=reduction,
+            inplanes=inplanes,
+            downsample_kernel_size=downsample_kernel_size,
+            input_3x3=input_3x3,
+            **kwargs,
+        )
+        if pretrained:
+            # it only works when `spatial_dims` is 2
+            _load_state_dict(self, "se_resnet152", progress)
+
+
+class SEResNext50(SENet):
+    def __init__(
+        self,
+        layers: Sequence[int] = (3, 4, 6, 3),
+        groups: int = 32,
+        reduction: int = 16,
+        dropout_prob: Optional[float] = None,
+        inplanes: int = 64,
+        downsample_kernel_size: int = 1,
+        input_3x3: bool = False,
+        pretrained: bool = False,
+        progress: bool = True,
+        **kwargs,
+    ) -> None:
+        super(SEResNext50, self).__init__(
+            block=SEResNeXtBottleneck,
+            layers=layers,
+            groups=groups,
+            dropout_prob=dropout_prob,
+            reduction=reduction,
+            inplanes=inplanes,
+            downsample_kernel_size=downsample_kernel_size,
+            input_3x3=input_3x3,
+            **kwargs,
+        )
+        if pretrained:
+            # it only works when `spatial_dims` is 2
+            _load_state_dict(self, "se_resnext50_32x4d", progress)
+
+
+class SEResNext101(SENet):
+    def __init__(
+        self,
+        layers: Sequence[int] = (3, 4, 23, 3),
+        groups: int = 32,
+        reduction: int = 16,
+        dropout_prob: Optional[float] = None,
+        inplanes: int = 64,
+        downsample_kernel_size: int = 1,
+        input_3x3: bool = False,
+        pretrained: bool = False,
+        progress: bool = True,
+        **kwargs,
+    ) -> None:
+        super(SEResNext101, self).__init__(
+            block=SEResNeXtBottleneck,
+            layers=layers,
+            groups=groups,
+            dropout_prob=dropout_prob,
+            reduction=reduction,
+            inplanes=inplanes,
+            downsample_kernel_size=downsample_kernel_size,
+            input_3x3=input_3x3,
+            **kwargs,
+        )
+        if pretrained:
+            # it only works when `spatial_dims` is 2
+            _load_state_dict(self, "se_resnext101_32x4d", progress)
diff --git a/monai/visualize/class_activation_maps.py b/monai/visualize/class_activation_maps.py
index 6e93225af3..b310ec0834 100644
--- a/monai/visualize/class_activation_maps.py
+++ b/monai/visualize/class_activation_maps.py
@@ -209,10 +209,10 @@ class CAM(CAMBase):
 
     .. &#13;
code-block:: python # densenet 2d - from monai.networks.nets import densenet121 + from monai.networks.nets import DenseNet121 from monai.visualize import CAM - model_2d = densenet121(spatial_dims=2, in_channels=1, out_channels=3) + model_2d = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3) cam = CAM(nn_module=model_2d, target_layers="class_layers.relu", fc_layers="class_layers.out") result = cam(x=torch.rand((1, 1, 48, 64))) @@ -307,10 +307,10 @@ class GradCAM(CAMBase): .. code-block:: python # densenet 2d - from monai.networks.nets import densenet121 + from monai.networks.nets import DenseNet121 from monai.visualize import GradCAM - model_2d = densenet121(spatial_dims=2, in_channels=1, out_channels=3) + model_2d = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3) cam = GradCAM(nn_module=model_2d, target_layers="class_layers.relu") result = cam(x=torch.rand((1, 1, 48, 64))) diff --git a/monai/visualize/occlusion_sensitivity.py b/monai/visualize/occlusion_sensitivity.py index 5863614965..ee9a967da1 100644 --- a/monai/visualize/occlusion_sensitivity.py +++ b/monai/visualize/occlusion_sensitivity.py @@ -122,10 +122,10 @@ class OcclusionSensitivity: .. code-block:: python # densenet 2d - from monai.networks.nets import densenet121 + from monai.networks.nets import DenseNet121 from monai.visualize import OcclusionSensitivity - model_2d = densenet121(spatial_dims=2, in_channels=1, out_channels=3) + model_2d = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3) occ_sens = OcclusionSensitivity(nn_module=model_2d) occ_map, most_probable_class = occ_sens(x=torch.rand((1, 1, 48, 64)), class_idx=None, b_box=[-1, -1, 2, 40, 1, 62]) diff --git a/tests/test_densenet.py b/tests/test_densenet.py index 5ead5f5818..c934841598 100644 --- a/tests/test_densenet.py +++ b/tests/test_densenet.py @@ -17,9 +17,9 @@ from parameterized import parameterized from monai.networks import eval_mode -from monai.networks.nets import DenseNet, densenet121, densenet169, densenet201, densenet264 +from monai.networks.nets import DenseNet121, DenseNet169, DenseNet201, DenseNet264 from monai.utils import optional_import -from tests.utils import skip_if_quick, test_pretrained_networks, test_script_save +from tests.utils import skip_if_quick, test_script_save if TYPE_CHECKING: import torchvision @@ -51,50 +51,39 @@ TEST_CASES = [] for case in [TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]: - for model in [densenet121, densenet169, densenet201, densenet264]: + for model in [DenseNet121, DenseNet169, DenseNet201, DenseNet264]: TEST_CASES.append([model, *case]) -TEST_SCRIPT_CASES = [[model, *TEST_CASE_1] for model in [densenet121, densenet169, densenet201, densenet264]] +TEST_SCRIPT_CASES = [[model, *TEST_CASE_1] for model in [DenseNet121, DenseNet169, DenseNet201, DenseNet264]] TEST_PRETRAINED_2D_CASE_1 = [ # 4-channel 2D, batch 2 - densenet121, + DenseNet121, {"pretrained": True, "progress": True, "spatial_dims": 2, "in_channels": 2, "out_channels": 3}, (1, 2, 32, 64), (1, 3), ] TEST_PRETRAINED_2D_CASE_2 = [ # 4-channel 2D, batch 2 - densenet121, + DenseNet121, {"pretrained": True, "progress": False, "spatial_dims": 2, "in_channels": 2, "out_channels": 1}, (1, 2, 32, 64), (1, 1), ] TEST_PRETRAINED_2D_CASE_3 = [ - densenet121, + DenseNet121, {"pretrained": True, "progress": False, "spatial_dims": 2, "in_channels": 3, "out_channels": 1}, (1, 3, 32, 32), ] -TEST_PRETRAINED_2D_CASE_4 = [ - { - "pretrained": True, - "pretrained_arch": "densenet264", - "progress": False, - "spatial_dims": 2, - "in_channels": 3, - 
"out_channels": 1, - }, -] - class TestPretrainedDENSENET(unittest.TestCase): @parameterized.expand([TEST_PRETRAINED_2D_CASE_1, TEST_PRETRAINED_2D_CASE_2]) @skip_if_quick def test_121_2d_shape_pretrain(self, model, input_param, input_shape, expected_shape): - net = test_pretrained_networks(model, input_param, device) + net = model(**input_param).to(device) with eval_mode(net): result = net.forward(torch.randn(input_shape).to(device)) self.assertEqual(result.shape, expected_shape) @@ -103,7 +92,7 @@ def test_121_2d_shape_pretrain(self, model, input_param, input_shape, expected_s @skipUnless(has_torchvision, "Requires `torchvision` package.") def test_pretrain_consistency(self, model, input_param, input_shape): example = torch.randn(input_shape).to(device) - net = test_pretrained_networks(model, input_param, device) + net = model(**input_param).to(device) with eval_mode(net): result = net.features.forward(example) torchvision_net = torchvision.models.densenet121(pretrained=True).to(device) @@ -111,11 +100,6 @@ def test_pretrain_consistency(self, model, input_param, input_shape): expected_result = torchvision_net.features.forward(example) self.assertTrue(torch.all(result == expected_result)) - @parameterized.expand([TEST_PRETRAINED_2D_CASE_4]) - def test_ill_pretrain(self, input_param): - with self.assertRaisesRegex(ValueError, ""): - net = DenseNet(**input_param) - class TestDENSENET(unittest.TestCase): @parameterized.expand(TEST_CASES) diff --git a/tests/test_integration_classification_2d.py b/tests/test_integration_classification_2d.py index 4be59cba41..6f8c949d78 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/test_integration_classification_2d.py @@ -22,7 +22,7 @@ from monai.apps import download_and_extract from monai.metrics import compute_roc_auc from monai.networks import eval_mode -from monai.networks.nets import densenet121 +from monai.networks.nets import DenseNet121 from monai.transforms import AddChannel, Compose, LoadImage, RandFlip, RandRotate, RandZoom, ScaleIntensity, ToTensor from monai.utils import set_determinism from tests.testing_data.integration_answers import test_integration_value @@ -71,7 +71,7 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", val_ds = MedNISTDataset(val_x, val_y, val_transforms) val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers) - model = densenet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(train_y))).to(device) + model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(train_y))).to(device) loss_function = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), 1e-5) epoch_num = 4 @@ -133,7 +133,7 @@ def run_inference_test(root_dir, test_x, test_y, device="cuda:0", num_workers=10 val_ds = MedNISTDataset(test_x, test_y, val_transforms) val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers) - model = densenet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(test_y))).to(device) + model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(test_y))).to(device) model_filename = os.path.join(root_dir, "best_metric_model.pth") model.load_state_dict(torch.load(model_filename)) diff --git a/tests/test_occlusion_sensitivity.py b/tests/test_occlusion_sensitivity.py index 47a13d01e1..d58359a598 100644 --- a/tests/test_occlusion_sensitivity.py +++ b/tests/test_occlusion_sensitivity.py @@ -14,13 +14,13 @@ import torch from parameterized import parameterized -from 
monai.networks.nets import DenseNet, densenet121 +from monai.networks.nets import DenseNet, DenseNet121 from monai.visualize import OcclusionSensitivity device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") out_channels_2d = 4 out_channels_3d = 3 -model_2d = densenet121(spatial_dims=2, in_channels=1, out_channels=out_channels_2d).to(device) +model_2d = DenseNet121(spatial_dims=2, in_channels=1, out_channels=out_channels_2d).to(device) model_3d = DenseNet( spatial_dims=3, in_channels=1, out_channels=out_channels_3d, init_features=2, growth_rate=2, block_config=(6,) ).to(device) diff --git a/tests/test_senet.py b/tests/test_senet.py index a2d96e1f18..1c6222d6a0 100644 --- a/tests/test_senet.py +++ b/tests/test_senet.py @@ -17,16 +17,7 @@ from parameterized import parameterized from monai.networks import eval_mode -from monai.networks.blocks.squeeze_and_excitation import SEBottleneck -from monai.networks.nets import ( - SENet, - se_resnet50, - se_resnet101, - se_resnet152, - se_resnext50_32x4d, - se_resnext101_32x4d, - senet154, -) +from monai.networks.nets import SENet154, SEResNet50, SEResNet101, SEResNet152, SEResNext50, SEResNext101 from monai.utils import optional_import from tests.utils import test_pretrained_networks, test_script_save @@ -41,27 +32,14 @@ device = "cuda" if torch.cuda.is_available() else "cpu" NET_ARGS = {"spatial_dims": 3, "in_channels": 2, "num_classes": 2} -TEST_CASE_1 = [senet154, NET_ARGS] -TEST_CASE_2 = [se_resnet50, NET_ARGS] -TEST_CASE_3 = [se_resnet101, NET_ARGS] -TEST_CASE_4 = [se_resnet152, NET_ARGS] -TEST_CASE_5 = [se_resnext50_32x4d, NET_ARGS] -TEST_CASE_6 = [se_resnext101_32x4d, NET_ARGS] - -TEST_CASE_PRETRAINED_1 = [se_resnet50, {"spatial_dims": 2, "in_channels": 3, "num_classes": 2, "pretrained": True}] -TEST_CASE_PRETRAINED_2 = [ - { - "spatial_dims": 2, - "in_channels": 3, - "block": SEBottleneck, - "layers": [3, 8, 36, 3], - "groups": 64, - "reduction": 16, - "num_classes": 2, - "pretrained": True, - "pretrained_arch": "resnet50", - } -] +TEST_CASE_1 = [SENet154, NET_ARGS] +TEST_CASE_2 = [SEResNet50, NET_ARGS] +TEST_CASE_3 = [SEResNet101, NET_ARGS] +TEST_CASE_4 = [SEResNet152, NET_ARGS] +TEST_CASE_5 = [SEResNext50, NET_ARGS] +TEST_CASE_6 = [SEResNext101, NET_ARGS] + +TEST_CASE_PRETRAINED_1 = [SEResNet50, {"spatial_dims": 2, "in_channels": 3, "num_classes": 2, "pretrained": True}] class TestSENET(unittest.TestCase): @@ -107,11 +85,6 @@ def test_pretrain_consistency(self, model, input_param): # a conv layer with kernel size equals to 1. It may bring a little difference. 
self.assertTrue(torch.allclose(result, expected_result, rtol=1e-5, atol=1e-5)) - @parameterized.expand([TEST_CASE_PRETRAINED_2]) - def test_ill_pretrain(self, input_param): - with self.assertRaisesRegex(ValueError, ""): - net = SENet(**input_param) - if __name__ == "__main__": unittest.main() diff --git a/tests/test_vis_cam.py b/tests/test_vis_cam.py index d400c27f02..47c116cd5d 100644 --- a/tests/test_vis_cam.py +++ b/tests/test_vis_cam.py @@ -14,7 +14,7 @@ import torch from parameterized import parameterized -from monai.networks.nets import DenseNet, densenet121, se_resnet50 +from monai.networks.nets import DenseNet, DenseNet121, SEResNet50 from monai.visualize import CAM # 2D @@ -68,15 +68,15 @@ class TestClassActivationMap(unittest.TestCase): @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_shape(self, input_data, expected_shape): if input_data["model"] == "densenet2d": - model = densenet121(spatial_dims=2, in_channels=1, out_channels=3) + model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3) if input_data["model"] == "densenet3d": model = DenseNet( spatial_dims=3, in_channels=1, out_channels=3, init_features=2, growth_rate=2, block_config=(6,) ) if input_data["model"] == "senet2d": - model = se_resnet50(spatial_dims=2, in_channels=3, num_classes=4) + model = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4) if input_data["model"] == "senet3d": - model = se_resnet50(spatial_dims=3, in_channels=3, num_classes=4) + model = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4) device = "cuda:0" if torch.cuda.is_available() else "cpu" model.to(device) model.eval() diff --git a/tests/test_vis_gradcam.py b/tests/test_vis_gradcam.py index df47c4920e..f8e49f486f 100644 --- a/tests/test_vis_gradcam.py +++ b/tests/test_vis_gradcam.py @@ -15,7 +15,7 @@ import torch from parameterized import parameterized -from monai.networks.nets import DenseNet, densenet121, se_resnet50 +from monai.networks.nets import DenseNet, DenseNet121, SEResNet50 from monai.visualize import GradCAM # 2D @@ -65,15 +65,15 @@ class TestGradientClassActivationMap(unittest.TestCase): @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_shape(self, input_data, expected_shape): if input_data["model"] == "densenet2d": - model = densenet121(spatial_dims=2, in_channels=1, out_channels=3) + model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3) if input_data["model"] == "densenet3d": model = DenseNet( spatial_dims=3, in_channels=1, out_channels=3, init_features=2, growth_rate=2, block_config=(6,) ) if input_data["model"] == "senet2d": - model = se_resnet50(spatial_dims=2, in_channels=3, num_classes=4) + model = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4) if input_data["model"] == "senet3d": - model = se_resnet50(spatial_dims=3, in_channels=3, num_classes=4) + model = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4) device = "cuda:0" if torch.cuda.is_available() else "cpu" model.to(device) model.eval() diff --git a/tests/test_vis_gradcampp.py b/tests/test_vis_gradcampp.py index fce68ccde0..92a4b2ac7b 100644 --- a/tests/test_vis_gradcampp.py +++ b/tests/test_vis_gradcampp.py @@ -14,7 +14,7 @@ import torch from parameterized import parameterized -from monai.networks.nets import DenseNet, densenet121, se_resnet50 +from monai.networks.nets import DenseNet, DenseNet121, SEResNet50 from monai.visualize import GradCAMpp # 2D @@ -64,15 +64,15 @@ class TestGradientClassActivationMapPP(unittest.TestCase): 
@parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) def test_shape(self, input_data, expected_shape): if input_data["model"] == "densenet2d": - model = densenet121(spatial_dims=2, in_channels=1, out_channels=3) + model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3) if input_data["model"] == "densenet3d": model = DenseNet( spatial_dims=3, in_channels=1, out_channels=3, init_features=2, growth_rate=2, block_config=(6,) ) if input_data["model"] == "senet2d": - model = se_resnet50(spatial_dims=2, in_channels=3, num_classes=4) + model = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4) if input_data["model"] == "senet3d": - model = se_resnet50(spatial_dims=3, in_channels=3, num_classes=4) + model = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4) device = "cuda:0" if torch.cuda.is_available() else "cpu" model.to(device) model.eval() From 5151b15aaadc90bf578729d85e8011faae5d1836 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Mon, 22 Mar 2021 11:17:28 +0000 Subject: [PATCH 082/457] remove .value from InverseKeys enum (#1817) Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/transforms/croppad/batch.py | 4 +-- monai/transforms/croppad/dictionary.py | 22 ++++++------ monai/transforms/inverse.py | 20 +++++------ monai/transforms/spatial/dictionary.py | 46 +++++++++++++------------- monai/utils/enums.py | 2 +- tests/test_decollate.py | 2 +- tests/test_inverse.py | 2 +- 7 files changed, 49 insertions(+), 49 deletions(-) diff --git a/monai/transforms/croppad/batch.py b/monai/transforms/croppad/batch.py index 7cbf39597c..37ff8618fa 100644 --- a/monai/transforms/croppad/batch.py +++ b/monai/transforms/croppad/batch.py @@ -119,10 +119,10 @@ def inverse(data: dict) -> Dict[Hashable, np.ndarray]: d = deepcopy(data) for key in d.keys(): - transform_key = str(key) + InverseKeys.KEY_SUFFIX.value + transform_key = str(key) + InverseKeys.KEY_SUFFIX if transform_key in d.keys(): transform = d[transform_key][-1] - if transform[InverseKeys.CLASS_NAME.value] == PadListDataCollate.__name__: + if transform[InverseKeys.CLASS_NAME] == PadListDataCollate.__name__: d[key] = CenterSpatialCrop(transform["orig_size"])(d[key]) # remove transform d[transform_key].pop() diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 822db28467..c3523f3993 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -133,7 +133,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform - orig_size = transform[InverseKeys.ORIG_SIZE.value] + orig_size = transform[InverseKeys.ORIG_SIZE] if self.padder.method == Method.SYMMETRIC: current_size = d[key].shape[1:] roi_center = [floor(i / 2) if r % 2 == 0 else (i - 1) // 2 for r, i in zip(orig_size, current_size)] @@ -202,7 +202,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform - orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) + orig_size = np.array(transform[InverseKeys.ORIG_SIZE]) roi_start = np.array(self.padder.spatial_border) # Need to convert single value to [min1,min2,...] 
if roi_start.size == 1: @@ -210,7 +210,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar # need to convert [min1,max1,min2,...] to [min1,min2,...] elif roi_start.size == 2 * orig_size.size: roi_start = roi_start[::2] - roi_end = np.array(transform[InverseKeys.ORIG_SIZE.value]) + roi_start + roi_end = np.array(transform[InverseKeys.ORIG_SIZE]) + roi_start inverse_transform = SpatialCrop(roi_start=roi_start, roi_end=roi_end) # Apply inverse transform @@ -268,7 +268,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform - orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) + orig_size = np.array(transform[InverseKeys.ORIG_SIZE]) current_size = np.array(d[key].shape[1:]) roi_start = np.floor((current_size - orig_size) / 2) roi_end = orig_size + roi_start @@ -323,7 +323,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform - orig_size = transform[InverseKeys.ORIG_SIZE.value] + orig_size = transform[InverseKeys.ORIG_SIZE] pad_to_start = np.array(self.cropper.roi_start) pad_to_end = orig_size - self.cropper.roi_end # interleave mins and maxes @@ -369,7 +369,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform - orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) + orig_size = np.array(transform[InverseKeys.ORIG_SIZE]) current_size = np.array(d[key].shape[1:]) pad_to_start = np.floor((orig_size - current_size) / 2).astype(int) # in each direction, if original size is even and current size is odd, += 1 @@ -449,12 +449,12 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform - orig_size = transform[InverseKeys.ORIG_SIZE.value] + orig_size = transform[InverseKeys.ORIG_SIZE] random_center = self.random_center pad_to_start = np.empty((len(orig_size)), dtype=np.int32) pad_to_end = np.empty((len(orig_size)), dtype=np.int32) if random_center: - for i, _slice in enumerate(transform[InverseKeys.EXTRA_INFO.value]["slices"]): + for i, _slice in enumerate(transform[InverseKeys.EXTRA_INFO]["slices"]): pad_to_start[i] = _slice[0] pad_to_end[i] = orig_size[i] - _slice[1] else: @@ -594,8 +594,8 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform - orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) - extra_info = transform[InverseKeys.EXTRA_INFO.value] + orig_size = np.array(transform[InverseKeys.ORIG_SIZE]) + extra_info = transform[InverseKeys.EXTRA_INFO] pad_to_start = np.array(extra_info["box_start"]) pad_to_end = orig_size - np.array(extra_info["box_end"]) # interleave mins and maxes @@ -827,7 +827,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform - orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value]) + orig_size = np.array(transform[InverseKeys.ORIG_SIZE]) current_size = 
np.array(d[key].shape[1:]) # Unfortunately, we can't just use ResizeWithPadOrCrop with original size because of odd/even rounding. # Instead, we first pad any smaller dimensions, and then we crop any larger dimensions. diff --git a/monai/transforms/inverse.py b/monai/transforms/inverse.py index 9708f103e6..3e5b68e8e4 100644 --- a/monai/transforms/inverse.py +++ b/monai/transforms/inverse.py @@ -72,17 +72,17 @@ def push_transform( orig_size: Optional[Tuple] = None, ) -> None: """Append to list of applied transforms for that key.""" - key_transform = str(key) + InverseKeys.KEY_SUFFIX.value + key_transform = str(key) + InverseKeys.KEY_SUFFIX info = { - InverseKeys.CLASS_NAME.value: self.__class__.__name__, - InverseKeys.ID.value: id(self), - InverseKeys.ORIG_SIZE.value: orig_size or data[key].shape[1:], + InverseKeys.CLASS_NAME: self.__class__.__name__, + InverseKeys.ID: id(self), + InverseKeys.ORIG_SIZE: orig_size or data[key].shape[1:], } if extra_info is not None: - info[InverseKeys.EXTRA_INFO.value] = extra_info + info[InverseKeys.EXTRA_INFO] = extra_info # If class is randomizable transform, store whether the transform was actually performed (based on `prob`) if isinstance(self, RandomizableTransform): - info[InverseKeys.DO_TRANSFORM.value] = self._do_transform + info[InverseKeys.DO_TRANSFORM] = self._do_transform # If this is the first, create list if key_transform not in data: data[key_transform] = [] @@ -90,25 +90,25 @@ def push_transform( def check_transforms_match(self, transform: dict) -> None: """Check transforms are of same instance.""" - if transform[InverseKeys.ID.value] == id(self): + if transform[InverseKeys.ID] == id(self): return # basic check if multiprocessing uses 'spawn' (objects get recreated so don't have same ID) if ( torch.multiprocessing.get_start_method(allow_none=False) == "spawn" - and transform[InverseKeys.CLASS_NAME.value] == self.__class__.__name__ + and transform[InverseKeys.CLASS_NAME] == self.__class__.__name__ ): return raise RuntimeError("Should inverse most recently applied invertible transform first") def get_most_recent_transform(self, data: dict, key: Hashable) -> dict: """Get most recent transform.""" - transform = dict(data[str(key) + InverseKeys.KEY_SUFFIX.value][-1]) + transform = dict(data[str(key) + InverseKeys.KEY_SUFFIX][-1]) self.check_transforms_match(transform) return transform def pop_transform(self, data: dict, key: Hashable) -> None: """Remove most recent transform.""" - data[str(key) + InverseKeys.KEY_SUFFIX.value].pop() + data[str(key) + InverseKeys.KEY_SUFFIX].pop() def inverse(self, data: dict) -> Dict[Hashable, np.ndarray]: """ diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 32327ec302..0d5b3436fd 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -225,8 +225,8 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar + "Please raise a github issue if you need this feature" ) # Create inverse transform - meta_data = d[transform[InverseKeys.EXTRA_INFO.value]["meta_data_key"]] - old_affine = np.array(transform[InverseKeys.EXTRA_INFO.value]["old_affine"]) + meta_data = d[transform[InverseKeys.EXTRA_INFO]["meta_data_key"]] + old_affine = np.array(transform[InverseKeys.EXTRA_INFO]["old_affine"]) orig_pixdim = np.sqrt(np.sum(np.square(old_affine), 0))[:-1] inverse_transform = Spacing(orig_pixdim, diagonal=self.spacing_transform.diagonal) # Apply inverse @@ -312,8 +312,8 @@ def inverse(self, data: Mapping[Hashable, 
np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform - meta_data = d[transform[InverseKeys.EXTRA_INFO.value]["meta_data_key"]] - orig_affine = transform[InverseKeys.EXTRA_INFO.value]["old_affine"] + meta_data = d[transform[InverseKeys.EXTRA_INFO]["meta_data_key"]] + orig_affine = transform[InverseKeys.EXTRA_INFO]["old_affine"] orig_axcodes = nib.orientations.aff2axcodes(orig_affine) inverse_transform = Orientation( axcodes=orig_axcodes, @@ -429,9 +429,9 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Check if random transform was actually performed (based on `prob`) - if transform[InverseKeys.DO_TRANSFORM.value]: + if transform[InverseKeys.DO_TRANSFORM]: # Create inverse transform - num_times_rotated = transform[InverseKeys.EXTRA_INFO.value]["rand_k"] + num_times_rotated = transform[InverseKeys.EXTRA_INFO]["rand_k"] num_times_to_rotate = 4 - num_times_rotated inverse_transform = Rotate90(num_times_to_rotate, self.spatial_axes) # Might need to convert to numpy @@ -491,7 +491,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar d = deepcopy(dict(data)) for key, mode, align_corners in self.key_iterator(d, self.mode, self.align_corners): transform = self.get_most_recent_transform(d, key) - orig_size = transform[InverseKeys.ORIG_SIZE.value] + orig_size = transform[InverseKeys.ORIG_SIZE] # Create inverse transform inverse_transform = Resize(orig_size, mode, align_corners) # Apply inverse transform @@ -582,9 +582,9 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): transform = self.get_most_recent_transform(d, key) - orig_size = transform[InverseKeys.ORIG_SIZE.value] + orig_size = transform[InverseKeys.ORIG_SIZE] # Create inverse transform - fwd_affine = transform[InverseKeys.EXTRA_INFO.value]["affine"] + fwd_affine = transform[InverseKeys.EXTRA_INFO]["affine"] inv_affine = np.linalg.inv(fwd_affine) affine_grid = AffineGrid(affine=inv_affine) @@ -710,9 +710,9 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): transform = self.get_most_recent_transform(d, key) - orig_size = transform[InverseKeys.ORIG_SIZE.value] + orig_size = transform[InverseKeys.ORIG_SIZE] # Create inverse transform - fwd_affine = transform[InverseKeys.EXTRA_INFO.value]["affine"] + fwd_affine = transform[InverseKeys.EXTRA_INFO]["affine"] inv_affine = np.linalg.inv(fwd_affine) affine_grid = AffineGrid(affine=inv_affine) @@ -1048,7 +1048,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Check if random transform was actually performed (based on `prob`) - if transform[InverseKeys.DO_TRANSFORM.value]: + if transform[InverseKeys.DO_TRANSFORM]: # Might need to convert to numpy if isinstance(d[key], torch.Tensor): d[key] = torch.Tensor(d[key]).cpu().numpy() @@ -1098,8 +1098,8 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Check if random transform was actually performed (based on `prob`) - if 
transform[InverseKeys.DO_TRANSFORM.value]: - flipper = Flip(spatial_axis=transform[InverseKeys.EXTRA_INFO.value]["axis"]) + if transform[InverseKeys.DO_TRANSFORM]: + flipper = Flip(spatial_axis=transform[InverseKeys.EXTRA_INFO]["axis"]) # Might need to convert to numpy if isinstance(d[key], torch.Tensor): d[key] = torch.Tensor(d[key]).cpu().numpy() @@ -1181,7 +1181,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar ): transform = self.get_most_recent_transform(d, key) # Create inverse transform - fwd_rot_mat = transform[InverseKeys.EXTRA_INFO.value]["rot_mat"] + fwd_rot_mat = transform[InverseKeys.EXTRA_INFO]["rot_mat"] inv_rot_mat = np.linalg.inv(fwd_rot_mat) xform = AffineTransform( @@ -1194,7 +1194,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar output = xform( torch.as_tensor(np.ascontiguousarray(d[key]).astype(dtype)).unsqueeze(0), torch.as_tensor(np.ascontiguousarray(inv_rot_mat).astype(dtype)), - spatial_size=transform[InverseKeys.ORIG_SIZE.value], + spatial_size=transform[InverseKeys.ORIG_SIZE], ) d[key] = np.asarray(output.squeeze(0).detach().cpu().numpy(), dtype=np.float32) # Remove the applied transform @@ -1314,9 +1314,9 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar ): transform = self.get_most_recent_transform(d, key) # Check if random transform was actually performed (based on `prob`) - if transform[InverseKeys.DO_TRANSFORM.value]: + if transform[InverseKeys.DO_TRANSFORM]: # Create inverse transform - fwd_rot_mat = transform[InverseKeys.EXTRA_INFO.value]["rot_mat"] + fwd_rot_mat = transform[InverseKeys.EXTRA_INFO]["rot_mat"] inv_rot_mat = np.linalg.inv(fwd_rot_mat) xform = AffineTransform( @@ -1329,7 +1329,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar output = xform( torch.as_tensor(np.ascontiguousarray(d[key]).astype(dtype)).unsqueeze(0), torch.as_tensor(np.ascontiguousarray(inv_rot_mat).astype(dtype)), - spatial_size=transform[InverseKeys.ORIG_SIZE.value], + spatial_size=transform[InverseKeys.ORIG_SIZE], ) d[key] = np.asarray(output.squeeze(0).detach().cpu().numpy(), dtype=np.float32) # Remove the applied transform @@ -1410,7 +1410,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar align_corners=align_corners, ) # Size might be out by 1 voxel so pad - d[key] = SpatialPad(transform[InverseKeys.ORIG_SIZE.value])(d[key]) + d[key] = SpatialPad(transform[InverseKeys.ORIG_SIZE])(d[key]) # Remove the applied transform self.pop_transform(d, key) @@ -1513,9 +1513,9 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar ): transform = self.get_most_recent_transform(d, key) # Check if random transform was actually performed (based on `prob`) - if transform[InverseKeys.DO_TRANSFORM.value]: + if transform[InverseKeys.DO_TRANSFORM]: # Create inverse transform - zoom = np.array(transform[InverseKeys.EXTRA_INFO.value]["zoom"]) + zoom = np.array(transform[InverseKeys.EXTRA_INFO]["zoom"]) inverse_transform = Zoom(zoom=1 / zoom, keep_size=self.keep_size) # Apply inverse d[key] = inverse_transform( @@ -1525,7 +1525,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar align_corners=align_corners, ) # Size might be out by 1 voxel so pad - d[key] = SpatialPad(transform[InverseKeys.ORIG_SIZE.value])(d[key]) + d[key] = SpatialPad(transform[InverseKeys.ORIG_SIZE])(d[key]) # Remove the applied transform self.pop_transform(d, key) diff --git 
a/monai/utils/enums.py b/monai/utils/enums.py index d661781616..63d65329af 100644 --- a/monai/utils/enums.py +++ b/monai/utils/enums.py @@ -217,7 +217,7 @@ class Method(Enum): END = "end" -class InverseKeys(Enum): +class InverseKeys: """Extra meta data keys used for inverse transforms.""" CLASS_NAME = "class" diff --git a/tests/test_decollate.py b/tests/test_decollate.py index 4dc5a217a7..5b78bbbcf6 100644 --- a/tests/test_decollate.py +++ b/tests/test_decollate.py @@ -55,7 +55,7 @@ def check_match(self, in1, in2): k1, k2 = k1.value, k2.value self.check_match(k1, k2) # Transform ids won't match for windows with multiprocessing, so don't check values - if k1 == InverseKeys.ID.value and sys.platform in ["darwin", "win32"]: + if k1 == InverseKeys.ID and sys.platform in ["darwin", "win32"]: continue self.check_match(v1, v2) elif isinstance(in1, (list, tuple)): diff --git a/tests/test_inverse.py b/tests/test_inverse.py index f548b53f11..d54855d7c1 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -565,7 +565,7 @@ def test_inverse_inferred_seg(self): data = first(loader) labels = data["label"].to(device) segs = model(labels).detach().cpu() - label_transform_key = "label" + InverseKeys.KEY_SUFFIX.value + label_transform_key = "label" + InverseKeys.KEY_SUFFIX segs_dict = {"label": segs, label_transform_key: data[label_transform_key]} segs_dict_decollated = decollate_batch(segs_dict) From 06e1e990b03a763ac0a1da0e37917f991e3901cf Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Mon, 22 Mar 2021 16:30:38 +0000 Subject: [PATCH 083/457] cron tutorial notebooks (#1783) * cron tutorial notebooks Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> Co-authored-by: Wenqi Li --- .github/workflows/cron.yml | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index 90abb5d7e4..761b1f7ebc 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -128,3 +128,36 @@ jobs: with: fail_ci_if_error: false file: ./coverage.xml + + cron-tutorial-notebooks: + if: github.repository == 'Project-MONAI/MONAI' + needs: cron-gpu # so that monai itself is verified first + container: + image: nvcr.io/nvidia/pytorch:21.02-py3 # testing with the latest pytorch base image + options: "--gpus all --ipc=host" + runs-on: [self-hosted, linux, x64, common] + steps: + - uses: actions/checkout@v2 + - name: Install MONAI + id: monai-install + run: | + which python + python -m pip install --upgrade pip wheel + python -m pip install -r requirements-dev.txt + BUILD_MONAI=0 python setup.py develop # install monai + nvidia-smi + export CUDA_VISIBLE_DEVICES=$(python -m tests.utils) + echo $CUDA_VISIBLE_DEVICES + echo "::set-output name=devices::$CUDA_VISIBLE_DEVICES" + - name: Checkout tutorials and install their requirements + run: | + cd /opt + git clone --depth 1 --branch master --single-branch https://github.com/Project-MONAI/tutorials.git # latest commit of master branch + cd tutorials + python -m pip install -r requirements.txt + - name: Run tutorial notebooks + run: | + export CUDA_VISIBLE_DEVICES=${{ steps.monai-install.outputs.devices }} + echo $CUDA_VISIBLE_DEVICES + cd /opt/tutorials + $(pwd)/runner.sh From a8f83e17ffee12bd1b8f9f6d7711a26ba6ff69ff Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Mon, 22 Mar 2021 20:05:26 +0000 Subject: [PATCH 084/457] refactor CommonKeys (#1825) Signed-off-by: Richard 
Brown <33289025+rijobro@users.noreply.github.com> --- monai/apps/deepgrow/interaction.py | 2 +- monai/engines/__init__.py | 2 +- monai/engines/evaluator.py | 2 +- monai/engines/trainer.py | 2 +- monai/engines/utils.py | 19 +------------------ monai/handlers/metric_logger.py | 2 +- monai/utils/__init__.py | 1 + monai/utils/enums.py | 18 ++++++++++++++++++ tests/test_threadcontainer.py | 3 ++- 9 files changed, 27 insertions(+), 24 deletions(-) diff --git a/monai/apps/deepgrow/interaction.py b/monai/apps/deepgrow/interaction.py index 77e271a9eb..8a64ad7cf9 100644 --- a/monai/apps/deepgrow/interaction.py +++ b/monai/apps/deepgrow/interaction.py @@ -13,9 +13,9 @@ import torch from monai.engines import SupervisedEvaluator, SupervisedTrainer -from monai.engines.utils import CommonKeys from monai.engines.workflow import Events from monai.transforms import Compose +from monai.utils.enums import CommonKeys class Interaction: diff --git a/monai/engines/__init__.py b/monai/engines/__init__.py index 8256680735..d3a14f6104 100644 --- a/monai/engines/__init__.py +++ b/monai/engines/__init__.py @@ -12,4 +12,4 @@ from .evaluator import EnsembleEvaluator, Evaluator, SupervisedEvaluator from .multi_gpu_supervised_trainer import create_multigpu_supervised_evaluator, create_multigpu_supervised_trainer from .trainer import GanTrainer, SupervisedTrainer, Trainer -from .utils import CommonKeys, GanKeys, IterationEvents, default_make_latent, default_prepare_batch, get_devices_spec +from .utils import GanKeys, IterationEvents, default_make_latent, default_prepare_batch, get_devices_spec diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py index 0afa3747a4..2c237f5245 100644 --- a/monai/engines/evaluator.py +++ b/monai/engines/evaluator.py @@ -14,13 +14,13 @@ import torch from torch.utils.data import DataLoader -from monai.engines.utils import CommonKeys as Keys from monai.engines.utils import IterationEvents, default_prepare_batch from monai.engines.workflow import Workflow from monai.inferers import Inferer, SimpleInferer from monai.networks.utils import eval_mode from monai.transforms import Transform from monai.utils import ensure_tuple, exact_version, optional_import +from monai.utils.enums import CommonKeys as Keys if TYPE_CHECKING: from ignite.engine import Engine diff --git a/monai/engines/trainer.py b/monai/engines/trainer.py index 5b996eafe1..a7b1943211 100644 --- a/monai/engines/trainer.py +++ b/monai/engines/trainer.py @@ -15,12 +15,12 @@ from torch.optim.optimizer import Optimizer from torch.utils.data import DataLoader -from monai.engines.utils import CommonKeys as Keys from monai.engines.utils import GanKeys, IterationEvents, default_make_latent, default_prepare_batch from monai.engines.workflow import Workflow from monai.inferers import Inferer, SimpleInferer from monai.transforms import Transform from monai.utils import exact_version, optional_import +from monai.utils.enums import CommonKeys as Keys if TYPE_CHECKING: from ignite.engine import Engine diff --git a/monai/engines/utils.py b/monai/engines/utils.py index b0b1e44f71..04237d0f4a 100644 --- a/monai/engines/utils.py +++ b/monai/engines/utils.py @@ -14,6 +14,7 @@ import torch from monai.utils import exact_version, optional_import +from monai.utils.enums import CommonKeys if TYPE_CHECKING: from ignite.engine import EventEnum @@ -22,7 +23,6 @@ __all__ = [ "IterationEvents", - "CommonKeys", "GanKeys", "get_devices_spec", "default_prepare_batch", @@ -47,23 +47,6 @@ class IterationEvents(EventEnum): OPTIMIZER_COMPLETED = 
"optimizer_completed" -class CommonKeys: - """ - A set of common keys for dictionary based supervised training process. - `IMAGE` is the input image data. - `LABEL` is the training or evaluation label of segmentation or classification task. - `PRED` is the prediction data of model output. - `LOSS` is the loss value of current iteration. - `INFO` is some useful information during training or evaluation, like loss value, etc. - - """ - - IMAGE = "image" - LABEL = "label" - PRED = "pred" - LOSS = "loss" - - class GanKeys: """ A set of common keys for generative adversarial networks. diff --git a/monai/handlers/metric_logger.py b/monai/handlers/metric_logger.py index c749d4bbab..0cfefb715a 100644 --- a/monai/handlers/metric_logger.py +++ b/monai/handlers/metric_logger.py @@ -14,8 +14,8 @@ from threading import RLock from typing import TYPE_CHECKING, Callable, DefaultDict, List, Optional -from monai.engines.utils import CommonKeys from monai.utils import exact_version, optional_import +from monai.utils.enums import CommonKeys Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: diff --git a/monai/utils/__init__.py b/monai/utils/__init__.py index 4d272ac6ff..6a76c96d0c 100644 --- a/monai/utils/__init__.py +++ b/monai/utils/__init__.py @@ -17,6 +17,7 @@ Average, BlendMode, ChannelMatching, + CommonKeys, GridSampleMode, GridSamplePadMode, InterpolateMode, diff --git a/monai/utils/enums.py b/monai/utils/enums.py index 63d65329af..9920aefe0e 100644 --- a/monai/utils/enums.py +++ b/monai/utils/enums.py @@ -29,6 +29,7 @@ "SkipMode", "Method", "InverseKeys", + "CommonKeys", ] @@ -226,3 +227,20 @@ class InverseKeys: EXTRA_INFO = "extra_info" DO_TRANSFORM = "do_transforms" KEY_SUFFIX = "_transforms" + + +class CommonKeys: + """ + A set of common keys for dictionary based supervised training process. + `IMAGE` is the input image data. + `LABEL` is the training or evaluation label of segmentation or classification task. + `PRED` is the prediction data of model output. + `LOSS` is the loss value of current iteration. + `INFO` is some useful information during training or evaluation, like loss value, etc. 
+ + """ + + IMAGE = "image" + LABEL = "label" + PRED = "pred" + LOSS = "loss" diff --git a/tests/test_threadcontainer.py b/tests/test_threadcontainer.py index 92a50a15aa..13608e166c 100644 --- a/tests/test_threadcontainer.py +++ b/tests/test_threadcontainer.py @@ -15,11 +15,12 @@ import torch from monai.utils import optional_import +from monai.utils.enums import CommonKeys try: _, has_ignite = optional_import("ignite") - from monai.engines import CommonKeys, SupervisedTrainer + from monai.engines import SupervisedTrainer from monai.utils import ThreadContainer except ImportError: has_ignite = False From db2f47de9df9418f57ee3573fab32b69c1f42d31 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Mon, 22 Mar 2021 21:42:42 +0000 Subject: [PATCH 085/457] [1823] allow None for switch endianness (#1824) * allow None for switch endianness Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * add PIL testing Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com> * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot --- monai/transforms/io/array.py | 4 ++-- monai/utils/jupyter_utils.py | 4 ++-- tests/test_nifti_endianness.py | 15 +++++++++++++++ 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 164c7b0e76..9c2727ffc3 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -47,10 +47,10 @@ def switch_endianness(data, old, new): data = [switch_endianness(x, old, new) for x in data] elif isinstance(data, dict): data = {k: switch_endianness(v, old, new) for k, v in data.items()} - elif isinstance(data, (bool, str, float, int)): + elif isinstance(data, (bool, str, float, int, type(None))): pass else: - raise AssertionError() + raise AssertionError(f"Unknown type: {type(data).__name__}") return data diff --git a/monai/utils/jupyter_utils.py b/monai/utils/jupyter_utils.py index a7e712619e..726a11731c 100644 --- a/monai/utils/jupyter_utils.py +++ b/monai/utils/jupyter_utils.py @@ -10,8 +10,8 @@ # limitations under the License. """ -This set of utility function is meant to make using Jupyter notebooks easier with MONAI. Plotting functions using -Matplotlib produce common plots for metrics and images. +This set of utility function is meant to make using Jupyter notebooks easier with MONAI. Plotting functions using +Matplotlib produce common plots for metrics and images. 
""" from enum import Enum diff --git a/tests/test_nifti_endianness.py b/tests/test_nifti_endianness.py index d8adb1efb2..b725e2462c 100644 --- a/tests/test_nifti_endianness.py +++ b/tests/test_nifti_endianness.py @@ -1,3 +1,4 @@ +import os import tempfile import unittest from typing import TYPE_CHECKING, List, Tuple @@ -7,16 +8,20 @@ from parameterized import parameterized from monai.data import DataLoader, Dataset, create_test_image_2d +from monai.data.image_reader import PILReader from monai.transforms import LoadImage, LoadImaged from monai.transforms.io.array import switch_endianness from monai.utils.module import optional_import if TYPE_CHECKING: import nibabel as nib + from PIL import Image as PILImage has_nib = True + has_pil = True else: nib, has_nib = optional_import("nibabel") + PILImage, has_pil = optional_import("PIL.Image") TESTS: List[Tuple] = [] for endianness in ["<", ">"]: @@ -49,6 +54,16 @@ def test_switch(self): # verify data types output = switch_endianness(data, ">", "<") self.assertEqual(type(data), type(output)) + @skipUnless(has_pil, "Requires PIL") + def test_pil(self): + tempdir = tempfile.mkdtemp() + test_image = np.random.randint(0, 256, size=[128, 256]) + filename = os.path.join(tempdir, "test_image.png") + PILImage.fromarray(test_image.astype("uint8")).save(filename) + + loader = LoadImage(PILReader(converter=lambda image: image.convert("LA"))) + _ = loader(filename) + if __name__ == "__main__": unittest.main() From c4bb4cbf3bb4f476d0b84ae9698321d897faa58e Mon Sep 17 00:00:00 2001 From: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Date: Tue, 23 Mar 2021 08:55:24 +0000 Subject: [PATCH 086/457] Jupyter followup (#1826) * Jupyter and other additions Signed-off-by: Eric Kerfoot * Jupyter utilities update Signed-off-by: Eric Kerfoot --- monai/handlers/metric_logger.py | 14 ++++ monai/utils/jupyter_utils.py | 16 ----- tests/test_handler_metric_logger.py | 60 ++++++++++++++++++ tests/test_threadcontainer.py | 51 ++++++++++++++- .../threadcontainer_plot_test.png | Bin 0 -> 60289 bytes 5 files changed, 122 insertions(+), 19 deletions(-) create mode 100644 tests/test_handler_metric_logger.py create mode 100644 tests/testing_data/threadcontainer_plot_test.png diff --git a/monai/handlers/metric_logger.py b/monai/handlers/metric_logger.py index 0cfefb715a..778ec13900 100644 --- a/monai/handlers/metric_logger.py +++ b/monai/handlers/metric_logger.py @@ -40,6 +40,20 @@ class MetricLogger: useful for collecting loss and metric values in one place for storage with checkpoint savers (`state_dict` and `load_state_dict` methods provided as expected by Pytorch and Ignite) and for graphing during training. 
+ Example:: + # construct an evaluator saving mean dice metric values in the key "val_mean_dice" + evaluator = SupervisedEvaluator(..., key_val_metric={"val_mean_dice": MeanDice(...)}) + + # construct the logger and associate with evaluator to extract metric values from + logger = MetricLogger(evaluator=evaluator) + + # construct the trainer with the logger passed in as a handler so that it logs loss values + trainer = SupervisedTrainer(..., train_handlers=[logger, ValidationHandler(evaluator, 1)]) + + # run training, logger.loss will be a list of (iteration, loss) values, logger.metrics a dict with key + # "val_mean_dice" storing a list of (iteration, metric) values + trainer.run() + Args: loss_transform: Converts the `output` value from the trainer's state into a loss value metric_transform: Converts the metric value coming from the trainer/evaluator's state into a storable value diff --git a/monai/utils/jupyter_utils.py b/monai/utils/jupyter_utils.py index 726a11731c..10dfe59f59 100644 --- a/monai/utils/jupyter_utils.py +++ b/monai/utils/jupyter_utils.py @@ -21,22 +21,6 @@ import numpy as np import torch -# from monai.utils import exact_version, optional_import - -# if TYPE_CHECKING: -# import matplotlib.pyplot as plt -# from ignite.engine import Engine, Events - -# Figure = plt.Figure -# Axes = plt.Axes -# has_matplotlib = True -# else: -# Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") -# Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") -# plt, has_matplotlib = optional_import("matplotlib.pyplot") -# Figure, _ = optional_import("matplotlib.pyplot", name="Figure") -# Axes, _ = optional_import("matplotlib.pyplot", name="Axes") - try: import matplotlib.pyplot as plt diff --git a/tests/test_handler_metric_logger.py b/tests/test_handler_metric_logger.py new file mode 100644 index 0000000000..5812605cd7 --- /dev/null +++ b/tests/test_handler_metric_logger.py @@ -0,0 +1,60 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
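+
+# A minimal check of MetricLogger: the dummy training step below returns a
+# constant loss, and an EPOCH_COMPLETED handler records a dummy metric, so the
+# logger's `loss` list and `metrics` dict can be compared against known values.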
+ +import unittest + +import torch + +from monai.utils import optional_import +from tests.utils import SkipIfNoModule + +try: + _, has_ignite = optional_import("ignite") + from ignite.engine import Engine, Events + + from monai.handlers import MetricLogger +except ImportError: + has_ignite = False + + +class TestHandlerMetricLogger(unittest.TestCase): + @SkipIfNoModule("ignite") + def test_metric_logging(self): + dummy_name = "dummy" + + # set up engine + def _train_func(engine, batch): + return torch.tensor(0.0) + + engine = Engine(_train_func) + + # set up dummy metric + @engine.on(Events.EPOCH_COMPLETED) + def _update_metric(engine): + engine.state.metrics[dummy_name] = 1 + + # set up testing handler + handler = MetricLogger(loss_transform=lambda output: output.item()) + handler.attach(engine) + + engine.run(range(3), max_epochs=2) + + expected_loss = [(1, 0.0), (2, 0.0), (3, 0.0), (4, 0.0), (5, 0.0), (6, 0.0)] + expected_metric = [(4, 1), (5, 1), (6, 1)] + + self.assertSetEqual({dummy_name}, set(handler.metrics)) + + self.assertListEqual(expected_loss, handler.loss) + self.assertListEqual(expected_metric, handler.metrics[dummy_name]) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_threadcontainer.py b/tests/test_threadcontainer.py index 13608e166c..75612586e8 100644 --- a/tests/test_threadcontainer.py +++ b/tests/test_threadcontainer.py @@ -9,27 +9,32 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os +import tempfile import time import unittest import torch -from monai.utils import optional_import +from monai.data import DataLoader +from monai.utils import optional_import, set_determinism from monai.utils.enums import CommonKeys +from tests.utils import SkipIfNoModule try: _, has_ignite = optional_import("ignite") from monai.engines import SupervisedTrainer + from monai.handlers import MetricLogger from monai.utils import ThreadContainer except ImportError: has_ignite = False -from monai.data import DataLoader +compare_images, _ = optional_import("matplotlib.testing.compare", name="compare_images") class TestThreadContainer(unittest.TestCase): - @unittest.skipIf(not has_ignite, "Ignite needed for this test") + @SkipIfNoModule("ignite") def test_container(self): net = torch.nn.Conv2d(1, 1, 3, padding=1) @@ -57,3 +62,43 @@ def test_container(self): self.assertTrue(len(con.status_dict) > 0) con.join() + + @SkipIfNoModule("ignite") + @SkipIfNoModule("matplotlib") + def test_plot(self): + set_determinism(0) + + testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") + + net = torch.nn.Conv2d(1, 1, 3, padding=1) + + opt = torch.optim.Adam(net.parameters()) + + img = torch.rand(1, 16, 16) + data = {CommonKeys.IMAGE: img, CommonKeys.LABEL: img} + loader = DataLoader([data for _ in range(10)]) + + trainer = SupervisedTrainer( + device=torch.device("cpu"), + max_epochs=1, + train_data_loader=loader, + network=net, + optimizer=opt, + loss_function=torch.nn.L1Loss(), + ) + + logger = MetricLogger() + logger.attach(trainer) + + con = ThreadContainer(trainer) + con.start() + con.join() + + fig = con.plot_status(logger) + + with tempfile.TemporaryDirectory() as tempdir: + tempimg = f"{tempdir}/threadcontainer_plot_test.png" + fig.savefig(tempimg) + comp = compare_images(tempimg, f"{testing_dir}/threadcontainer_plot_test.png", 1e-3) + + self.assertIsNone(comp, comp) # None indicates test passed diff --git a/tests/testing_data/threadcontainer_plot_test.png 
b/tests/testing_data/threadcontainer_plot_test.png
new file mode 100644
index 0000000000000000000000000000000000000000..b3576491ec1b9f6ecd95427dc63293aa454fc5e7
GIT binary patch
literal 60289
[base85-encoded PNG data omitted: 60289-byte reference plot image used by test_threadcontainer.test_plot]
zB_R`F12O(k+ZNzQeW$5Hxf)mW|>}&}hs~P=sIzQLu`o;@>g*~w#n>l^(PbEBJ;3E-xw)=FTs2O&@#dE4mA+aFP*jEH^!1n1bE4&_TF3dmMCcN@n> zGRC}#4JCaBRaNW^H)LPlA_~3g9%={CLV8j6#qUF`2bJ;5D$?;^1LOp5pG~hY(iKpv zxd`$;Wbo+abhAEU%3Ru=kgI;A^_$*N2M!Lg*bs4CxQ{3dw4;d@`3}I^>a7-(7vd;V z#{$)sJvN7_6d18xSEL7iZibkp=wk{S2WgimZMR+AWkpcUI5~o z2L|H~8OawAgiW%9}jjC2}0F0=nbr|IA!4{APo>h z7V|`M?CeBFutVdPo*J{pEeyQ`0&7%+ayLgtpyeqi8tYhwb(7JRk_jUFJx5UY!Ms8I zjT3?H6yg10v;*N(Qw2YfGK|)nB4|2BgpR-YvV<9pV8<#;`JMC!#cJVuh@M9(;sn5c z_0q(4U>+*nQ!wVr!S3<~qu7L|m|gJT3+wk8k?MA<54*q!T|jUgo9jCqN6k(h^sgI; zxkoS4Q9{un|M=1hTcApa+)<)Ux35O&R)b2gVn~id6P@r+nP{@qChb_%$uX?KriMFu!!U~TFa^7oj^2wsoJrL9cI!@xnG+XfIL zQhFhgwHeo282}zK6ITjYE`bBg4dYm$G{>e`fHh;g8F&vDg>sP^xPsKv0Ni1TQoub( z3y=v#){2ng8>@x~Gr^v>m}3*mG~U|cA<+_yga?2$h72r1`_oWB2efAutRm7qHO>md7aQU!eAk{x0WX%nq?AkFD+Dc5DrNWO1yy?9sy|vsW`Zb z2niCOwTz%ta+T_z^4(yw8j+S1>~$F|K^2sbL{E?wpi$G}B5X_>X(bYcCGK2r4FWWz z%jd3cJot@96zpztU=m-eMD#k9vfm|S;`*Bqq-4F(_d!YO~?MA1cH?eC%&e%ufY?QIawRLZ0%V?r7Hyz^zG&I*XdMDkFp0noQ49o5v!X@^}DCUdO` z1sX+AEivCDI%8G!+Qi~vAyL;L^_fN0>dcoT#knMqlz79{-(!%HHsQ%<6DY*}6k)Xz zqywa=FGrHal>>NBWF-}Be1u}GOVhLmk#qy&Fs1;~) zrv(BW?uJ0Mp0ET+M@%D_5=Lj#LTl?a*@%vSz!gdskj)-QkRJuAg+n4ca4Q zBS+jxquDN7gi%Q+k1a;OG@{98*6q+ zt86;B>p&(ckR?uqNAbG!*41SI zK+zvCZ>`-}{qdGU5`Ed}jgMEao7?&w)X6^&h1FR*y^`Uf@=dX6A8i2aUOu*^nJ?zM z4|F<{(QwThJ~H=f3Ci&XhH=l_f1Qi`-+wcEW!2P_;#Z}=g@Y05lWkI2*4yT!fEu<< z?mfeZ930cL%!nKqR$poyhTe26nc^I4-|!)!Q|JYXk`I3Ar_`6=$teGuDV&OJ!v2;U zg_MGFqf>1<^UB?|hnHSl|LOjs#m~84#x=iBfpje3?-VV@Fh5O;0XFlr*!Pze=V&oz z%v>!-5qwySVS}5d1t2l~(Y}qt9}H2D%8kQF298BlQ!4hqGYAgDvF;kXl+P-FwQ{UA zsn@RE|GFzH5!e5+r7SyVJo{ot7B4JV3}wd!gvE5M0ASOx1c5mgN5T>X;VT?#aBXp; zV*w8*YB5&MH;;NeeXoEqEk@Ixti{9!uLU6M{L%SXCw$Q=3IXpaj{fK~4P6@Z&Ms+~ zSzX**v(HD+pK*geph#3JT@@>}u8R3tS2<@&*VW*G5NgzXU6oR#tFnlttBg?nl?v#D za4&{wHvhFem{Oizk7|c|Ig*j|KDeZ z1ZMBtyjMU^)zn>&{$SoGryMJuexji|77r~N?#3PQ)B_vr&GI9rAQVFD&Vh!Qu> za~EitZkuR=motpR$Pd6rY>Zk=tUZp!AUm1ZH@1-A;6xnoemYC)ua3TM^YD@RbO+=I z!x+Tu?O4#I+Z_untATz2S6Xh2KAY_AM7<+~35_Y-meMl#Bb@P6MD$x-MTwzV3M=V> zX_1U{6(0BDJ1d2|a4a!qNpTnDUsq9}`YQQ57k+`_fLzbe$CG<(lL6Q;I_-b$bF2E* zE3K3Sc+s-N{3mt}FKF@c4Tl1eXN!rfN5^MmqZUoo0wFnhCLBb{S!bKjcW>Dy8#zpq z7~Z&RR*z+lY&my6+0Ii^KC_8`ezwpFTyrQ8#}b#Q785pAi{U^4700nL(lyEdT+caN z^Ej*%8jXNI(bimyPGT5^d(+F~DehRJp918Do?LjtVm_p}<-B7-R0F>`FT}^hT?MaA zY3E$rJW_h@xRqmg=DM86psQ>sXDa!d71NBaes9cg`y`8a z0;O~);vIIz+;bPs&lXJ+sEtk!CdPzHwAh;F6+3vz4i18Z zvdvW|PR~NhqnG2c?xWum=YXjSMOb2)|QNxYd zrVxO(iI&g+c*JG`m^d?e2Nh(`3*VHP+#ML^qd9}u@U}6=FI|i~Boj(T4JA9b8}<%C z&vs-lt@f@s@e!}y)apJ;J^UBrjp%M9&d7_8>FSYbCBKF!aXFFQ&)=S2c-R2w|Aah#hGgDdZ69%nEUR~P;V@Z4n zYhp9L+-PEwr&Mrk0rMg}cd5~Yu3Bo8rOARBpn+8|U6nN!T@|A~kb?C%U1d0%=8bEI zUMy+{;WzUTe(iTp)7ki6Snx-Vz+l!A|D16cPRp_8;AeF##3kxDRy5*P($>%h_;Epa z1d}#DNo$tIjT4&8MmT{+K~{N0SL3C(VMp5nT@_2VuI@bHa&3v2D?QxM-(S;;3;?D^ zh1OL-A~_5@X0WA0v$G_@^9wqDmy8}8ut_x3LH;` z61%G}`oY%SNscuoWnl-U#p77<7=eW2LIx6+rUj~(j$pdQjOW$sD%?Tb4j^NHbQqc( ztfo8r`a&olmTii~P z)H-4ang^#ZW;Vy-5Jb>is29#>FaJ@7m)j-=A=@^oP4okoyPH&3 z@_f!cY&R!R%*{--T>ITatH$SLrEGslBzklbtSwrAl`UWN3@Zs;mPDA5%8Wtm%R#gj z)|2M0pcPnhdQ4~*3>=`0wO?4Uo`X%>WTs=pV#R}~;dSIYvUh4{-hg5~g1YKh$2J#G zl$XrSck6PzH@nrjp2Om38Th@S*IlF*m-fR=UsuQ{B9b0zx-)SfEMLzKZ{qz;qbFT$J6`!E zS3|wObwXB7kA$3hU%Dr(F7M(|@Z-MXNil0~@vhUvc2TBpd($XuH!}FzhlJ$EC9ho~ zwUReVGAk0dLb9SH%ctjBOZJLnpG}m7B&cXdP-}0!wRUsSPmT;ZFV`k}^v_1E|MAbC F`4^T*=>Gr! 
literal 0 HcmV?d00001 From 672ba95594d307b415afde9a73aa8d55e01cf707 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 23 Mar 2021 21:49:35 +0800 Subject: [PATCH 087/457] 1808 Drop post transforms from AUC metric (#1828) * [DLMED] update ROCAUC Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] fix CI tests Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma Co-authored-by: monai-bot --- monai/handlers/roc_auc.py | 12 +-- monai/metrics/rocauc.py | 28 +------ monai/transforms/post/array.py | 5 ++ tests/test_compute_roc_auc.py | 93 +++++++++++---------- tests/test_handler_rocauc.py | 9 +- tests/test_handler_rocauc_dist.py | 12 ++- tests/test_integration_classification_2d.py | 24 +++++- 7 files changed, 95 insertions(+), 88 deletions(-) diff --git a/monai/handlers/roc_auc.py b/monai/handlers/roc_auc.py index 9a9af601f9..8011dab8db 100644 --- a/monai/handlers/roc_auc.py +++ b/monai/handlers/roc_auc.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Callable, Optional, Union +from typing import Any, Callable, Union import torch @@ -27,10 +27,6 @@ class ROCAUC(EpochMetric): # type: ignore[valid-type, misc] # due to optional_ accumulating predictions and the ground-truth during an epoch and applying `compute_roc_auc`. Args: - to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False. - softmax: whether to add softmax function to `y_pred` before computation. Defaults to False. - other_act: callable function to replace `softmax` as activation layer if needed, Defaults to ``None``. - for example: `other_act = lambda x: torch.log_softmax(x)`. average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``} Type of averaging performed if not binary classification. Defaults to ``"macro"``. @@ -56,9 +52,6 @@ class ROCAUC(EpochMetric): # type: ignore[valid-type, misc] # due to optional_ def __init__( self, - to_onehot_y: bool = False, - softmax: bool = False, - other_act: Optional[Callable] = None, average: Union[Average, str] = Average.MACRO, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = "cpu", @@ -67,9 +60,6 @@ def _compute_fn(pred, label): return compute_roc_auc( y_pred=pred, y=label, - to_onehot_y=to_onehot_y, - softmax=softmax, - other_act=other_act, average=Average(average), ) diff --git a/monai/metrics/rocauc.py b/monai/metrics/rocauc.py index 80a6671dfa..a6d70b6dd8 100644 --- a/monai/metrics/rocauc.py +++ b/monai/metrics/rocauc.py @@ -9,13 +9,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -import warnings -from typing import Callable, Optional, Union, cast +from typing import Union, cast import numpy as np import torch -from monai.networks import one_hot from monai.utils import Average @@ -53,9 +51,6 @@ def _calculate(y: torch.Tensor, y_pred: torch.Tensor) -> float: def compute_roc_auc( y_pred: torch.Tensor, y: torch.Tensor, - to_onehot_y: bool = False, - softmax: bool = False, - other_act: Optional[Callable] = None, average: Union[Average, str] = Average.MACRO, ): """Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC). Referring to: @@ -67,10 +62,6 @@ def compute_roc_auc( it must be One-Hot format and first dim is batch, example shape: [16] or [16, 2]. y: ground truth to compute ROC AUC metric, the first dim is batch. example shape: [16, 1] will be converted into [16, 2] (where `2` is inferred from `y_pred`). 
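For reference, a minimal sketch of the calling convention after this change, with the activation
and one-hot encoding applied as explicit transforms before the metric; the tensors and the 0.75
result mirror TEST_CASE_1 in the updated tests further below:

    import torch
    from monai.metrics import compute_roc_auc
    from monai.transforms import Activations, AsDiscrete

    y_pred = torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]])
    y = torch.tensor([[0], [1], [0], [1]])
    y_pred = Activations(softmax=True)(y_pred)      # replaces the removed softmax=True argument
    y = AsDiscrete(to_onehot=True, n_classes=2)(y)  # replaces the removed to_onehot_y=True argument
    compute_roc_auc(y_pred=y_pred, y=y, average="macro")  # -> 0.75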
- to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False. - softmax: whether to add softmax function to `y_pred` before computation. Defaults to False. - other_act: callable function to replace `softmax` as activation layer if needed, Defaults to ``None``. - for example: `other_act = lambda x: torch.log_softmax(x)`. average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``} Type of averaging performed if not binary classification. Defaults to ``"macro"``. @@ -86,8 +77,6 @@ def compute_roc_auc( Raises: ValueError: When ``y_pred`` dimension is not one of [1, 2]. ValueError: When ``y`` dimension is not one of [1, 2]. - ValueError: When ``softmax=True`` and ``other_act is not None``. Incompatible values. - TypeError: When ``other_act`` is not an ``Optional[Callable]``. ValueError: When ``average`` is not one of ["macro", "weighted", "micro", "none"]. Note: @@ -107,22 +96,7 @@ def compute_roc_auc( y = y.squeeze(dim=-1) if y_pred_ndim == 1: - if to_onehot_y: - warnings.warn("y_pred has only one channel, to_onehot_y=True ignored.") - if softmax: - warnings.warn("y_pred has only one channel, softmax=True ignored.") return _calculate(y, y_pred) - n_classes = y_pred.shape[1] - if to_onehot_y: - y = one_hot(y, n_classes) - if softmax and other_act is not None: - raise ValueError("Incompatible values: softmax=True and other_act is not None.") - if softmax: - y_pred = y_pred.float().softmax(dim=1) - if other_act is not None: - if not callable(other_act): - raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.") - y_pred = other_act(y_pred) if y.shape != y_pred.shape: raise AssertionError("data shapes of y_pred and y do not match.") diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py index 8b4f71093b..6462753cf9 100644 --- a/monai/transforms/post/array.py +++ b/monai/transforms/post/array.py @@ -86,9 +86,14 @@ def __call__( if other is not None and not callable(other): raise TypeError(f"other must be None or callable but is {type(other).__name__}.") + # convert to float as activation must operate on float tensor + img = img.float() if sigmoid or self.sigmoid: img = torch.sigmoid(img) if softmax or self.softmax: + # add channel dim if not existing + if img.ndimension() == 1: + img = img.unsqueeze(-1) img = torch.softmax(img, dim=1) act_func = self.other if other is None else other diff --git a/tests/test_compute_roc_auc.py b/tests/test_compute_roc_auc.py index 612bd375ac..10141ce0a7 100644 --- a/tests/test_compute_roc_auc.py +++ b/tests/test_compute_roc_auc.py @@ -16,71 +16,78 @@ from parameterized import parameterized from monai.metrics import compute_roc_auc +from monai.transforms import Activations, AsDiscrete TEST_CASE_1 = [ - { - "y_pred": torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), - "y": torch.tensor([[0], [1], [0], [1]]), - "to_onehot_y": True, - "softmax": True, - }, + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), + torch.tensor([[0], [1], [0], [1]]), + True, + True, + "macro", 0.75, ] -TEST_CASE_2 = [{"y_pred": torch.tensor([[0.5], [0.5], [0.2], [8.3]]), "y": torch.tensor([[0], [1], [0], [1]])}, 0.875] +TEST_CASE_2 = [ + torch.tensor([[0.5], [0.5], [0.2], [8.3]]), + torch.tensor([[0], [1], [0], [1]]), + False, + False, + "macro", + 0.875, +] -TEST_CASE_3 = [{"y_pred": torch.tensor([[0.5], [0.5], [0.2], [8.3]]), "y": torch.tensor([0, 1, 0, 1])}, 0.875] +TEST_CASE_3 = [ + torch.tensor([[0.5], [0.5], [0.2], [8.3]]), + torch.tensor([0, 1, 0, 1]), + False, + False, + "macro", 
+ 0.875, +] -TEST_CASE_4 = [{"y_pred": torch.tensor([0.5, 0.5, 0.2, 8.3]), "y": torch.tensor([0, 1, 0, 1])}, 0.875] +TEST_CASE_4 = [ + torch.tensor([0.5, 0.5, 0.2, 8.3]), + torch.tensor([0, 1, 0, 1]), + False, + False, + "macro", + 0.875, +] TEST_CASE_5 = [ - { - "y_pred": torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), - "y": torch.tensor([[0], [1], [0], [1]]), - "to_onehot_y": True, - "softmax": True, - "average": "none", - }, + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), + torch.tensor([[0], [1], [0], [1]]), + True, + True, + "none", [0.75, 0.75], ] TEST_CASE_6 = [ - { - "y_pred": torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), - "y": torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), - "softmax": True, - "average": "weighted", - }, + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), + torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), + True, + False, + "weighted", 0.56667, ] TEST_CASE_7 = [ - { - "y_pred": torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), - "y": torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), - "softmax": True, - "average": "micro", - }, + torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]), + torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]), + True, + False, + "micro", 0.62, ] -TEST_CASE_8 = [ - { - "y_pred": torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]), - "y": torch.tensor([[0], [1], [0], [1]]), - "to_onehot_y": True, - "other_act": lambda x: torch.log_softmax(x, dim=1), - }, - 0.75, -] - class TestComputeROCAUC(unittest.TestCase): - @parameterized.expand( - [TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7, TEST_CASE_8] - ) - def test_value(self, input_data, expected_value): - result = compute_roc_auc(**input_data) + @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7]) + def test_value(self, y_pred, y, softmax, to_onehot, average, expected_value): + y_pred = Activations(softmax=softmax)(y_pred) + y = AsDiscrete(to_onehot=to_onehot, n_classes=2)(y) + result = compute_roc_auc(y_pred=y_pred, y=y, average=average) np.testing.assert_allclose(expected_value, result, rtol=1e-5) diff --git a/tests/test_handler_rocauc.py b/tests/test_handler_rocauc.py index 05f6eebce6..04e4d3edb3 100644 --- a/tests/test_handler_rocauc.py +++ b/tests/test_handler_rocauc.py @@ -15,18 +15,25 @@ import torch from monai.handlers import ROCAUC +from monai.transforms import Activations, AsDiscrete class TestHandlerROCAUC(unittest.TestCase): def test_compute(self): - auc_metric = ROCAUC(to_onehot_y=True, softmax=True) + auc_metric = ROCAUC() + act = Activations(softmax=True) + to_onehot = AsDiscrete(to_onehot=True, n_classes=2) y_pred = torch.Tensor([[0.1, 0.9], [0.3, 1.4]]) y = torch.Tensor([[0], [1]]) + y_pred = act(y_pred) + y = to_onehot(y) auc_metric.update([y_pred, y]) y_pred = torch.Tensor([[0.2, 0.1], [0.1, 0.5]]) y = torch.Tensor([[0], [1]]) + y_pred = act(y_pred) + y = to_onehot(y) auc_metric.update([y_pred, y]) auc = auc_metric.compute() diff --git a/tests/test_handler_rocauc_dist.py b/tests/test_handler_rocauc_dist.py index c5cf44162c..e768906158 100644 --- a/tests/test_handler_rocauc_dist.py +++ b/tests/test_handler_rocauc_dist.py @@ -17,23 +17,29 @@ import torch.distributed as dist from monai.handlers import ROCAUC +from monai.transforms import Activations, AsDiscrete from tests.utils import DistCall, 
DistTestCase class DistributedROCAUC(DistTestCase): @DistCall(nnodes=1, nproc_per_node=2, node_rank=0) def test_compute(self): - auc_metric = ROCAUC(to_onehot_y=True, softmax=True) + auc_metric = ROCAUC() + act = Activations(softmax=True) + to_onehot = AsDiscrete(to_onehot=True, n_classes=2) + device = f"cuda:{dist.get_rank()}" if torch.cuda.is_available() else "cpu" if dist.get_rank() == 0: y_pred = torch.tensor([[0.1, 0.9], [0.3, 1.4]], device=device) y = torch.tensor([[0], [1]], device=device) - auc_metric.update([y_pred, y]) if dist.get_rank() == 1: y_pred = torch.tensor([[0.2, 0.1], [0.1, 0.5], [0.3, 0.4]], device=device) y = torch.tensor([[0], [1], [1]], device=device) - auc_metric.update([y_pred, y]) + + y_pred = act(y_pred) + y = to_onehot(y) + auc_metric.update([y_pred, y]) result = auc_metric.compute() np.testing.assert_allclose(0.66667, result, rtol=1e-4) diff --git a/tests/test_integration_classification_2d.py b/tests/test_integration_classification_2d.py index 6f8c949d78..68493e4ffb 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/test_integration_classification_2d.py @@ -23,7 +23,18 @@ from monai.metrics import compute_roc_auc from monai.networks import eval_mode from monai.networks.nets import DenseNet121 -from monai.transforms import AddChannel, Compose, LoadImage, RandFlip, RandRotate, RandZoom, ScaleIntensity, ToTensor +from monai.transforms import ( + Activations, + AddChannel, + AsDiscrete, + Compose, + LoadImage, + RandFlip, + RandRotate, + RandZoom, + ScaleIntensity, + ToTensor, +) from monai.utils import set_determinism from tests.testing_data.integration_answers import test_integration_value from tests.utils import DistTestCase, TimedCall, skip_if_quick @@ -63,6 +74,8 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", ) train_transforms.set_random_state(1234) val_transforms = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()]) + act = Activations(softmax=True) + to_onehot = AsDiscrete(to_onehot=True, n_classes=len(np.unique(train_y))) # create train, val data loaders train_ds = MedNISTDataset(train_x, train_y, train_transforms) @@ -110,10 +123,15 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", val_images, val_labels = val_data[0].to(device), val_data[1].to(device) y_pred = torch.cat([y_pred, model(val_images)], dim=0) y = torch.cat([y, val_labels], dim=0) - auc_metric = compute_roc_auc(y_pred, y, to_onehot_y=True, softmax=True) - metric_values.append(auc_metric) + + # compute accuracy acc_value = torch.eq(y_pred.argmax(dim=1), y) acc_metric = acc_value.sum().item() / len(acc_value) + # compute AUC + y_pred = act(y_pred) + y = to_onehot(y) + auc_metric = compute_roc_auc(y_pred, y) + metric_values.append(auc_metric) if auc_metric > best_metric: best_metric = auc_metric best_metric_epoch = epoch + 1 From 83855b98f1c128f46b4db21a2f3ee55d0b5ffa5d Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Tue, 23 Mar 2021 17:21:08 +0000 Subject: [PATCH 088/457] Test time augmentations (#1794) test time augmentations --- docs/source/data.rst | 4 + monai/data/__init__.py | 1 + monai/data/test_time_augmentation.py | 178 +++++++++++++++++++++++++++ tests/test_testtimeaugmentation.py | 143 +++++++++++++++++++++ 4 files changed, 326 insertions(+) create mode 100644 monai/data/test_time_augmentation.py create mode 100644 tests/test_testtimeaugmentation.py diff --git a/docs/source/data.rst b/docs/source/data.rst index 
8071bb1585..66fadd549b 100644 --- a/docs/source/data.rst +++ b/docs/source/data.rst @@ -188,3 +188,7 @@ ThreadBuffer BatchInverseTransform ~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: monai.data.BatchInverseTransform + +TestTimeAugmentation +~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: monai.data.TestTimeAugmentation diff --git a/monai/data/__init__.py b/monai/data/__init__.py index 2001ccfc8f..adb27a608e 100644 --- a/monai/data/__init__.py +++ b/monai/data/__init__.py @@ -34,6 +34,7 @@ from .png_writer import write_png from .samplers import DistributedSampler, DistributedWeightedRandomSampler from .synthetic import create_test_image_2d, create_test_image_3d +from .test_time_augmentation import TestTimeAugmentation from .thread_buffer import ThreadBuffer, ThreadDataLoader from .utils import ( compute_importance_map, diff --git a/monai/data/test_time_augmentation.py b/monai/data/test_time_augmentation.py new file mode 100644 index 0000000000..51b95adc58 --- /dev/null +++ b/monai/data/test_time_augmentation.py @@ -0,0 +1,178 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch + +from monai.data.dataloader import DataLoader +from monai.data.dataset import Dataset +from monai.data.inverse_batch_transform import BatchInverseTransform +from monai.data.utils import list_data_collate, pad_list_data_collate +from monai.transforms.compose import Compose +from monai.transforms.inverse import InvertibleTransform +from monai.transforms.transform import RandomizableTransform +from monai.transforms.utils import allow_missing_keys_mode +from monai.utils.enums import CommonKeys, InverseKeys + +__all__ = ["TestTimeAugmentation"] + + +class TestTimeAugmentation: + """ + Class for performing test time augmentations. This will pass the same image through the network multiple times. + + The user passes transform(s) to be applied to each realisation, and provided that at least one of those transforms + is random, the network's output will vary. Provided that inverse transformations exist for all supplied spatial + transforms, the inverse can be applied to each realisation of the network's output. Once in the same spatial + reference, the results can then be combined and metrics computed. + + Test time augmentations are a useful feature for computing network uncertainty, as well as observing the network's + dependency on the applied random transforms. + + Reference: + Wang et al., + Aleatoric uncertainty estimation with test-time augmentation for medical image segmentation with convolutional + neural networks, + https://doi.org/10.1016/j.neucom.2019.01.103 + + Args: + transform: transform (or composed) to be applied to each realisation. At least one transform must be of type + `RandomizableTransform`. All random transforms must be of type `InvertibleTransform`. + batch_size: number of realisations to infer at once. + num_workers: how many subprocesses to use for data. 
+        inferrer_fn: function to use to perform inference.
+        device: device on which to perform inference.
+        image_key: key used to extract image from input dictionary.
+        label_key: key used to extract label from input dictionary.
+        return_full_data: normally, metrics are returned (mode, mean, std, vvc). Setting this flag to `True` will return the
+            full data. Dimensions will be same size as when passing a single image through `inferrer_fn`, with a dimension appended
+            equal in size to `num_examples` (N), i.e., `[N,C,H,W,[D]]`.
+
+    Example:
+        .. code-block:: python
+
+            transform = RandAffined(keys, ...)
+            post_trans = Compose([Activations(sigmoid=True), AsDiscrete(threshold_values=True)])
+
+            tt_aug = TestTimeAugmentation(
+                transform, batch_size=5, num_workers=0, inferrer_fn=lambda x: post_trans(model(x)), device=device
+            )
+            mode, mean, std, vvc = tt_aug(test_data)
+    """
+
+    def __init__(
+        self,
+        transform: InvertibleTransform,
+        batch_size: int,
+        num_workers: int,
+        inferrer_fn: Callable,
+        device: Optional[Union[str, torch.device]] = "cuda" if torch.cuda.is_available() else "cpu",
+        image_key=CommonKeys.IMAGE,
+        label_key=CommonKeys.LABEL,
+        return_full_data: bool = False,
+    ) -> None:
+        self.transform = transform
+        self.batch_size = batch_size
+        self.num_workers = num_workers
+        self.inferrer_fn = inferrer_fn
+        self.device = device
+        self.image_key = image_key
+        self.label_key = label_key
+        self.return_full_data = return_full_data
+
+        # check that the transform has at least one random component, and that all random transforms are invertible
+        self._check_transforms()
+
+    def _check_transforms(self):
+        """Should be at least 1 random transform, and all random transforms should be invertible."""
+        ts = [self.transform] if not isinstance(self.transform, Compose) else self.transform.transforms
+        randoms = np.array([isinstance(t, RandomizableTransform) for t in ts])
+        invertibles = np.array([isinstance(t, InvertibleTransform) for t in ts])
+        # check at least 1 random
+        if sum(randoms) == 0:
+            raise RuntimeError(
+                "Requires a `Randomizable` transform or a `Compose` containing at least one `Randomizable` transform."
+            )
+        # check that whenever randoms is True, invertibles is also true
+        for r, i in zip(randoms, invertibles):
+            if r and not i:
+                raise RuntimeError(
+                    f"All applied random transform(s) must be invertible. Problematic transform: {type(r).__name__}"
+                )
+
+    def __call__(
+        self, data: Dict[str, Any], num_examples: int = 10
+    ) -> Union[Tuple[np.ndarray, np.ndarray, np.ndarray, float], np.ndarray]:
+        """
+        Args:
+            data: dictionary data to be processed.
+            num_examples: number of realisations to be processed and results combined.
+
+        Returns:
+            - if `return_full_data==False`: mode, mean, std, vvc. The mode, mean and standard deviation are calculated across
+              `num_examples` outputs at each voxel. The volume variation coefficient (VVC) is `std/mean` across the whole output,
+              including `num_examples`. See original paper for clarification.
+            - if `return_full_data==True`: data is returned as-is after applying the `inferrer_fn` and then concatenating across
+              the first dimension containing `num_examples`. This allows the user to perform their own analysis if desired. 
+ """ + d = dict(data) + + # check num examples is multiple of batch size + if num_examples % self.batch_size != 0: + raise ValueError("num_examples should be multiple of batch size.") + + # generate batch of data of size == batch_size, dataset and dataloader + data_in = [d] * num_examples + ds = Dataset(data_in, self.transform) + dl = DataLoader(ds, self.num_workers, batch_size=self.batch_size, collate_fn=pad_list_data_collate) + + label_transform_key = self.label_key + InverseKeys.KEY_SUFFIX + + # create inverter + inverter = BatchInverseTransform(self.transform, dl, collate_fn=list_data_collate) + + outputs: List[np.ndarray] = [] + + for batch_data in dl: + + batch_images = batch_data[self.image_key].to(self.device) + + # do model forward pass + batch_output = self.inferrer_fn(batch_images) + if isinstance(batch_output, torch.Tensor): + batch_output = batch_output.detach().cpu() + if isinstance(batch_output, np.ndarray): + batch_output = torch.Tensor(batch_output) + + # create a dictionary containing the inferred batch and their transforms + inferred_dict = {self.label_key: batch_output, label_transform_key: batch_data[label_transform_key]} + + # do inverse transformation (allow missing keys as only inverting label) + with allow_missing_keys_mode(self.transform): # type: ignore + inv_batch = inverter(inferred_dict) + + # append + outputs.append(inv_batch[self.label_key]) + + # output + output: np.ndarray = np.concatenate(outputs) + + if self.return_full_data: + return output + + # calculate metrics + mode: np.ndarray = np.apply_along_axis(lambda x: np.bincount(x).argmax(), axis=0, arr=output.astype(np.int64)) + mean: np.ndarray = np.mean(output, axis=0) # type: ignore + std: np.ndarray = np.std(output, axis=0) # type: ignore + vvc: float = (np.std(output) / np.mean(output)).item() + return mode, mean, std, vvc diff --git a/tests/test_testtimeaugmentation.py b/tests/test_testtimeaugmentation.py new file mode 100644 index 0000000000..bee1aa4b0d --- /dev/null +++ b/tests/test_testtimeaugmentation.py @@ -0,0 +1,143 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
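A short sketch of the `return_full_data=True` path described above; `transform`, `post_trans`,
`model` and `test_data` are the same placeholders used in the class docstring example. Keeping
every realisation lets the caller derive custom statistics, e.g. a voxel-wise uncertainty map:

    from monai.data import TestTimeAugmentation

    tt_aug = TestTimeAugmentation(
        transform, batch_size=5, num_workers=0,
        inferrer_fn=lambda x: post_trans(model(x)), return_full_data=True,
    )
    outputs = tt_aug(test_data, num_examples=10)  # np.ndarray of shape [10, C, H, W, [D]]
    uncertainty = outputs.std(axis=0)             # per-voxel std across the 10 realisations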
+ +import unittest +from functools import partial +from typing import TYPE_CHECKING + +import numpy as np +import torch + +from monai.data import CacheDataset, DataLoader, create_test_image_2d +from monai.data.test_time_augmentation import TestTimeAugmentation +from monai.data.utils import pad_list_data_collate +from monai.losses import DiceLoss +from monai.networks.nets import UNet +from monai.transforms import Activations, AddChanneld, AsDiscrete, Compose, CropForegroundd, DivisiblePadd, RandAffined +from monai.transforms.croppad.dictionary import SpatialPadd +from monai.transforms.spatial.dictionary import Rand2DElasticd, RandFlipd +from monai.utils import optional_import, set_determinism + +if TYPE_CHECKING: + import tqdm + + has_tqdm = True +else: + tqdm, has_tqdm = optional_import("tqdm") + +trange = partial(tqdm.trange, desc="training") if has_tqdm else range + + +class TestTestTimeAugmentation(unittest.TestCase): + @staticmethod + def get_data(num_examples, input_size): + custom_create_test_image_2d = partial( + create_test_image_2d, *input_size, rad_max=7, num_seg_classes=1, num_objs=1 + ) + data = [] + for _ in range(num_examples): + im, label = custom_create_test_image_2d() + data.append({"image": im, "label": label}) + return data[0] if num_examples == 1 else data + + def setUp(self) -> None: + set_determinism(seed=0) + + def tearDown(self) -> None: + set_determinism(None) + + def test_test_time_augmentation(self): + input_size = (20, 20) + device = "cuda" if torch.cuda.is_available() else "cpu" + keys = ["image", "label"] + num_training_ims = 10 + train_data = self.get_data(num_training_ims, input_size) + test_data = self.get_data(1, input_size) + + transforms = Compose( + [ + AddChanneld(keys), + RandAffined( + keys, + prob=1.0, + spatial_size=(30, 30), + rotate_range=(np.pi / 3, np.pi / 3), + translate_range=(3, 3), + scale_range=((0.8, 1), (0.8, 1)), + padding_mode="zeros", + mode=("bilinear", "nearest"), + as_tensor_output=False, + ), + CropForegroundd(keys, source_key="image"), + DivisiblePadd(keys, 4), + ] + ) + + train_ds = CacheDataset(train_data, transforms) + # output might be different size, so pad so that they match + train_loader = DataLoader(train_ds, batch_size=2, collate_fn=pad_list_data_collate) + + model = UNet(2, 1, 1, channels=(6, 6), strides=(2, 2)).to(device) + loss_function = DiceLoss(sigmoid=True) + optimizer = torch.optim.Adam(model.parameters(), 1e-3) + + num_epochs = 10 + for _ in trange(num_epochs): + epoch_loss = 0 + + for batch_data in train_loader: + inputs, labels = batch_data["image"].to(device), batch_data["label"].to(device) + optimizer.zero_grad() + outputs = model(inputs) + loss = loss_function(outputs, labels) + loss.backward() + optimizer.step() + epoch_loss += loss.item() + + epoch_loss /= len(train_loader) + + post_trans = Compose( + [ + Activations(sigmoid=True), + AsDiscrete(threshold_values=True), + ] + ) + + def inferrer_fn(x): + return post_trans(model(x)) + + tt_aug = TestTimeAugmentation(transforms, batch_size=5, num_workers=0, inferrer_fn=inferrer_fn, device=device) + mode, mean, std, vvc = tt_aug(test_data) + self.assertEqual(mode.shape, (1,) + input_size) + self.assertEqual(mean.shape, (1,) + input_size) + self.assertTrue(all(np.unique(mode) == (0, 1))) + self.assertEqual((mean.min(), mean.max()), (0.0, 1.0)) + self.assertEqual(std.shape, (1,) + input_size) + self.assertIsInstance(vvc, float) + + def test_fail_non_random(self): + transforms = Compose([AddChanneld("im"), SpatialPadd("im", 1)]) + with 
self.assertRaises(RuntimeError): + TestTimeAugmentation(transforms, None, None, None) + + def test_fail_random_but_not_invertible(self): + transforms = Compose([AddChanneld("im"), Rand2DElasticd("im", None, None)]) + with self.assertRaises(RuntimeError): + TestTimeAugmentation(transforms, None, None, None) + + def test_single_transform(self): + transforms = RandFlipd(["image", "label"]) + tta = TestTimeAugmentation(transforms, batch_size=5, num_workers=0, inferrer_fn=lambda x: x) + tta(self.get_data(1, (20, 20))) + + +if __name__ == "__main__": + unittest.main() From 8ddd09f4800e9ca5ec2c09f7d4f2a671dfc53add Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Tue, 23 Mar 2021 21:13:23 +0000 Subject: [PATCH 089/457] fixes tutorial issue 150 (#1830) * fixes tutorial issue 150 Signed-off-by: Wenqi Li * revise based on comments Signed-off-by: Wenqi Li Co-authored-by: Richard Brown <33289025+rijobro@users.noreply.github.com> --- monai/data/utils.py | 11 ++++- monai/transforms/spatial/dictionary.py | 4 +- tests/test_inverse_collation.py | 57 ++++++++++++++++++-------- 3 files changed, 53 insertions(+), 19 deletions(-) diff --git a/monai/data/utils.py b/monai/data/utils.py index bdbfa5c636..63e630fe17 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -259,11 +259,20 @@ def list_data_collate(batch: Sequence): re_str = str(re) if "equal size" in re_str: re_str += ( - "\nMONAI hint: if your transforms intentionally create images of different shapes, creating your " + "\n\nMONAI hint: if your transforms intentionally create images of different shapes, creating your " + "`DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem (check its " + "documentation)." ) raise RuntimeError(re_str) + except TypeError as re: + re_str = str(re) + if "numpy" in re_str and "Tensor" in re_str: + re_str += ( + "\n\nMONAI hint: if your transforms intentionally create mixtures of torch Tensor and numpy ndarray, " + + "creating your `DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem " + + "(check its documentation)." 
+ ) + raise TypeError(re_str) def decollate_batch(data: dict, batch_size: Optional[int] = None) -> List[dict]: diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 0d5b3436fd..5f8aa3d0b0 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -698,7 +698,7 @@ def __call__( affine = self.rand_affine.rand_affine_grid.get_transformation_matrix() else: grid = create_grid(spatial_size=sp_size) - affine = np.eye(len(sp_size) + 1) + affine = torch.eye(len(sp_size) + 1) for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): self.push_transform(d, key, extra_info={"affine": affine}) @@ -1285,7 +1285,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda d = dict(data) if not self._do_transform: for key in self.keys: - self.push_transform(d, key, extra_info={"rot_mat": np.eye(4)}) + self.push_transform(d, key, extra_info={"rot_mat": np.eye(d[key].ndim)}) return d angle: Union[Sequence[float], float] = self.x if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z) rotator = Rotate( diff --git a/tests/test_inverse_collation.py b/tests/test_inverse_collation.py index c5d77fb8f2..5bde157343 100644 --- a/tests/test_inverse_collation.py +++ b/tests/test_inverse_collation.py @@ -16,7 +16,7 @@ import numpy as np from parameterized import parameterized -from monai.data import CacheDataset, DataLoader, create_test_image_3d, pad_list_data_collate +from monai.data import CacheDataset, DataLoader, create_test_image_2d, create_test_image_3d, pad_list_data_collate from monai.transforms import ( AddChanneld, Compose, @@ -40,16 +40,29 @@ KEYS = ["image", "label"] -TESTS = [ - (t.__class__.__name__ + (" pad_list_data_collate" if collate_fn else " default_collate"), t, collate_fn) +TESTS_3D = [ + (t.__class__.__name__ + (" pad_list_data_collate" if collate_fn else " default_collate"), t, collate_fn, 3) for collate_fn in [None, pad_list_data_collate] for t in [ - RandFlipd(keys=KEYS, spatial_axis=[1, 2]), - RandAxisFlipd(keys=KEYS), + RandFlipd(keys=KEYS, prob=0.5, spatial_axis=[1, 2]), + RandAxisFlipd(keys=KEYS, prob=0.5), RandRotate90d(keys=KEYS, spatial_axes=(1, 2)), RandZoomd(keys=KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True), - RandRotated(keys=KEYS, range_x=np.pi), - RandAffined(keys=KEYS, rotate_range=np.pi), + RandRotated(keys=KEYS, prob=0.5, range_x=np.pi), + RandAffined(keys=KEYS, prob=0.5, rotate_range=np.pi), + ] +] + +TESTS_2D = [ + (t.__class__.__name__ + (" pad_list_data_collate" if collate_fn else " default_collate"), t, collate_fn, 2) + for collate_fn in [None, pad_list_data_collate] + for t in [ + RandFlipd(keys=KEYS, prob=0.5, spatial_axis=[1]), + RandAxisFlipd(keys=KEYS, prob=0.5), + RandRotate90d(keys=KEYS, prob=0.5, spatial_axes=(0, 1)), + RandZoomd(keys=KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True), + RandRotated(keys=KEYS, prob=0.5, range_x=np.pi), + RandAffined(keys=KEYS, prob=0.5, rotate_range=np.pi), ] ] @@ -63,30 +76,42 @@ def setUp(self): set_determinism(seed=0) + b_size = 11 im_fname, seg_fname = [make_nifti_image(i) for i in create_test_image_3d(101, 100, 107)] load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)]) - self.batch_size = 10 - self.data = [load_ims({"image": im_fname, "label": seg_fname}) for _ in range(self.batch_size)] + self.data_3d = [load_ims({"image": im_fname, "label": seg_fname}) for _ in range(b_size)] + + b_size = 8 + im_fname, seg_fname = [make_nifti_image(i) for i in 
create_test_image_2d(62, 37, rad_max=10)] + load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)]) + self.data_2d = [load_ims({"image": im_fname, "label": seg_fname}) for _ in range(b_size)] + + self.batch_size = 7 def tearDown(self): set_determinism(seed=None) - @parameterized.expand(TESTS) - def test_collation(self, _, transform, collate_fn): - + @parameterized.expand(TESTS_2D + TESTS_3D) + def test_collation(self, _, transform, collate_fn, ndim): + if ndim == 3: + data = self.data_3d + else: + data = self.data_2d if collate_fn: modified_transform = transform else: - modified_transform = Compose([transform, ResizeWithPadOrCropd(KEYS, [100, 100, 100])]) + modified_transform = Compose([transform, ResizeWithPadOrCropd(KEYS, 100)]) # num workers = 0 for mac num_workers = 2 if sys.platform != "darwin" else 0 - dataset = CacheDataset(self.data, transform=modified_transform, progress=False) + dataset = CacheDataset(data, transform=modified_transform, progress=False) loader = DataLoader(dataset, num_workers, batch_size=self.batch_size, collate_fn=collate_fn) - for _ in loader: - pass + for item in loader: + np.testing.assert_array_equal( + item["image_transforms"][0]["do_transforms"], item["label_transforms"][0]["do_transforms"] + ) if __name__ == "__main__": From 07c6e34e7820560d7df2fe7f27907b05f6d91750 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Wed, 24 Mar 2021 07:27:25 +0800 Subject: [PATCH 090/457] 1814 Enhance transform chain error logging (#1829) * [DLMED] enhance transform logging Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] fix typo Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] ignore type warning Signed-off-by: Nic Ma Co-authored-by: monai-bot --- monai/transforms/transform.py | 23 ++++++++ monai/transforms/utility/array.py | 6 +++ monai/transforms/utility/dictionary.py | 9 +++- tests/test_data_stats.py | 51 +++++++++++++----- tests/test_data_statsd.py | 73 ++++++++++++++++++++------ 5 files changed, 132 insertions(+), 30 deletions(-) diff --git a/monai/transforms/transform.py b/monai/transforms/transform.py index fea46aafa3..6a22db1076 100644 --- a/monai/transforms/transform.py +++ b/monai/transforms/transform.py @@ -16,7 +16,9 @@ from typing import Any, Callable, Dict, Generator, Hashable, Iterable, List, Optional, Tuple import numpy as np +import torch +from monai import transforms from monai.config import KeysCollection from monai.utils import MAX_SEED, ensure_tuple @@ -45,6 +47,27 @@ def apply_transform(transform: Callable, data, map_items: bool = True): return [transform(item) for item in data] return transform(data) except Exception as e: + + if not isinstance(transform, transforms.compose.Compose): + # log the input data information of exact transform in the transform chain + datastats = transforms.utility.array.DataStats(data_shape=False, value_range=False) + datastats._logger.info("input data information of the runtime error transform:") + if isinstance(data, (list, tuple)): + data = data[0] + + def _log_stats(data, prefix: Optional[str] = "Data"): + if isinstance(data, (np.ndarray, torch.Tensor)): + # log data type, shape, range for array + datastats(img=data, data_shape=True, value_range=True, prefix=prefix) # type: ignore + else: + # log data type and value for other meta data + datastats(img=data, data_value=True, prefix=prefix) + + if isinstance(data, dict): + for k, v in data.items(): + _log_stats(data=v, prefix=k) + else: + _log_stats(data=data) raise RuntimeError(f"applying transform 
{transform}") from e diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 41804d5c1d..f169002596 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -394,6 +394,7 @@ class DataStats(Transform): def __init__( self, prefix: str = "Data", + data_type: bool = True, data_shape: bool = True, value_range: bool = True, data_value: bool = False, @@ -403,6 +404,7 @@ def __init__( """ Args: prefix: will be printed in format: "{prefix} statistics". + data_type: whether to show the type of input data. data_shape: whether to show the shape of input data. value_range: whether to show the value range of input data. data_value: whether to show the raw value of input data. @@ -419,6 +421,7 @@ def __init__( if not isinstance(prefix, str): raise AssertionError("prefix must be a string.") self.prefix = prefix + self.data_type = data_type self.data_shape = data_shape self.value_range = value_range self.data_value = data_value @@ -438,6 +441,7 @@ def __call__( self, img: NdarrayTensor, prefix: Optional[str] = None, + data_type: Optional[bool] = None, data_shape: Optional[bool] = None, value_range: Optional[bool] = None, data_value: Optional[bool] = None, @@ -448,6 +452,8 @@ def __call__( """ lines = [f"{prefix or self.prefix} statistics:"] + if self.data_type if data_type is None else data_type: + lines.append(f"Type: {type(img)}") if self.data_shape if data_shape is None else data_shape: lines.append(f"Shape: {img.shape}") if self.value_range if value_range is None else value_range: diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index a05a5fc904..324835a874 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -507,6 +507,7 @@ def __init__( self, keys: KeysCollection, prefix: Union[Sequence[str], str] = "Data", + data_type: Union[Sequence[bool], bool] = True, data_shape: Union[Sequence[bool], bool] = True, value_range: Union[Sequence[bool], bool] = True, data_value: Union[Sequence[bool], bool] = False, @@ -520,6 +521,8 @@ def __init__( See also: :py:class:`monai.transforms.compose.MapTransform` prefix: will be printed in format: "{prefix} statistics". it also can be a sequence of string, each element corresponds to a key in ``keys``. + data_type: whether to show the type of input data. + it also can be a sequence of bool, each element corresponds to a key in ``keys``. data_shape: whether to show the shape of input data. it also can be a sequence of bool, each element corresponds to a key in ``keys``. value_range: whether to show the value range of input data. 
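A small sketch of the array-version transform with the new `data_type` flag; the logged text
mirrors the updated test cases below:

    import numpy as np
    from monai.transforms import DataStats

    stats = DataStats(prefix="test data", data_type=True, data_shape=True, value_range=True)
    stats(np.array([[0, 1], [1, 2]]))
    # logs: test data statistics:
    #       Type: <class 'numpy.ndarray'>
    #       Shape: (2, 2)
    #       Value range: (0, 2)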
@@ -538,6 +541,7 @@ def __init__(
         """
         super().__init__(keys, allow_missing_keys)
         self.prefix = ensure_tuple_rep(prefix, len(self.keys))
+        self.data_type = ensure_tuple_rep(data_type, len(self.keys))
         self.data_shape = ensure_tuple_rep(data_shape, len(self.keys))
         self.value_range = ensure_tuple_rep(value_range, len(self.keys))
         self.data_value = ensure_tuple_rep(data_value, len(self.keys))
@@ -547,12 +551,13 @@ def __init__(

     def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]:
         d = dict(data)
-        for key, prefix, data_shape, value_range, data_value, additional_info in self.key_iterator(
-            d, self.prefix, self.data_shape, self.value_range, self.data_value, self.additional_info
+        for key, prefix, data_type, data_shape, value_range, data_value, additional_info in self.key_iterator(
+            d, self.prefix, self.data_type, self.data_shape, self.value_range, self.data_value, self.additional_info
         ):
             d[key] = self.printer(
                 d[key],
                 prefix,
+                data_type,
                 data_shape,
                 value_range,
                 data_value,
diff --git a/tests/test_data_stats.py b/tests/test_data_stats.py
index 877da52263..073620ad43 100644
--- a/tests/test_data_stats.py
+++ b/tests/test_data_stats.py
@@ -23,6 +23,7 @@
 TEST_CASE_1 = [
     {
         "prefix": "test data",
+        "data_type": False,
         "data_shape": False,
         "value_range": False,
         "data_value": False,
@@ -36,58 +37,80 @@
 TEST_CASE_2 = [
     {
         "prefix": "test data",
-        "data_shape": True,
+        "data_type": True,
+        "data_shape": False,
         "value_range": False,
         "data_value": False,
         "additional_info": None,
         "logger_handler": None,
     },
     np.array([[0, 1], [1, 2]]),
-    "test data statistics:\nShape: (2, 2)",
+    "test data statistics:\nType: <class 'numpy.ndarray'>",
 ]

 TEST_CASE_3 = [
     {
         "prefix": "test data",
+        "data_type": True,
         "data_shape": True,
-        "value_range": True,
+        "value_range": False,
         "data_value": False,
         "additional_info": None,
         "logger_handler": None,
     },
     np.array([[0, 1], [1, 2]]),
-    "test data statistics:\nShape: (2, 2)\nValue range: (0, 2)",
+    "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)",
 ]

 TEST_CASE_4 = [
     {
         "prefix": "test data",
+        "data_type": True,
         "data_shape": True,
         "value_range": True,
-        "data_value": True,
+        "data_value": False,
         "additional_info": None,
         "logger_handler": None,
     },
     np.array([[0, 1], [1, 2]]),
-    "test data statistics:\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]",
+    "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)\nValue range: (0, 2)",
 ]

 TEST_CASE_5 = [
     {
         "prefix": "test data",
+        "data_type": True,
         "data_shape": True,
         "value_range": True,
         "data_value": True,
-        "additional_info": np.mean,
+        "additional_info": None,
         "logger_handler": None,
     },
     np.array([[0, 1], [1, 2]]),
-    "test data statistics:\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0",
+    "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]",
 ]

 TEST_CASE_6 = [
     {
         "prefix": "test data",
+        "data_type": True,
+        "data_shape": True,
+        "value_range": True,
+        "data_value": True,
+        "additional_info": np.mean,
+        "logger_handler": None,
+    },
+    np.array([[0, 1], [1, 2]]),
+    (
+        "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)\n"
+        "Value range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0"
+    ),
+]
+
+TEST_CASE_7 = [
+    {
+        "prefix": "test data",
+        "data_type": True,
         "data_shape": True,
         "value_range": True,
         "data_value": True,
@@ -96,25 +119,26 @@
     },
     torch.tensor([[0, 1], [1, 2]]),
     (
-        "test data statistics:\nShape: torch.Size([2, 2])\nValue range: (0, 2)\n"
+        "test data statistics:\nType: <class 'torch.Tensor'>\nShape: torch.Size([2, 2])\nValue range: (0, 2)\n"
         "Value: tensor([[0, 1],\n [1, 2]])\nAdditional info: 1.0"
     ),
 ]

-TEST_CASE_7 = [
+TEST_CASE_8 = [
     np.array([[0, 1], [1, 2]]),
-    "test data statistics:\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0\n",
+    "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)\nValue range: (0, 2)\n"
+    "Value: [[0 1]\n [1 2]]\nAdditional info: 1.0\n",
 ]


 class TestDataStats(unittest.TestCase):
-    @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6])
+    @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7])
     def test_value(self, input_param, input_data, expected_print):
         transform = DataStats(**input_param)
         _ = transform(input_data)
         self.assertEqual(transform.output, expected_print)

-    @parameterized.expand([TEST_CASE_7])
+    @parameterized.expand([TEST_CASE_8])
     def test_file(self, input_data, expected_print):
         with tempfile.TemporaryDirectory() as tempdir:
             filename = os.path.join(tempdir, "test_data_stats.log")
             handler.setLevel(logging.INFO)
             input_param = {
                 "prefix": "test data",
+                "data_type": True,
                 "data_shape": True,
                 "value_range": True,
                 "data_value": True,
diff --git a/tests/test_data_statsd.py b/tests/test_data_statsd.py
index bacd70194a..7ac346b275 100644
--- a/tests/test_data_statsd.py
+++ b/tests/test_data_statsd.py
@@ -24,10 +24,12 @@
     {
         "keys": "img",
         "prefix": "test data",
+        "data_type": False,
         "data_shape": False,
         "value_range": False,
         "data_value": False,
         "additional_info": None,
+        "logger_handler": None,
     },
     {"img": np.array([[0, 1], [1, 2]])},
     "test data statistics:",
@@ -37,97 +39,138 @@
 TEST_CASE_2 = [
     {
         "keys": "img",
         "prefix": "test data",
-        "data_shape": True,
+        "data_type": True,
+        "data_shape": False,
         "value_range": False,
         "data_value": False,
         "additional_info": None,
+        "logger_handler": None,
     },
     {"img": np.array([[0, 1], [1, 2]])},
-    "test data statistics:\nShape: (2, 2)",
+    "test data statistics:\nType: <class 'numpy.ndarray'>",
 ]

 TEST_CASE_3 = [
     {
         "keys": "img",
         "prefix": "test data",
+        "data_type": True,
         "data_shape": True,
-        "value_range": True,
+        "value_range": False,
         "data_value": False,
         "additional_info": None,
+        "logger_handler": None,
     },
     {"img": np.array([[0, 1], [1, 2]])},
-    "test data statistics:\nShape: (2, 2)\nValue range: (0, 2)",
+    "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)",
 ]

 TEST_CASE_4 = [
     {
         "keys": "img",
         "prefix": "test data",
+        "data_type": True,
         "data_shape": True,
         "value_range": True,
-        "data_value": True,
+        "data_value": False,
         "additional_info": None,
+        "logger_handler": None,
     },
     {"img": np.array([[0, 1], [1, 2]])},
-    "test data statistics:\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]",
+    "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)\nValue range: (0, 2)",
 ]

 TEST_CASE_5 = [
     {
         "keys": "img",
         "prefix": "test data",
+        "data_type": True,
         "data_shape": True,
         "value_range": True,
         "data_value": True,
-        "additional_info": np.mean,
+        "additional_info": None,
+        "logger_handler": None,
     },
     {"img": np.array([[0, 1], [1, 2]])},
-    "test data statistics:\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0",
+    "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]",
 ]

 TEST_CASE_6 = [
     {
         "keys": "img",
         "prefix": "test data",
+        "data_type": True,
+        "data_shape": True,
+        "value_range": True,
+        "data_value": True,
+        "additional_info": np.mean,
+        "logger_handler": None,
+    },
+    {"img": np.array([[0, 1], [1, 2]])},
+    (
+        "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)\n"
+        "Value range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0"
+    ),
+]
+
+TEST_CASE_7 = [
+    {
+        "keys": "img",
+        "prefix": "test data",
+        "data_type": True,
         "data_shape": True,
         "value_range": True,
         "data_value": True,
         "additional_info": lambda x: torch.mean(x.float()),
+        "logger_handler": None,
     },
     {"img": torch.tensor([[0, 1], [1, 2]])},
     (
-        "test data statistics:\nShape: torch.Size([2, 2])\nValue range: (0, 2)\n"
+        "test data statistics:\nType: <class 'torch.Tensor'>\nShape: torch.Size([2, 2])\nValue range: (0, 2)\n"
         "Value: tensor([[0, 1],\n [1, 2]])\nAdditional info: 1.0"
     ),
 ]

-TEST_CASE_7 = [
+TEST_CASE_8 = [
     {
         "keys": ("img", "affine"),
         "prefix": ("image", "affine"),
+        "data_type": True,
         "data_shape": True,
         "value_range": (True, False),
         "data_value": (False, True),
         "additional_info": (np.mean, None),
     },
     {"img": np.array([[0, 1], [1, 2]]), "affine": np.eye(2, 2)},
-    "affine statistics:\nShape: (2, 2)\nValue: [[1. 0.]\n [0. 1.]]",
+    "affine statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)\nValue: [[1. 0.]\n [0. 1.]]",
 ]

-TEST_CASE_8 = [
+TEST_CASE_9 = [
     {"img": np.array([[0, 1], [1, 2]])},
-    "test data statistics:\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0\n",
+    "test data statistics:\nType: <class 'numpy.ndarray'>\nShape: (2, 2)\nValue range: (0, 2)\n"
+    "Value: [[0 1]\n [1 2]]\nAdditional info: 1.0\n",
 ]


 class TestDataStatsd(unittest.TestCase):
-    @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7])
+    @parameterized.expand(
+        [
+            TEST_CASE_1,
+            TEST_CASE_2,
+            TEST_CASE_3,
+            TEST_CASE_4,
+            TEST_CASE_5,
+            TEST_CASE_6,
+            TEST_CASE_7,
+            TEST_CASE_8,
+        ]
+    )
     def test_value(self, input_param, input_data, expected_print):
         transform = DataStatsd(**input_param)
         _ = transform(input_data)
         self.assertEqual(transform.printer.output, expected_print)

-    @parameterized.expand([TEST_CASE_8])
+    @parameterized.expand([TEST_CASE_9])
     def test_file(self, input_data, expected_print):
         with tempfile.TemporaryDirectory() as tempdir:
             filename = os.path.join(tempdir, "test_stats.log")

From cf502d644b51a79013c583fbc1dd428f698550d1 Mon Sep 17 00:00:00 2001
From: Nic Ma
Date: Wed, 24 Mar 2021 09:22:49 +0800
Subject: [PATCH 091/457] 1815 Add train mode and eval mode in Evaluators
 (#1831)

* [DLMED] add eval mode in Evaluators

Signed-off-by: Nic Ma

* [MONAI] python code formatting

Signed-off-by: monai-bot

* [DLMED] update according to comments

Signed-off-by: Nic Ma

Co-authored-by: monai-bot
Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com>
---
 monai/engines/evaluator.py | 26 ++++++++++++++++++++++----
 monai/utils/__init__.py    |  1 +
 monai/utils/enums.py       |  9 +++++++++
 3 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py
index 2c237f5245..c1fe79c848 100644
--- a/monai/engines/evaluator.py
+++ b/monai/engines/evaluator.py
@@ -17,9 +17,9 @@
 from monai.engines.utils import IterationEvents, default_prepare_batch
 from monai.engines.workflow import Workflow
 from monai.inferers import Inferer, SimpleInferer
-from monai.networks.utils import eval_mode
+from monai.networks.utils import eval_mode, train_mode
 from monai.transforms import Transform
-from monai.utils import ensure_tuple, exact_version, optional_import
+from monai.utils import ForwardMode, ensure_tuple, exact_version, optional_import
 from monai.utils.enums import CommonKeys as Keys

 if TYPE_CHECKING:
@@ -54,6 +54,8 @@ class Evaluator(Workflow):
         val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function,
            like: CheckpointHandler, StatsHandler, SegmentationSaver, etc.
         amp: whether to enable auto-mixed-precision evaluation, default is False.
+        mode: model forward mode during evaluation, should be 'eval' or 'train',
+            which maps to `model.eval()` or `model.train()`, default to 'eval'.
 
     """
@@ -70,6 +72,7 @@ def __init__(
         additional_metrics: Optional[Dict[str, Metric]] = None,
         val_handlers: Optional[Sequence] = None,
         amp: bool = False,
+        mode: Union[ForwardMode, str] = ForwardMode.EVAL,
     ) -> None:
         super().__init__(
             device=device,
@@ -85,6 +88,13 @@
             handlers=val_handlers,
             amp=amp,
         )
+        mode = ForwardMode(mode)
+        if mode == ForwardMode.EVAL:
+            self.mode = eval_mode
+        elif mode == ForwardMode.TRAIN:
+            self.mode = train_mode
+        else:
+            raise ValueError(f"unsupported mode: {mode}, should be 'eval' or 'train'.")
 
     def run(self, global_epoch: int = 1) -> None:
         """
@@ -128,6 +138,8 @@ class SupervisedEvaluator(Evaluator):
         val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function,
             like: CheckpointHandler, StatsHandler, SegmentationSaver, etc.
         amp: whether to enable auto-mixed-precision evaluation, default is False.
+        mode: model forward mode during evaluation, should be 'eval' or 'train',
+            which maps to `model.eval()` or `model.train()`, default to 'eval'.
 
     """
@@ -146,6 +158,7 @@ def __init__(
         additional_metrics: Optional[Dict[str, Metric]] = None,
         val_handlers: Optional[Sequence] = None,
         amp: bool = False,
+        mode: Union[ForwardMode, str] = ForwardMode.EVAL,
     ) -> None:
         super().__init__(
             device=device,
@@ -159,6 +172,7 @@
             additional_metrics=additional_metrics,
             val_handlers=val_handlers,
             amp=amp,
+            mode=mode,
         )
 
         self.network = network
@@ -197,7 +211,7 @@ def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict
         # put iteration outputs into engine.state
         engine.state.output = output = {Keys.IMAGE: inputs, Keys.LABEL: targets}
         # execute forward computation
-        with eval_mode(self.network):
+        with self.mode(self.network):
             if self.amp:
                 with torch.cuda.amp.autocast():
                     output[Keys.PRED] = self.inferer(inputs, self.network, *args, **kwargs)
@@ -235,6 +249,8 @@ class EnsembleEvaluator(Evaluator):
         val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function,
             like: CheckpointHandler, StatsHandler, SegmentationSaver, etc.
         amp: whether to enable auto-mixed-precision evaluation, default is False.
+        mode: model forward mode during evaluation, should be 'eval' or 'train',
+            which maps to `model.eval()` or `model.train()`, default to 'eval'.
""" @@ -254,6 +270,7 @@ def __init__( additional_metrics: Optional[Dict[str, Metric]] = None, val_handlers: Optional[Sequence] = None, amp: bool = False, + mode: Union[ForwardMode, str] = ForwardMode.EVAL, ) -> None: super().__init__( device=device, @@ -267,6 +284,7 @@ def __init__( additional_metrics=additional_metrics, val_handlers=val_handlers, amp=amp, + mode=mode, ) self.networks = ensure_tuple(networks) @@ -309,7 +327,7 @@ def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict # put iteration outputs into engine.state engine.state.output = output = {Keys.IMAGE: inputs, Keys.LABEL: targets} for idx, network in enumerate(self.networks): - with eval_mode(network): + with self.mode(network): if self.amp: with torch.cuda.amp.autocast(): output.update({self.pred_keys[idx]: self.inferer(inputs, network, *args, **kwargs)}) diff --git a/monai/utils/__init__.py b/monai/utils/__init__.py index 6a76c96d0c..d622ce96ae 100644 --- a/monai/utils/__init__.py +++ b/monai/utils/__init__.py @@ -18,6 +18,7 @@ BlendMode, ChannelMatching, CommonKeys, + ForwardMode, GridSampleMode, GridSamplePadMode, InterpolateMode, diff --git a/monai/utils/enums.py b/monai/utils/enums.py index 9920aefe0e..1da7df86b6 100644 --- a/monai/utils/enums.py +++ b/monai/utils/enums.py @@ -218,6 +218,15 @@ class Method(Enum): END = "end" +class ForwardMode(Enum): + """ + See also: :py:class:`monai.transforms.engines.evaluator.Evaluator` + """ + + TRAIN = "train" + EVAL = "eval" + + class InverseKeys: """Extra meta data keys used for inverse transforms.""" From e8e2f3e2efb6bf8072b7ded40cb57bca01e07773 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Wed, 24 Mar 2021 12:57:52 +0800 Subject: [PATCH 092/457] 1807 Enhance SmartCacheDataset with shuffle and exception (#1832) * [DLMED] enhance SmartCache Signed-off-by: Nic Ma * [DLMED] fix CI test Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma --- monai/data/dataset.py | 28 +++++++++++++++++++++++++- tests/test_handler_smartcache.py | 2 +- tests/test_smartcachedataset.py | 34 ++++++++++++++++++++++++++++---- 3 files changed, 58 insertions(+), 6 deletions(-) diff --git a/monai/data/dataset.py b/monai/data/dataset.py index c10c500bf8..813008e3a8 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -547,7 +547,7 @@ def __getitem__(self, index): return data -class SmartCacheDataset(CacheDataset): +class SmartCacheDataset(Randomizable, CacheDataset): """ Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK. At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items @@ -594,6 +594,8 @@ def __init__( num_init_workers: Optional[int] = None, num_replace_workers: Optional[int] = None, progress: bool = True, + shuffle: bool = True, + seed: int = 0, ) -> None: """ Args: @@ -609,8 +611,14 @@ def __init__( num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch. If num_replace_workers is None then the number returned by os.cpu_count() is used. progress: whether to display a progress bar when caching for the first epoch. + shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch. + seed: random seed if shuffle is `True`, default to `0`. 
""" + if shuffle: + self.set_random_state(seed=seed) + self.randomize(data) + super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress) if self._cache is None: self._cache = self._fill_cache() @@ -636,6 +644,12 @@ def __init__( self._compute_data_idx() + def randomize(self, data: Sequence) -> None: + try: + self.R.shuffle(data) + except TypeError as e: + warnings.warn(f"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.") + def _compute_data_idx(self): """ Update the replacement data position in the total data. @@ -789,6 +803,18 @@ def __len__(self): """ return self.cache_num + def __getitem__(self, index): + """ + Raise exception if didn't call the expected APIs in SmartCacheDataset. + + """ + if not self.is_started(): + raise RuntimeError( + "if using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer," + "otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training." + ) + return super().__getitem__(index) + class ZipDataset(Dataset): """ diff --git a/tests/test_handler_smartcache.py b/tests/test_handler_smartcache.py index 95f8e70fa4..cfe68e98e2 100644 --- a/tests/test_handler_smartcache.py +++ b/tests/test_handler_smartcache.py @@ -36,7 +36,7 @@ def _train_func(engine, batch): engine = Engine(_train_func) # set up testing handler - dataset = SmartCacheDataset(data, transform=None, replace_rate=0.2, cache_num=5) + dataset = SmartCacheDataset(data, transform=None, replace_rate=0.2, cache_num=5, shuffle=False) data_loader = torch.utils.data.DataLoader(dataset, batch_size=5) SmartCacheHandler(dataset).attach(engine) diff --git a/tests/test_smartcachedataset.py b/tests/test_smartcachedataset.py index 7ebb2858d2..992e8ae43b 100644 --- a/tests/test_smartcachedataset.py +++ b/tests/test_smartcachedataset.py @@ -63,13 +63,39 @@ def test_shape(self, replace_rate, num_replace_workers, transform): dataset.start() for _ in range(3): dataset.update_cache() - self.assertIsNotNone(dataset._cache[15]) - if isinstance(dataset._cache[15]["image"], np.ndarray): - np.testing.assert_allclose(dataset._cache[15]["image"], dataset._cache[15]["label"]) + self.assertIsNotNone(dataset[15]) + if isinstance(dataset[15]["image"], np.ndarray): + np.testing.assert_allclose(dataset[15]["image"], dataset[15]["label"]) else: - self.assertIsInstance(dataset._cache[15]["image"], str) + self.assertIsInstance(dataset[15]["image"], str) dataset.shutdown() + def test_shuffle(self): + test_data = [{"image": f"test_image{i}.nii.gz"} for i in range(20)] + dataset = SmartCacheDataset( + data=test_data, + transform=None, + replace_rate=0.1, + cache_num=16, + num_init_workers=4, + num_replace_workers=4, + shuffle=True, + seed=123, + ) + + dataset.start() + for i in range(3): + dataset.update_cache() + + if i == 0: + self.assertEqual(dataset[15]["image"], "test_image18.nii.gz") + elif i == 1: + self.assertEqual(dataset[15]["image"], "test_image13.nii.gz") + else: + self.assertEqual(dataset[15]["image"], "test_image5.nii.gz") + + dataset.shutdown() + if __name__ == "__main__": unittest.main() From f8c3901a9c2d7854c7cd57f609920b1b71056f65 Mon Sep 17 00:00:00 2001 From: Yiwen Li <44606435+kate-sann5100@users.noreply.github.com> Date: Wed, 24 Mar 2021 13:22:19 +0000 Subject: [PATCH 093/457] fix device bug (#1841) Signed-off-by: kate-sann5100 --- monai/losses/image_dissimilarity.py | 10 +-- monai/losses/multi_scale.py | 4 +- tests/test_bending_energy.py | 20 +++--- tests/test_global_mutual_information_loss.py | 45 
From f8c3901a9c2d7854c7cd57f609920b1b71056f65 Mon Sep 17 00:00:00 2001
From: Yiwen Li <44606435+kate-sann5100@users.noreply.github.com>
Date: Wed, 24 Mar 2021 13:22:19 +0000
Subject: [PATCH 093/457] fix device bug (#1841)

Signed-off-by: kate-sann5100
---
 monai/losses/image_dissimilarity.py           | 10 +--
 monai/losses/multi_scale.py                   |  4 +-
 tests/test_bending_energy.py                  | 20 +++---
 tests/test_global_mutual_information_loss.py  | 45 +++++++-----
 ...local_normalized_cross_correlation_loss.py | 72 ++++++++++++-----
 tests/test_multi_scale.py                     | 23 ++++--
 6 files changed, 119 insertions(+), 55 deletions(-)

diff --git a/monai/losses/image_dissimilarity.py b/monai/losses/image_dissimilarity.py
index b229a0c08f..431167447b 100644
--- a/monai/losses/image_dissimilarity.py
+++ b/monai/losses/image_dissimilarity.py
@@ -129,11 +129,11 @@ def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
         t2, p2, tp = target ** 2, pred ** 2, target * pred
         kernel, kernel_vol = self.kernel.to(pred), self.kernel_vol.to(pred)
         # sum over kernel
-        t_sum = separable_filtering(target, kernels=[kernel] * self.ndim)
-        p_sum = separable_filtering(pred, kernels=[kernel] * self.ndim)
-        t2_sum = separable_filtering(t2, kernels=[kernel] * self.ndim)
-        p2_sum = separable_filtering(p2, kernels=[kernel] * self.ndim)
-        tp_sum = separable_filtering(tp, kernels=[kernel] * self.ndim)
+        t_sum = separable_filtering(target, kernels=[kernel.to(pred)] * self.ndim)
+        p_sum = separable_filtering(pred, kernels=[kernel.to(pred)] * self.ndim)
+        t2_sum = separable_filtering(t2, kernels=[kernel.to(pred)] * self.ndim)
+        p2_sum = separable_filtering(p2, kernels=[kernel.to(pred)] * self.ndim)
+        tp_sum = separable_filtering(tp, kernels=[kernel.to(pred)] * self.ndim)
 
         # average over kernel
         t_avg = t_sum / kernel_vol
diff --git a/monai/losses/multi_scale.py b/monai/losses/multi_scale.py
index 5a17bc2d07..af23e03440 100644
--- a/monai/losses/multi_scale.py
+++ b/monai/losses/multi_scale.py
@@ -82,8 +82,8 @@ def forward(self, y_true: torch.Tensor, y_pred: torch.Tensor) -> torch.Tensor:
             else:
                 loss_list.append(
                     self.loss(
-                        separable_filtering(y_pred, [self.kernel_fn(s)] * (y_true.ndim - 2)),
-                        separable_filtering(y_true, [self.kernel_fn(s)] * (y_true.ndim - 2)),
+                        separable_filtering(y_pred, [self.kernel_fn(s).to(y_pred)] * (y_true.ndim - 2)),
+                        separable_filtering(y_true, [self.kernel_fn(s).to(y_pred)] * (y_true.ndim - 2)),
                     )
                 )
         loss = torch.stack(loss_list, dim=0)
diff --git a/tests/test_bending_energy.py b/tests/test_bending_energy.py
index f2b9a41cae..8f1fb43535 100644
--- a/tests/test_bending_energy.py
+++ b/tests/test_bending_energy.py
@@ -17,30 +17,32 @@
 
 from monai.losses.deform import BendingEnergyLoss
 
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 TEST_CASES = [
     [
         {},
-        {"pred": torch.ones((1, 3, 5, 5, 5))},
+        {"pred": torch.ones((1, 3, 5, 5, 5), device=device)},
         0.0,
     ],
     [
         {},
-        {"pred": torch.arange(0, 5)[None, None, None, None, :].expand(1, 3, 5, 5, 5)},
+        {"pred": torch.arange(0, 5, device=device)[None, None, None, None, :].expand(1, 3, 5, 5, 5)},
         0.0,
     ],
     [
         {},
-        {"pred": torch.arange(0, 5)[None, None, None, None, :].expand(1, 3, 5, 5, 5) ** 2},
+        {"pred": torch.arange(0, 5, device=device)[None, None, None, None, :].expand(1, 3, 5, 5, 5) ** 2},
         4.0,
     ],
     [
         {},
-        {"pred": torch.arange(0, 5)[None, None, None, :].expand(1, 3, 5, 5) ** 2},
+        {"pred": torch.arange(0, 5, device=device)[None, None, None, :].expand(1, 3, 5, 5) ** 2},
         4.0,
     ],
     [
         {},
-        {"pred": torch.arange(0, 5)[None, None, :].expand(1, 3, 5) ** 2},
+        {"pred": torch.arange(0, 5, device=device)[None, None, :].expand(1, 3, 5) ** 2},
         4.0,
     ],
 ]
@@ -56,19 +58,19 @@ def test_ill_shape(self):
         loss = BendingEnergyLoss()
         # not in 3-d, 4-d, 5-d
         with self.assertRaisesRegex(ValueError, ""):
-            loss.forward(torch.ones((1, 3)))
+            loss.forward(torch.ones((1, 3), device=device))
         with self.assertRaisesRegex(ValueError, ""):
-            loss.forward(torch.ones((1, 3, 5, 5, 5, 5)))
+            loss.forward(torch.ones((1, 3, 5, 5, 5, 5), device=device))
         # spatial_dim < 5
         with self.assertRaisesRegex(ValueError, ""):
-            loss.forward(torch.ones((1, 3, 4, 5, 5)))
+            loss.forward(torch.ones((1, 3, 4, 5, 5), device=device))
         with self.assertRaisesRegex(ValueError, ""):
             loss.forward(torch.ones((1, 3, 5, 4, 5)))
         with self.assertRaisesRegex(ValueError, ""):
             loss.forward(torch.ones((1, 3, 5, 5, 4)))
 
     def test_ill_opts(self):
-        pred = torch.rand(1, 3, 5, 5, 5)
+        pred = torch.rand(1, 3, 5, 5, 5).to(device=device)
         with self.assertRaisesRegex(ValueError, ""):
             BendingEnergyLoss(reduction="unknown")(pred)
         with self.assertRaisesRegex(ValueError, ""):
diff --git a/tests/test_global_mutual_information_loss.py b/tests/test_global_mutual_information_loss.py
index 252a70e85e..3373b59621 100644
--- a/tests/test_global_mutual_information_loss.py
+++ b/tests/test_global_mutual_information_loss.py
@@ -17,20 +17,30 @@
 
 from monai.losses.image_dissimilarity import GlobalMutualInformationLoss
 
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 TEST_CASES = [
     [
         {},
         {
-            "pred": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3).div(3),
-            "target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3).div(3),
+            "pred": torch.arange(0, 3, dtype=torch.float, device=device)[None, :, None, None, None]
+            .expand(1, 3, 3, 3, 3)
+            .div(3),
+            "target": torch.arange(0, 3, dtype=torch.float, device=device)[None, :, None, None, None]
+            .expand(1, 3, 3, 3, 3)
+            .div(3),
         },
         -1.0986018,
     ],
     [
         {},
         {
-            "pred": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3).div(3),
-            "target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None, None].expand(1, 3, 3, 3, 3).div(3)
+            "pred": torch.arange(0, 3, dtype=torch.float, device=device)[None, :, None, None, None]
+            .expand(1, 3, 3, 3, 3)
+            .div(3),
+            "target": torch.arange(0, 3, dtype=torch.float, device=device)[None, :, None, None, None]
+            .expand(1, 3, 3, 3, 3)
+            .div(3)
             ** 2,
         },
         -1.083999,
     ],
     [
         {},
         {
-            "pred": torch.arange(0, 3, dtype=torch.float)[None, :, None, None].expand(1, 3, 3, 3).div(3),
-            "target": torch.arange(0, 3, dtype=torch.float)[None, :, None, None].expand(1, 3, 3, 3).div(3) ** 2,
+            "pred": torch.arange(0, 3, dtype=torch.float, device=device)[None, :, None, None].expand(1, 3, 3, 3).div(3),
+            "target": torch.arange(0, 3, dtype=torch.float, device=device)[None, :, None, None]
+            .expand(1, 3, 3, 3)
+            .div(3)
+            ** 2,
         },
         -1.083999,
     ],
     [
         {},
         {
-            "pred": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(1, 3, 3).div(3),
-            "target": torch.arange(0, 3, dtype=torch.float)[None, :, None].expand(1, 3, 3).div(3) ** 2,
+            "pred": torch.arange(0, 3, dtype=torch.float, device=device)[None, :, None].expand(1, 3, 3).div(3),
+            "target": torch.arange(0, 3, dtype=torch.float, device=device)[None, :, None].expand(1, 3, 3).div(3) ** 2,
         },
         -1.083999,
     ],
     [
         {},
         {
-            "pred": torch.arange(0, 3, dtype=torch.float)[None, :].div(3),
-            "target": torch.arange(0, 3, dtype=torch.float)[None, :].div(3) ** 2,
+            "pred": torch.arange(0, 3, dtype=torch.float, device=device)[None, :].div(3),
+            "target": torch.arange(0, 3, dtype=torch.float, device=device)[None, :].div(3) ** 2,
         },
         -1.083999,
     ],
     [
         {},
         {
-            "pred": torch.arange(0, 3, dtype=torch.float).div(3),
-            "target": torch.arange(0, 3, dtype=torch.float).div(3) ** 2,
+            "pred": torch.arange(0, 3, dtype=torch.float, device=device).div(3),
+            "target": torch.arange(0, 3, dtype=torch.float, device=device).div(3) ** 2,
         },
         -1.1920927e-07,
     ],
@@ -79,13 +92,13 @@ def test_shape(self, input_param, input_data, expected_val):
     def test_ill_shape(self):
         loss = GlobalMutualInformationLoss()
         with self.assertRaisesRegex(ValueError, ""):
-            loss.forward(torch.ones((1, 2), dtype=torch.float), torch.ones((1, 3), dtype=torch.float))
+            loss.forward(torch.ones((1, 2), dtype=torch.float), torch.ones((1, 3), dtype=torch.float, device=device))
         with self.assertRaisesRegex(ValueError, ""):
-            loss.forward(torch.ones((1, 3, 3), dtype=torch.float), torch.ones((1, 3), dtype=torch.float))
+            loss.forward(torch.ones((1, 3, 3), dtype=torch.float), torch.ones((1, 3), dtype=torch.float, device=device))
 
     def test_ill_opts(self):
-        pred = torch.ones((1, 3, 3, 3, 3), dtype=torch.float)
-        target = torch.ones((1, 3, 3, 3, 3), dtype=torch.float)
+        pred = torch.ones((1, 3, 3, 3, 3), dtype=torch.float, device=device)
+        target = torch.ones((1, 3, 3, 3, 3), dtype=torch.float, device=device)
         with self.assertRaisesRegex(ValueError, ""):
             GlobalMutualInformationLoss(num_bins=0)(pred, target)
         with self.assertRaisesRegex(ValueError, ""):
diff --git a/tests/test_local_normalized_cross_correlation_loss.py b/tests/test_local_normalized_cross_correlation_loss.py
index 8e9482596f..bddaedb54a 100644
--- a/tests/test_local_normalized_cross_correlation_loss.py
+++ b/tests/test_local_normalized_cross_correlation_loss.py
@@ -17,60 +17,89 @@
 
 from monai.losses.image_dissimilarity import LocalNormalizedCrossCorrelationLoss
 
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 TEST_CASES = [
     [
         {"in_channels": 1, "ndim": 1, "kernel_type": "rectangular", "reduction": "sum"},
         {
-            "pred": torch.arange(0, 3).reshape(1, 1, -1).to(torch.float),
-            "target": torch.arange(0, 3).reshape(1, 1, -1).to(torch.float),
+            "pred": torch.arange(0, 3).reshape(1, 1, -1).to(dtype=torch.float, device=device),
+            "target": torch.arange(0, 3).reshape(1, 1, -1).to(dtype=torch.float, device=device),
         },
         -1.0 * 3,
     ],
     [
         {"in_channels": 1, "ndim": 1, "kernel_type": "rectangular"},
         {
-            "pred": torch.arange(0, 3).reshape(1, 1, -1).to(torch.float),
-            "target": torch.arange(0, 3).reshape(1, 1, -1).to(torch.float),
+            "pred": torch.arange(0, 3).reshape(1, 1, -1).to(dtype=torch.float, device=device),
+            "target": torch.arange(0, 3).reshape(1, 1, -1).to(dtype=torch.float, device=device),
         },
         -1.0,
     ],
     [
         {"in_channels": 1, "ndim": 2, "kernel_type": "rectangular"},
         {
-            "pred": torch.arange(0, 3).reshape(1, 1, -1, 1).expand(1, 1, 3, 3).to(torch.float),
-            "target": torch.arange(0, 3).reshape(1, 1, -1, 1).expand(1, 1, 3, 3).to(torch.float),
+            "pred": torch.arange(0, 3).reshape(1, 1, -1, 1).expand(1, 1, 3, 3).to(dtype=torch.float, device=device),
+            "target": torch.arange(0, 3).reshape(1, 1, -1, 1).expand(1, 1, 3, 3).to(dtype=torch.float, device=device),
         },
         -1.0,
     ],
     [
         {"in_channels": 1, "ndim": 3, "kernel_type": "rectangular"},
         {
-            "pred": torch.arange(0, 3).reshape(1, 1, -1, 1, 1).expand(1, 1, 3, 3, 3).to(torch.float),
-            "target": torch.arange(0, 3).reshape(1, 1, -1, 1, 1).expand(1, 1, 3, 3, 3).to(torch.float),
+            "pred": torch.arange(0, 3)
+            .reshape(1, 1, -1, 1, 1)
+            .expand(1, 1, 3, 3, 3)
+            .to(dtype=torch.float, device=device),
+            "target": torch.arange(0, 3)
+            .reshape(1, 1, -1, 1, 1)
+            .expand(1, 1, 3, 3, 3)
+            .to(dtype=torch.float, device=device),
         },
         -1.0,
     ],
     [
         {"in_channels": 3, "ndim": 3, "kernel_type": "rectangular"},
         {
-            "pred": torch.arange(0, 3).reshape(1, 1, -1, 1, 1).expand(1, 3, 3, 3, 3).to(torch.float),
-            "target": torch.arange(0, 3).reshape(1, 1, -1, 1, 1).expand(1, 3, 3, 3, 3).to(torch.float) ** 2,
+            "pred": torch.arange(0, 3)
+            .reshape(1, 1, -1, 1, 1)
+            .expand(1, 3, 3, 3, 3)
+            .to(dtype=torch.float, device=device),
+            "target": torch.arange(0, 3)
+            .reshape(1, 1, -1, 1, 1)
+            .expand(1, 3, 3, 3, 3)
+            .to(dtype=torch.float, device=device)
+            ** 2,
         },
         -0.95801723,
     ],
     [
         {"in_channels": 3, "ndim": 3, "kernel_type": "triangular", "kernel_size": 5},
         {
-            "pred": torch.arange(0, 5).reshape(1, 1, -1, 1, 1).expand(1, 3, 5, 5, 5).to(torch.float),
-            "target": torch.arange(0, 5).reshape(1, 1, -1, 1, 1).expand(1, 3, 5, 5, 5).to(torch.float) ** 2,
+            "pred": torch.arange(0, 5)
+            .reshape(1, 1, -1, 1, 1)
+            .expand(1, 3, 5, 5, 5)
+            .to(dtype=torch.float, device=device),
+            "target": torch.arange(0, 5)
+            .reshape(1, 1, -1, 1, 1)
+            .expand(1, 3, 5, 5, 5)
+            .to(dtype=torch.float, device=device)
+            ** 2,
         },
         -0.918672,
     ],
     [
         {"in_channels": 3, "ndim": 3, "kernel_type": "gaussian"},
         {
-            "pred": torch.arange(0, 3).reshape(1, 1, -1, 1, 1).expand(1, 3, 3, 3, 3).to(torch.float),
-            "target": torch.arange(0, 3).reshape(1, 1, -1, 1, 1).expand(1, 3, 3, 3, 3).to(torch.float) ** 2,
+            "pred": torch.arange(0, 3)
+            .reshape(1, 1, -1, 1, 1)
+            .expand(1, 3, 3, 3, 3)
+            .to(dtype=torch.float, device=device),
+            "target": torch.arange(0, 3)
+            .reshape(1, 1, -1, 1, 1)
+            .expand(1, 3, 3, 3, 3)
+            .to(dtype=torch.float, device=device)
+            ** 2,
         },
         -0.95406944,
     ],
@@ -87,13 +116,22 @@ def test_ill_shape(self):
         loss = LocalNormalizedCrossCorrelationLoss(in_channels=3, ndim=3)
         # in_channel unmatch
         with self.assertRaisesRegex(ValueError, ""):
-            loss.forward(torch.ones((1, 2, 3, 3, 3), dtype=torch.float), torch.ones((1, 2, 3, 3, 3), dtype=torch.float))
+            loss.forward(
+                torch.ones((1, 2, 3, 3, 3), dtype=torch.float, device=device),
+                torch.ones((1, 2, 3, 3, 3), dtype=torch.float, device=device),
+            )
         # ndim unmatch
         with self.assertRaisesRegex(ValueError, ""):
-            loss.forward(torch.ones((1, 3, 3, 3), dtype=torch.float), torch.ones((1, 3, 3, 3), dtype=torch.float))
+            loss.forward(
+                torch.ones((1, 3, 3, 3), dtype=torch.float, device=device),
+                torch.ones((1, 3, 3, 3), dtype=torch.float, device=device),
+            )
         # pred, target shape unmatch
         with self.assertRaisesRegex(ValueError, ""):
-            loss.forward(torch.ones((1, 3, 3, 3, 3), dtype=torch.float), torch.ones((1, 3, 4, 4, 4), dtype=torch.float))
+            loss.forward(
+                torch.ones((1, 3, 3, 3, 3), dtype=torch.float, device=device),
+                torch.ones((1, 3, 4, 4, 4), dtype=torch.float, device=device),
+            )
 
     def test_ill_opts(self):
         pred = torch.ones((1, 3, 3, 3, 3), dtype=torch.float)
diff --git a/tests/test_multi_scale.py b/tests/test_multi_scale.py
index 9ce1734e28..01a760db72 100644
--- a/tests/test_multi_scale.py
+++ b/tests/test_multi_scale.py
@@ -19,23 +19,30 @@
 from tests.utils import SkipIfBeforePyTorchVersion, test_script_save
 
 dice_loss = DiceLoss(include_background=True, sigmoid=True, smooth_nr=1e-5, smooth_dr=1e-5)
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
 TEST_CASES = [
     [
         {"loss": dice_loss, "scales": None, "kernel": "gaussian"},
-        {"y_pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), "y_true": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])},
+        {
+            "y_pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]], device=device),
+            "y_true": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]], device=device),
+        },
         0.307576,
     ],
     [
         {"loss": dice_loss, "scales": [0, 1], "kernel": "gaussian"},
-        {"y_pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), "y_true": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])},
+        {
+            "y_pred": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]], device=device),
+            "y_true": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]], device=device),
+        },
         0.463116,
     ],
     [
         {"loss": dice_loss, "scales": [0, 1, 2], "kernel": "cauchy"},
         {
-            "y_pred": torch.tensor([[[[[1.0, -1.0], [-1.0, 1.0]]]]]),
-            "y_true": torch.tensor([[[[[1.0, 0.0], [1.0, 1.0]]]]]),
+            "y_pred": torch.tensor([[[[[1.0, -1.0], [-1.0, 1.0]]]]], device=device),
+            "y_true": torch.tensor([[[[[1.0, 0.0], [1.0, 1.0]]]]], device=device),
         },
         0.715228,
     ],
@@ -52,9 +59,13 @@ def test_ill_opts(self):
         with self.assertRaisesRegex(ValueError, ""):
             MultiScaleLoss(loss=dice_loss, kernel="none")
         with self.assertRaisesRegex(ValueError, ""):
-            MultiScaleLoss(loss=dice_loss, scales=[-1])(torch.ones((1, 1, 3)), torch.ones((1, 1, 3)))
+            MultiScaleLoss(loss=dice_loss, scales=[-1])(
+                torch.ones((1, 1, 3), device=device), torch.ones((1, 1, 3), device=device)
+            )
         with self.assertRaisesRegex(ValueError, ""):
-            MultiScaleLoss(loss=dice_loss, scales=[-1], reduction="none")(torch.ones((1, 1, 3)), torch.ones((1, 1, 3)))
+            MultiScaleLoss(loss=dice_loss, scales=[-1], reduction="none")(
+                torch.ones((1, 1, 3), device=device), torch.ones((1, 1, 3), device=device)
+            )
 
     @SkipIfBeforePyTorchVersion((1, 7, 0))
     def test_script(self):
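
For reference, the device handling that the tests above exercise, sketched on one of the fixed losses (illustrative only, not part of the patch series): after the fix, the filtering kernels follow the prediction tensor, so the losses accept GPU inputs directly.

    import torch
    from monai.losses.image_dissimilarity import LocalNormalizedCrossCorrelationLoss

    device = "cuda" if torch.cuda.is_available() else "cpu"
    loss = LocalNormalizedCrossCorrelationLoss(in_channels=1, ndim=3, kernel_type="rectangular")
    pred = torch.rand(1, 1, 8, 8, 8, device=device)
    target = torch.rand(1, 1, 8, 8, 8, device=device)
    print(loss(pred, target))  # kernels are moved to pred's device internally
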
From cd08c001a4201ad5d1f03f488cc15938257fe171 Mon Sep 17 00:00:00 2001
From: Wenqi Li
Date: Wed, 24 Mar 2021 15:45:45 +0000
Subject: [PATCH 094/457] 1837 fixes collating data types (#1839)

* 1837--collating

Signed-off-by: Wenqi Li

* fixes transform device

Signed-off-by: Wenqi Li

* reverting self.affine

Signed-off-by: Wenqi Li

Co-authored-by: Richard Brown <33289025+rijobro@users.noreply.github.com>
---
 monai/transforms/spatial/dictionary.py |  3 ++-
 tests/test_inverse_collation.py        | 21 +++++++++++++++++----
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py
index 5f8aa3d0b0..e356a51a2a 100644
--- a/monai/transforms/spatial/dictionary.py
+++ b/monai/transforms/spatial/dictionary.py
@@ -698,7 +698,8 @@ def __call__(
             affine = self.rand_affine.rand_affine_grid.get_transformation_matrix()
         else:
             grid = create_grid(spatial_size=sp_size)
-            affine = torch.eye(len(sp_size) + 1)
+            # to be consistent with the self._do_transform case (dtype and device)
+            affine = torch.as_tensor(np.eye(len(sp_size) + 1), device=self.rand_affine.rand_affine_grid.device)
 
         for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode):
             self.push_transform(d, key, extra_info={"affine": affine})
diff --git a/tests/test_inverse_collation.py b/tests/test_inverse_collation.py
index 5bde157343..3e07a8f0e2 100644
--- a/tests/test_inverse_collation.py
+++ b/tests/test_inverse_collation.py
@@ -14,6 +14,7 @@
 from typing import TYPE_CHECKING
 
 import numpy as np
+import torch
 from parameterized import parameterized
 
 from monai.data import CacheDataset, DataLoader, create_test_image_2d, create_test_image_3d, pad_list_data_collate
@@ -49,7 +50,13 @@
         RandRotate90d(keys=KEYS, spatial_axes=(1, 2)),
         RandZoomd(keys=KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
         RandRotated(keys=KEYS, prob=0.5, range_x=np.pi),
-        RandAffined(keys=KEYS, prob=0.5, rotate_range=np.pi),
+        RandAffined(
+            keys=KEYS,
+            prob=0.5,
+            rotate_range=np.pi,
+            device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
+            as_tensor_output=False,
+        ),
     ]
 ]
 
@@ -62,7 +69,13 @@
         RandRotate90d(keys=KEYS, prob=0.5, spatial_axes=(0, 1)),
         RandZoomd(keys=KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
         RandRotated(keys=KEYS, prob=0.5, range_x=np.pi),
-        RandAffined(keys=KEYS, prob=0.5, rotate_range=np.pi),
+        RandAffined(
+            keys=KEYS,
+            prob=0.5,
+            rotate_range=np.pi,
+            device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
+            as_tensor_output=False,
+        ),
     ]
 ]
 
@@ -102,8 +115,8 @@ def test_collation(self, _, transform, collate_fn, ndim):
         else:
             modified_transform = Compose([transform, ResizeWithPadOrCropd(KEYS, 100)])
 
-        # num workers = 0 for mac
-        num_workers = 2 if sys.platform != "darwin" else 0
+        # num workers = 0 for mac or gpu transforms
+        num_workers = 0 if sys.platform == "darwin" or torch.cuda.is_available() else 2
 
         dataset = CacheDataset(data, transform=modified_transform, progress=False)
         loader = DataLoader(dataset, num_workers, batch_size=self.batch_size, collate_fn=collate_fn)

From c3f1e02dae9926f587167c59f01eaca218691236 Mon Sep 17 00:00:00 2001
From: Wenqi Li
Date: Wed, 24 Mar 2021 22:19:36 +0000
Subject: [PATCH 095/457] truncate plot title string (#1843)

Signed-off-by: Wenqi Li

Co-authored-by: Richard Brown <33289025+rijobro@users.noreply.github.com>
---
 monai/utils/jupyter_utils.py                  |   5 ++++-
 tests/test_threadcontainer.py                 |   6 +++++-
 .../threadcontainer_plot_test.png             | Bin 60289 -> 57084 bytes
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/monai/utils/jupyter_utils.py b/monai/utils/jupyter_utils.py
index 10dfe59f59..df97f0fa4b 100644
--- a/monai/utils/jupyter_utils.py
+++ b/monai/utils/jupyter_utils.py
@@ -245,6 +245,7 @@ class ThreadContainer(Thread):
         engine: wrapped `Engine` object, when the container is started its `run` method is called
         loss_transform: callable to convert an output dict into a single numeric value
         metric_transform: callable to convert a named metric value into a single numeric value
+        status_format: format string for status key-value pairs.
     """
 
     def __init__(
@@ -252,6 +253,7 @@ def __init__(
         engine: Engine,
         loss_transform: Callable = _get_loss_from_output,
         metric_transform: Callable = lambda name, value: value,
+        status_format: str = "{}: {:.4}",
     ):
         super().__init__()
         self.lock = RLock()
@@ -260,6 +262,7 @@
         self.loss_transform = loss_transform
         self.metric_transform = metric_transform
         self.fig = None
+        self.status_format = status_format
 
         self.engine.add_event_handler(Events.ITERATION_COMPLETED, self._update_status)
 
@@ -318,7 +321,7 @@ def status(self) -> str:
         stats = self.status_dict
 
         msgs = [stats.pop(StatusMembers.STATUS.value), "Iters: " + str(stats.pop(StatusMembers.ITERS.value))]
-        msgs += ["%s: %s" % kv for kv in stats.items()]
+        msgs += [self.status_format.format(key, val) for key, val in stats.items()]
 
         return ", ".join(msgs)
 
diff --git a/tests/test_threadcontainer.py b/tests/test_threadcontainer.py
index 75612586e8..afb27609a4 100644
--- a/tests/test_threadcontainer.py
+++ b/tests/test_threadcontainer.py
@@ -99,6 +99,10 @@ def test_plot(self):
 
         with tempfile.TemporaryDirectory() as tempdir:
             tempimg = f"{tempdir}/threadcontainer_plot_test.png"
             fig.savefig(tempimg)
-            comp = compare_images(tempimg, f"{testing_dir}/threadcontainer_plot_test.png", 1e-3)
+            comp = compare_images(f"{testing_dir}/threadcontainer_plot_test.png", tempimg, 1e-3)
 
             self.assertIsNone(comp, comp)  # None indicates test passed
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/testing_data/threadcontainer_plot_test.png b/tests/testing_data/threadcontainer_plot_test.png
index b3576491ec1b9f6ecd95427dc63293aa454fc5e7..b73edd825860508fc5c6e022feddfa5c948314b0 100644
GIT binary patch
literal 57084
[... base85-encoded binary image data omitted ...]

literal 60289
[... base85-encoded binary image data omitted ...]
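
For reference, a minimal sketch of the new `status_format` hook (illustrative only, not part of the patch series; it requires pytorch-ignite, and the engine here is a stand-in that is never run): the default format "{}: {:.4}" truncates floating-point values to four significant digits, which keeps the status string used as the plot title short.

    import torch
    from ignite.engine import Engine
    from monai.utils.jupyter_utils import ThreadContainer

    trainer = Engine(lambda engine, batch: {"loss": torch.tensor(0.69314718056)})
    container = ThreadContainer(trainer, status_format="{}: {:.3}")  # e.g. three significant digits

    # the formatting applied to each status key-value pair:
    print("{}: {:.4}".format("Loss", 0.69314718056))  # -> Loss: 0.6931
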
z-Vyj6e=Wld_}3LX={t6}Esg9Pb!`k$H+1c+%q;E99_Z0K7~0rAu(Y_ueu@3^1$tvU zJ1biO4i57_e}Uc7<~|3h^5-&m6MU;{cWqH95?$nf80n&E4^XI~2eMM)%1*KKgU)Ws zo!g>It#OlI44%ysVqd#saMj^qx)?o1D8ZPVx5-gf_N3RZRO{98*>wmz-3amYj$J4c zCEZCa#555*d(i@eK>K_?Rqwm&63<>3bh&zZGM?go(R3Y;0pt6sN9D?=flIBzo1a^n zD4o9xU43MD_sFq-{Zf2%+indN`0rmiN3JKH_}5=M=8bQ6=HI_i|M7P)bTH%o{YH4G z6Vp=v{)Kuj#{BO$!mPm-`1gk$L!E02`u97bIK37A{YKbc|G$5wGBm!AS8Qx-IPDR| z5TTWiXE`a4(g+{@`E?1DeqV6m`!f4EdOV+9`BbGO`Dk{hiN{y1Hs*W7X@^uUUb%AW z;Sr)0<9h|h#|{oQs~taksV5~TW7>ZIEPnm^(KQ$0h6uU{_uUO1>cdR!eFCf_M^UKf zEk+mayhRlcdkCI6dzJwYvn%^v_k5}7-V&BY^}Yk;(IZkaGGj>rv=R>GNQNwt99^=e`z+mgm0x^*8}?YisL^J0ne-!=5DKq)UU2!x$)Di-Ev|!@a5L!E!!q zaWNa46~l<8?#1Rpqn4LE)5F!G1?m@FHql{*&Cd!m61ufLX?*ChE9l6{$pgc}{OIv| zDxBXn+8*vNdM?*sktE)4jc=Q=3k(l$=*iPxZuLC)mYLubm&c&?h%$~3~LA9pd?w3jWn!iQGarN*RiEXJ~`7H;LzqURwsE(vXg zN#x_g!opi`a?pqS$o)fCA9`Gm;muA7IR1!Hwq*5-p!G-%#@Ytv!jY6G!f}dNlLU8=}aZZDeq!Uw9e6HD-?tjsl@5#d>B&QNIDlcl}Y*5Z-`iHcox`PSBW1aeP4 zG&UAH?fuvwb_*qChd6#ew$C(c^s{IFkI!DkMA5p=b4gvjI__A#OCz!~eO72|nQ0Jq-d z!y$Yqzs&@mYPLEKYI>mb(w79$2!%KS-PU+v^h72{2*pL8dcE4AO4rAa9-&azJy4TDzf{qk4XQkH&GpTM%7Rmtu|{D5d1m!ya;5y%90| z2KHV5+}?w4uXQIrKHgeQI1D|h<&-3?|D#YPn-a>G{H%tBNnH93D3n4h|EPt$g2Kk# zPuq{L(u!}Q2L~HP_t#Efi`RTjmCtioH(8tQB%}&E!Z&Dlf`EXa;tLPuQGVBr3$Jg) z9C=vl_53-d(C+*@t5OZxyc_b0Qxg+nq}ZoLJx)gmt>C?{t2;(P;SYN#S0x6vgVXmH z_ep7Jh*H`E-`q|co9}&RUY1)%kN5V@1t(a(gQ*|roF|TLuEN%=hd&%Ne2PnaI&gEy zwKXMTZ_f?S$BT%XcO*4hOCp+GGx6KZEfp1uPcJK9S>~eYBu6G3w2mH`CwJNJRFQ>L}LR)rtDa=KBf>Zwf7A zCnqP9Qc(p5(0NdvWY<^<_S{`4=CT?QGrmj(EBWlDODCWJX;}|@>|R8dj1$AU7R&9{ zUmB_1N;ur6%Po2Kc>yj;=R@EzPO30GBBJ^*zM<*P?w}LQLLPMc5B7E`uSrYeRPD^N z4LDE5p^GdA%L=z~dDSzRHV>h2l`K~6w!OK{Y+R^rx6nr}y4%}&>+ao|@TTVGI^<3) zEm=XyU0dis$x*eHd{s0}A@2QXT>w`2ObADn*xkFQ;yrdAL~MNieErS^3Nv_b>0I;f z98IW|muzfpu^NmF4M*c$(bm-`i+z>eNEKTQx_#+>P4)enyxxztHr@Au{8wKzPuH~^ zc$Sl~HVL(YEh95?`NN5uup0KDbu_|_525qiJ7!YYM>Azp%IUmf5K6^E04=9JoJMdd zKy(kI(q;Yq?VG~nO9CV!Tl7frtq(b=KV0R$^)2DRx2NaM9@??mwh?J&TT68$%WaY* zqu*mJ8=|#6W*&2z5zckzHtI(R`@ed{xH;RZRq29(3JwXeUTxu%B*p$5CwP*Ji>s%~ zU69s&O;K4zWi+~aFZHT+Zq*K}l;VeXts;amEk_i|9zS{V5I(m&S5Za9PuO)s2fn~_ ze(N({Wd`Pl2m6oX&wSS=0_82W~cx>$H(^qV;+0=4A zG(BM+q*F)DiagD;8kKnU`t{hGw0N6FaxyX<*qCVNnCR$$a&#GCoS>tOswxFi(CIw3 zFyOv&LD?SYHftLA0?$44)TKv}kz?`ikf!UfIJD5NCMQRLD%h^-Oz_w_4_o_{?32EN z;V$E*{o?+G=R(fhX1vrM%OTo{x6>4m%`I}UrLU~4On>3RQKVm5cWXN8!jg5Gj8aQ% zH@4c_vYsER>>qa_5#8gg+FOzTm_P5RaR{XlRbxsVLCJ+l;D?F)UrbdAs0>^693I|{ zw634WH$O$+h~XX0%&mG4KT5hZWuABE*1dZnpFYXCt+ii8p3du=3Fu2jSy@NfqY074 z3OyEcWUS(6%bg?n4Z@<6^7D^Fb5zYFN46FPy9QpE%tN?2nVs(O0`>%r;WW0)qq`dm zA?1rz)Nt8vu2#d}{g!sK(b9AG9;)WaO_3nj2>O5>F3Kg6Q1c!&?aH#Ii;jxYh2KIO zOjNn=E{z7zjlnWu8giMth^m3c83wiNG{041XKwBr1dBMWG%>foyMHs+pwH4{l^W_b z(ubi8Vcyylxc~XNH#}V-*avv{_|}d0vQ?p~HKv%)b!C@~hYP-SS)b)SwFm`@%VGXb z?#7omK`a#fV@hGIv!jDvRFn=Dg;}RP)n@;i`sCy!)T%b)%h3bH^?J*9ligM(nzkmM z+!X4ADw2dwYAdaM9{itK=KyGrYWADVk)` zQqt0+u-Gwn?-ZJlE5r*0(Fi(hg=J=E*Tb$0abEqN1mFYX#B}veXYSG$!9^5`i~t8+ zxv6zQyK3OR8PxiLC?=@0OO2uj?fawDKh{Fo!CPO!ADFD&@Y;*_LiY&UDWFt%cpsO9rf7+VAc1k~x}IXxZQoJ>pbFj?)Kiewyx<^5pQv%;U;3pM!u{x^6>De>6{W9+#{22pR!fY<{jCC+ffJjB6Rp* zqZniJmFIXE--s1|#itg&DP$YKcV0T!T|9g7A`$FPhw{Pu^BW8Oc~J58MoB!yBqT5| zP{@bmi2nd@-RE+C(WA1XY^qse2_o*$nS!93laxCwm~;k@G*B%%5@}q(9kv^OkgYlw zr5qd>xKt{-o~p%s`7#;v&3IplB(VruVWBaLl2Lz(bG2=f)4K0rR?A357 z4GoRV9IV9p?lLmD)Z@_JF9t6F@@~wl)e)Qe5;rJ2^Mv^(W_sq+w&yIt)4lJ~cDMcA zKjIQmkuAN1{c%%t|1y99$yceV2yJ?P`N45owZ=j7AHRHQ0E!S)y*J50dCBPLYlXOAq<)|$cGhOTEvJd@<3|W@P-;1i;=FqGYGlZD zv3`GZcsQeSP*OtTJeaPq!0q1^QqOTf!%n|?&8mVRAhzole5A; zl9Q7#F<}K3A%c`+%FD*2Mqgs_-}TOiFBg_kr1$rCM}PmM$fvpNTxytk&m}JW>s!nX 
zSe2!%onM`uosk+L{AD@n5w)Ph1%Q^`$is$@e2;LEqjVytJ$Jq(sPp7NwJLAUxSLIaD&h6X)ox?9sJ5uEn3%5KZ`C$GuN zOS-u1kkbtpnfJF#}DC%*%w2C|KHxOliP z8Y|*X^$?dtMnOR#vq8Xa`qq4ZaVs0c=0fqXMB29`pg$-t`13JfU)frz1F7;SKYaM0 z1AIrT)g8zlrGTAr=1!&Z4f)}i7Nb~EPujt9N8DR{H&G)WAD^YQb||L-s)L?#U7y8C zL{}A%E0S>;ej=-YJ;!f7hK=--J?Q*ZXKV}dvhNia?L0hYo(-Fz7I1A~w3Z7#Z2tGv z2kRWMfQ~X%vu#9kZ?OPe@!=bGI}t!k^L{S3I=of%@U%8OZKjB9-EiKxI@gVPZ$?>< zsiod`_Zei5BadWly1j0H9WH8Yx;+J6xLVGW8YH6lEVp08nyt z=wh_3O* z>VcJZmFS)HxtsAvQBHvIh5Mo}-s}3_{dI0#i;nviYZQXSmw+?0-2Ym*yuX%~mGybN zM~8F7pW0%HB@AuFZ?(0gn~+%K5kV zAx~x*i}2h_xw`JdW7?INpZ|3q!v|1K3Q~evTGn^&7Qdbq|CW#`kz^G;X}R}K*m}&j z+|BJC3p;xQ^tPPfl?AcpMNepnu?lI8rF=t9XUPcsc0G8qrY*7{N;$4aF6`6T!<>HQ1E?~hnMVj0x1_lOn2Bt#VwhErRo(K7FdwnV~|- zZ>3YC@|flU&@ALRJ&K75$3%08Z`n9Bdt7igTrH)a5KIG_}m#p*;pQLJj=#LX!P!s z{Id%`b{SyQ2oY*kU%vSp=!kDKA8_g1MpjE*(WQ-XX4tc;+-cBZYH{g4fS9th(Qi3k z)NfG-)c~VIh2XiUrwCx_z<$eW8sGxR^cuIA&ugTWgxYxS`g5-w&GIo2gYFcWo$@E92|n20Egx4>Qc`!* z?#k7UfbLrMqy(u2rn!|zP)^fH0gs+P4+NGZp4nNpS#M#6u+T!&lU`*cPC!jJxAhyt z>!BnM=0&gUDygf_qSNv~d(-L4x_hzgFv0WS%J(mEA+R}6?mKJVaMgfy2kDeCywtRu zagfi6a~ui+fD9{Q1geG%yuEe5d2wm(E^x_l06j;yY6BiUBCUXR`SJ7T4_G8ZUckr3 zwGVerifoNwRUGWh1%`&&HL}Aafv5E0q_&&)#DoC|QTlxa547CBfB%l@)o2^m*hMcS zbQ)##?FOIo2NA&j=@}XC;fuFJr6nULALa1eCM2d42^9s*gcj~-F}j4{)|qc<`Xe>o zAf|0W@5pmlQ1|uo8-;R`J>30ME$-%g@l!`;@9irzHALBni+gw5vPa9KJ2RsPGSxVG zXY4-ZUudRtTk;+NN^+M>+IhMyX$9I9Yg$@bb5KwadX<2X&=9o}$s}K~gH6wI=WUI9 zYQ5yRBWi=!ISQFE!PE}E9cM45|fZb!;epR z?C&(?Rv(m^755Kv5p7m2+wY&};D`o*uXu%(RjzBsZl%O}TI=;)wGVM8WuKgD5{Oek-t~UVOQw_LVjE|2WnlGf* zuGSTC+n$VGiM?Vw0gbp z_M9VDkDoXp?9sY+iqAsh-R2|+GGJ;vTZG!M9WIDoa`vdBba&@DpsDB>j{d%LT~^k* z5vS}>^k6GR2+CY3z>0j;%&4u1SHG;B$s!B|&+lXvJF4L|H z&##}`G+AMr0eT5Fn;vwCabC8IRiL*5?oCWg^qS0=yAr1Wj8b=btlnv7ChguJiKq6@ zjgg}@fc49@d8u;m-dyLim>5DoiBZMy9k{nOtsWbf&SA~@1q6&kkJK+`Z1pbN6$NmR zX{3#wQ-QMey(K09%H&l3&gdfHPkhiB!uHF*NxzHidQgJ7JocQ@m01zC+h1(i2x|o) zt#)fm(*$D#zpNz!nT&8+)Q`6~rderGI!Q4xIXT*?rtQ~jCIijT#>Qp|G~m}M0W!a= zppNe)|6w`(xtyQZ2P^Wwm{29SJ1RFK838^L%cLChp|@MJ`u*2`{JYPWQPxTz4$Pmj zFYvWNM4g>A0clb)sbp<^;(DTXJ;3n*kQMFMIRD2F(&Hfw3R(k0@R^g3m0}(no&xxX zaOjvN(wq%?#2aO7F6t`PtM)&QO`zCkCGM= zesLv`iQOa%NVz4^)UBYyOu(ZQ&D}wcA>WBKMh6>NT-vYUgwRi{^N~> zanAnp$qyekPre5f*$$d1tRnk0D*+A3Q?79e-5E;b6D>NNC=_rrg1gV|b>ey+|MhHs z{>!fZ%4uShmqOay$m6#43yh4|$t>?@?22aH{)fevKKNGSil(d&y++q*{r`BVzpb~Q zi{sd_?I^&V2-k-vWP*D=@%Am}o!s5LSzH{g9vjl?!wJqVH2a6fLr3D_gFF6o@-68_ z0`$!`S4p1y{j}0svhL(z0#pxB&8$yXOhGKO!mR<2mO+6PtV8Ko#H;* z@#fmKYhJ3E(m-~9D5*dR(NhtpulOK%f}NdMJ(~j5&kyn(mDmAwqv3Y}x0GLSt&3L8 zG+*B0h?)&vzfKEVg%&KSvrJ4lF?^Qrkg>CKJud|GzOEtJ{lY_?qQXY1-$CDyC?2Z# zaC&Dkte>i{(8N$MJpS)*KK<2fpyXI@Z!e+_$%Ik)4Gd^pj~6-xZ3dP?*08N7kSfGu z0N#B49T%X*TA&IKp}^%%A^s!u;bS0(Ho!w&o`2Um2JN#^P1_v@bt|*eX=^lq%Wccb z7xp}Wc&EMPMxYNA7u6@wh{OONVNIv~g?7rD(|bq(BveiWJ_6RH5q_G;)XY8PvLxen z&`E2DZa+E6qwrL-tK| zSIK(P3^;$JWvfJD`4#EjQ9RZdRLgvLedHV&x_+ zXb65rt+u5a&v{HK5G?1pKc}6Zp1u@(1(aPxj>>r;-QbNGeo3*@&=9cvAWl3pq54aON*J~igZE~-UG-ZHdL{xtg`NGW94lntBy zQ{wUqod}Ekd|~CxvgCx93_qL2kAIe_!+uu}$Q;b2PFB@Y75NU%RBfJq<5K%oItF-b zzz&5FFXgm=9iB-c6~Gg?X$V#XN1B*M0Q+Gr3bh6LT?hbGU63xp7-C^%^=UCO-B1TZ zZ{<$dLNlQ5M~@zjbXpYm=Q%EFeNGS^9()Z%^ySN7KpTCQ35e^er?0;Z(t`_A0fty4 zqtIHL4cn zj|J>PI<~`}7@kroRCxL_`{a`ak>lud?}!X31Q}1Ed1E(4k0_=ZJ21I^(|`8puNXtJ z>D}x)KMW4fK6JP6+9!kQ4_&Ep)W@gF9Tyue77t*ewwkpMe4t&7g4HcZ>okH93yctP ziJ_OvpqB0!LUB2N6!dgNW&G3a7O`qQ8YeQ{1W?fGK`#GLU+-Md zSYN;0QjTb;4KI02-+CS%80mX#mJf@kDviFlAHOVEG?m~<+Z@et^~Mb%#20mgjjY85 zH=YpmM_kxWAfX~EptWdsv88qe$WD|(PMoZ4Y=|>HVAFhY3%+a!Fp`~02h##)%u3Mb z7AsbXkC4!w=KL~Qw)G$q@QmKrhajgBZ`=WMpsXLjmSQJXo%Ae$E0{ 
zraDg{+Sevz@KPyS-_Z3zAyKC)^SlE7eKr&8qBB#tB@A`+vCCdWg7P7-}nr zV2(GWM2G|fP175F?}yaUWvDc+vBlQoGRRLuHT|&{Qstw^N~hwnP@%NKR3I}rG!DV- zM$#CF-;aF9pyK0mNsWXnkTLJL8Z>Q zb6ERoRHSHgKNL^#%o(Ww5v~KZqI)##&p&VMC@rrKx9~oFxnQ3<-r}&CH+Vj&Z&s?X zGl&lB>7$d_^h(TU8tA*CLr)&lOfZPx4YW_4AUY%eQ9Dh*WaxxF<3IS}eWHTMi#dRSECVWaW~?u_o54g)cjwZbCdNf}>gO1~*0@#k>u;3xfJO2<%s+2k zZqA?I)WZ4t8h;J^V#JOH1(N8wnB`}S;&H@Ma&RbWWCR{U3DJ+FP7^MdHRB6f6+KfV zwCS0h)o_KOG=da}XNV{O5c|0PoD<{Q>l>>tge+%z)XO=Pv|WkQdUeE`$7E_Jl_mM%1*KLJHxM!NU1)meyzc_8lQggKVj$}wAfwj zU;hqN6^HUl$wnpf$^xgfZ}?!#WMr_~&2dP>%?H#qRJy$X;y~#Ma0Z^Y+QL1}bKSIvC_<7=@V&s5ABWTbg&~~kh!@#A79x4fi!5T;_^d?gMhR|Bz)Vh~<*k91l z(M2F?E%JMi?pa>%);Cu?X@Y728IS3{LN-vWCYIj>H*jCN^!UY# zAh?;JbLfI&0r=f~-83--mLJ$moIs}&s;#=uMl+5}gwdpS5@mLqhtYO>=8ERz3yenM zXuQQbIgc9gWGWL+kq8x+Qm$ili0E86saAA#Mmqgz%i^H8nkwfP*E9dy*1eOhim2sB z9%4ZHEG58JR+*Jt3UTLI{E4XjK}tR&C`b*L;@fWgVQ+gCQE>J^>v*W1-2lMJ3RK_} zkkd=7Mlq`nc1#hkAt@;d2^aa3(1n8-m{?J9TF?yiWbY&Rh>a z5b2PT3Gsm+K@fOKBTdW+cG{POg=xT6sD)$~Fy>3f^jkOL6bgYT%P zcSa_Z5(Bjb0$$$zR@woVxokkyHSl2H>eON)R0-r2u#1s)jajH32DsxG#CSkSuLb)( z7-qXO{yvANl_ z(?I*35z*JLUxhXZEcea_!$Q#SNLRde{rU%}I)R|@7W7s@Tn$M*K~*P4NCY(XoM4-d zuuYq4Ws|~}hk3^OwRSfnCR2Zj(WE8+OdS+%^015~@ z0m(5OaX&!WM$JZo{=2_u4WbMHT|kLQI0XVqwwt%X++>b-!URxw>4* z54A;xL!!RhUCN6@Zr98bvGUf$yIoA zh#yjIU0+}S{L+2m*VkY8`1oK#bteIFzI^SZ3WtCI6y>KG>TQ<)dEY5Z(SN&oK)2r4OSl$e|L(#?$17VzoeQxZnmFC0Oq4yi_l? z(5x{h!M4$pDb!n#PLW_ENHS}#M1aDfqmBU9G2v}6;AQ}(y1%>G2;ns=xcd;fluT*Y znfaO;y^%Z{lnnP)S0omtm=mpYz8T^JCMo^vah1ezI{ycqLV2pl1 z%U$aeuGltcMu?s}cLbm)b=CHFYVaJl=CzY6YyitMVuJ*Q; z(c|;;W+*YxkITtw!6#Q#R}Y4?SFgUdS4l~Ud>O9PM1oAJl$#UYSe@ivLsS0Ikip3p z8o5O-=LQCMi~C0-4W8bcXDRNne%0fN@2ukfzPeXsA*^qW>R*)SOj*1}-u%sP0^3)v zT>044v}_y$e!juo4d(;s1MJ%`VF|7EikS7hHENYrP_S?~8ivz%(krkpf*z04SQ%c^ z$Feu6*?68l;_KJRC`#!tGcStW^P|FzjSNj@Wed*0jDFM&e@gPmD&ngtea0P$zZH;a zlg7y>Qg)iRS+J!}T@K_n_EI>|Ur@j|e0I)oJmy&>U;iy7LrEZIRON)s%_dg)I7}0D z_Oo6=8Ll8(e1_{rz3qwBYO;S4aeO7Ulb4_A6gdyE zatFUv)`$7x;WFXzdLz{$Q{m%i7e#z=o&3LY?>QGE8SU3drv=W*Duu8<;tK$;G+DLH zR2g2=Lq5k>q6&#yRpxJ~6?W4_-<#bq<;clTD)~fVb(Fy*Rq%xe*G$g3dN1v3-EF6T z=>4oyGK)mgtr6J#fk8puEWzTHx2{JrLVgI^F$>vV@m@1XFY^w95Z=?}evzn}d9U+F z>`U#D1OvSU#~BHZGC0c4`#|z{czSE(Le#_9PF#3T=sRg~FQ; zbPhtS4i=lnRg$#2rXNzHudgrmIxc?S zOS#ndxgP6m17MA33s#3UN=iv(zfxswJAXYyL0286>!EKt|7FLw%rUugCa;burGaTg z>J1LF+(nOFmGjtAyFHnMYF{;(S40{9sRecqpn=1dj8$OPU{`|=l>}iC$&-R*V!m$j zVC9uEE0UasCabzxJC6%#foseek4RG`B%Xe;jD?D<(m7_?zrv}r#iEgJpH$H2A87(t z824+nWnnZXo{S14>U7C9;=8QXnIjI8lw8x-OaFWY<*TIF>7BEQH>ud7Y({n66G0)W5 z68)I(h4~|c8iS%bgQ9UO#lqn#9#+5B4=&$cxfY1~%A}e#NB><(KOi#}np9Ah+tT6H zc2<3nuZyV(wt408Ia=Axoeh)B6TE4@mrj~kf0r`F1r538hQ1YbL`wq>Gjr*Id%$D}Wf*RbcrZnkU_9IR~y_VIiej|z^BA{lP zr7ylJSD5gm_=~9foVtXwFab$faSsm>usPpCAr7Y{KGv>sHB&YD-Yeq4Gxi?i2BqxI zLryrZvQZJf<-PtkAv6lIGKx{;(kfkE`fVX{AN)-!)!tEG4#dyCSw+ng*b#E;Jh}P$ z*S|h!nza_&MX(9*3}_F_Aa**g-3koMc>v0;2J;yW=48j8d!u%uFmh0EMoY=uualux z&oQaZKs>4GxXw|rr&@NoW|kGAuH#V)c}Y~1Jgj(_@*n>ilF=jtPRHET)bz2nb#Ypk z^cW6#p_2C(U54q?6k~h}&kXO(vk$u#sPQQj^xrZn5|6}q(o6iLH~a@M=t?Kk3bNOh z-86oP7be&Hj3eQ%mB6kTz|=Mjp*PUm=ny~&nS@i{AX*VCreC=vL`t)a#!W!46-8WohA_*z}OgASqZ=s)S3dP`Y3d>A>VT^>KK9RZq|YkL!*8y2O87$DV; z;bOrB-c>@u6hcL zLmh54iehf`BY9nrC>y~<+;8}$V|ant)Shy6xx4y+Tp%u|zMJ7p@?7+7%+DZQU?^X~ zR_>pp>&H>P;$;%xm9fJc(6Pe3h-oklIt3M;N;4igMhX=kxuI`{Fr8_XR}I0+KLy;5 zJmbw9gmEiErn0TAO(e6AUuN1biE7GilRKzLwdKKd_GLA zAX>RQ+lk{{4?`2?=kT(Ly(?R-;ik&LWh(RQOyem;WA!V`Xoq8pM~E(Ggx~G_^2ZeX zqsIL08dh^>WMr_!wDc4ysvSWr;lwHpLZyAA{*Dfui;gOw2kaVW78 zf@e(&J$WgeQyHBm2VV=ghHfKP(berQ#q&#H7MTd(#Q~&jQ{&qL3%r()KX2sY$U>++fDItNZ z@_xu(Lq6GWvN^iW=w0dNMax;R0U#!VdjR?j--tb+AFlgfP9u36`1PF6k`^TXu377= 
zSFb{}I=Q5zXHFna0O#X1$P;Q}Ar<2zQZYK2dxTHS*=wspg8?{_-{t*D9XCAc`TjwtrlgWcsfbhnR7Y!-M?6P|8I}6T;oAgYfdO*~)1YB3Qu$ z2BP_S6N1(o@HYcV znKW$fRhs+*APFcIo{8~Mg+V;^(bX8O+^fpUPs`o6S0;P%4GH1XX2Ksc;)78p4KrxU z8Q%lgWRC-qLefQ0j3rW)bZfjYK*^|sLe~b04kC2U2?0?YZ;U99AOtDa^UwncR5CDN zppZ%k#Uu3emE)k)K)dn-gIY$W!Eo>dT+Lh*Jsw_n5WJ>o?ypgsI?A!Ub2``M&NL{W zk^NSH@wvGIyvSZ&IA}yMX&s6T-*4J_jlXp(dp4tI<>y%2kGd_;rJ{n>X}~=Saa3A! zpg>5B5%D6cr&!4dE~ppafxHGt8NI8}F>AAs|+^H997D<@ z5izOQ4>y12y_WCA9YO<65%Ect-GzH8N|*0(SIy+fbh>q({^vQA71MgG{cK)A zUiShTNtv)(oXqAF1GfiHq87j#F#~Uey<_aNU!T`F9--pyje?63QH(@O!`Vm{kv_BCI00Q$?Xq_I$}t@tLLje#!1+@pG9M?YJ=*FX0k z3Oe0a|6^=f5%=p)YDWufmRXjYy`#tdT%Y{QEQ{PM`x zK@gh|=xpKK`AuJO8kNm@$x!j)%56Yz-{L61ru9C+^lrw$k z|B`^(*F*8w93mmjvbVPfbLX%g+IeJ^&$nHHai^G zI_q(pmG&b<1tx~~s@(qUN)u-Dcrj!ah12uE4X%IzA}p4`e(o>BAhJH-opgXgh<;sY z+N}p;uP7LN|7oJf{x;EhhyK$-n~Y&LhR^$FIU$F-&cjE?u&oigP@}!&4}Y zzXs7pJP_6!upkl4QN4E@ac2;V7UUSbe zVx{Y*-of5BZMWQw8};Do4Q7aNMAeOY)r}fQn`$mnq0cTdF1Q zJZ^)pZdH`YCCsV78iiVrHtQUHUv5pDwBoieI zHVrND_dyOzJj?V_Fdm7bt%{)Lw=Iv~*}H$i*WA$N$^;+C%}x^Q)%g=S?XPz$-nxa# zRtV&9U`<1I>MdlDUZtgBp^7Yri2+U_TMu+K!~^{j zFB}5-iucBscjh<2hCu{hnF!i2NZL?=%mA5q&5#4#Tcp^z!fmY(Mr7A48$o77;f@Y2 zz?mPXkr`7ruxBooNQ%LL67gw%6o}X0W5eh~Q%ZFMA`6hSsn$%|jKa@s&1oMJ#C-Z_Rb+B?a2(v?ri(WN*>FzD@aQOGB_)Tp$wY*i&?{d& ztDI%}jU{)4knwi~G674I=lK=ebC4)GMn&b)G^M>kum-jS61)fIgMQ^C4@n=0Dd>Tm zjq*ZHSpeaH7JscFwG&Q*2!-$l0~gmZ7}jM#rVp;{cLy=G)|4$M(x z`uOcz&g(BPzflp@Sov3dDa_&Y*r!+&~f?VS+gATKewDt@Yw;Pp;> zwmxdrcYIGHfm)}h+t)A=roY7^cR*1bdS062`4p1bOGG6_J*WEaD(FmH!RV>vwKK|c z(z_ML1TJpXzc~w%$o#V^)MQjtIt}AId-2y`%_hHkg$)63$6}C05Y-Y$FAO-^E?ynF z1KtUjP7Nxe2)MyGGH3g)o8XTLwQQvS-$_ldnkw_(CREeVyt#O`>T2}su;qBAKQU1m z22`G|i(L@0(4x7*$xpM*^}7RUl?NrS`Uo;5ed@pa)T=&B__P=G2p1b8up3j*5#^ChP8S+7|GJdP!pGu(C?`roXjtxvOof2<|+S2Q?1h{q)8 zXpy<|V~NcgQ$-zL9%Tq5Az7woZsxw=5G|Mo!rl4{W6Pdm%}DRR!MHN(?1s^1lri6B zxgu(wwv4-ye}G(0MYBR8b(qXis>>f!Zsc~KWo7*|Smm~DW7eCGgYdCN#P&l{Q;>l4 zMI2ycwnjBqD-3es<@)jN8+Q@93E6pM-h|@99aO4P(5XvCPGORfj35IcOBgc#zMzSF zVPGNa5kd-y$oM0lWqh}@(Ys!KX;)B9Kn;q3y$OX7mENBw48#P9f+;B-hZ6;K^X@!? z@S-zc1k!q@S(*Q*Xm`?NzU5pGb+~*+R(igGuA?k8*AVEj7>&9e>7*z6+>$Ft+2nG) zm1JWfuXNmu$$rqd&rvhYV1>u6bZ!!08FGV!vs+ZB1ry9Zh$qV zIt=s>A&6m04D?WU3&bBD#O*h#)8y<__F_dnDy~I8y=88jT-FgpCy!VKcw(=rwy|B! 
zQW|B5{^_ib)7T=#T3cH;`M)$Zi7P0Cs|{#64*DTe?!6v}AuZb-2ck>s1{53~R|Sx^ z4DJ&5Z2sn2(AvX{fyackC7JsE2jrcAnOm=@QOA8ED~il>ht+X+D`-EpZ(NhOG!(7 z-O^P%jmgW4dOx_nQtK#3xu-a0Y3N!=G|A&o z!gz{l-L)9$_eEd^r7IP5%KwQuivB*i$;2NR{I@s9Uf&+~zXRy^M`tx;nrPu=HlbmU zWzDU9v+@!cX!jf8C-Vrfqo&oqaGs7f*$ChHS9eHm{(b0^A~kToK*G*5?3bz9-=yjS`A$gO1FpOg;l z;z#ZOg>A;ez;@5LT74Q2DU_ZuAXJAMzIU*wku43lo*d)a%_4zjx&9YpEj<=5s0c1@N4PG zxcOovF|hvO*vZZBf1l%}$r~mYw6#R~Az&AtLo@ehV4m(#kNNrsWVg7HK*aOU4)8k7 zzM>+b6*L)8g^3E3^8<=d6<%oa=MB$TUQaAeLDFB_+uPTm$$&^K=)(WwNlPs#kYZC7 zn!sx&TYTkOq9 zhQrT$k*tH5LZAMJDTGwm92Ysi5e0+YF)MxqkPcWk8k8h@H4Q+j=7wrff86nmYz5_e zaa(yEU=;hIq-Ncjd-P+n9QaKB(A;VDMtE~M1qB%xjl4?o5P93c=`u+7xARB@Ku`p6 z^HM%h!QO{ltKl>M<14(+KlAQc4INTmW|gk_x{L%Oi2lSO> ziVrDMih7X%zrpWCmh8}h#$SWLOa+|S1({;2njV+}8i6cI{nqk$9-OLybPtdx=tL6VP@o)j91A$AJ*wLEu z*yz0vwd zlUgX$JY;}J<{n%OsKW@Tv##GEIY+elc=L%5{y(^qEHvk}b1rsUQ@RXu3f1($-lvTQ zrimLyu9|v-X1St8NgZan@&z#qpwLK0jw-yzO@@gQ^EP^j>bmoFd0HA@>T-|ZHcnJL z-?!9x+qLW^gu`4%|20*#A7x#-c^l^HEK>5YpT@_B12Cz8vA1gq3Jox1w*SnE4(50f zmJG+4jldYF6dX6Q1R74O^$gh2!(n*&r-C8U7`&*$pj zp3qbKn>TM>g#()omLoh1)+Vt~FeZ_QK(L{T(%$F5QFQ8q$zgUMQO%Jf$RyHMt0U-s za!*Zd4Q?F=)Q?ZE5#r47Zu4(ELzh#iT{4#RIQeZJHx zMkWoc&9%OJBA`kc1kue$;3gLH5Grzp02!WK`Ou%nwNRStw_1|i**l|=d@u_JC&WPhxjnN(w0%AJ$Gq1cc5$+^Cyw*Q!cb-}B(zogg~Mss zPw}v0@Dz$!XCKa&0D@BII8*kb6MUqyAHqZNe+dBo@r3>kD?U`Ec3Lbs0)QJ-p*wI2 zA4&(TPJX8)dN}WG@{%_(3LhW;PPK<{Ll|`csOfre@{d}}Va+0#L@HBn{x*qzGX-R7 z4flPAiSLYL8`~F82xCA#z-hi^$S!xozY!#JjmXe36YAhhb5zFu22HZ+&Ol7%jCFxH z7UXm|sQ+I3KRMhF>ah?hHVo4pa&mHB;Yc@>Og8zpLCkKcF14m{Tk&_Qy=HUn)2#Lc)|3;YtoS>vW$nkS|DZ2g$ zbsQTNI953x>Y-1=jt>sGnos4cPqR@@#XXwo_9dVk9036VGyJSZngesEe%OsC3%`u6 ztlx3Ag>=p1(-1C229<^TVDq=ZRIJe9-l_=1?+`s}3^^#m926gEJ-y^sCxGP;)4>=!-TuILbwiu}RzO(3WyCO3HNGZ|IyXOKy_zu1Bp6Q9X9(?0WPfuC)$EWEJ! zP?PHaBO`Lv{C`1vT9;q67{TPvfV&3N2FMnkg(DGQRYAE#&Onq2KOOY*Oo{h;dGu$` zrd+fj`FUOKJU4O`#d8%Ixa1Z=)SZ?8xGVdX?{xINc`G^Y;EXJQct=h@R8V$d?xhwE z=jqBe`{6D8$uGH;n~21lbHA_}`+y%&6k}0_kAx&Za9RGumYWDSAIbYexv%)5#zKay z{f5v}Sc+tW3rKDPW?M*4ocK0hxCv(zz`$R5#JhLzFi=0wC!m2NxWd1FRjSxqX@sUB$4bgSJIp%79+u(k61>W~&pn@O)}Yq0qI(7Y8?fxu+VQgCBzS4Psr#tQhqp{wb;%s2B^Ogy zxNq&Y@2s~oR#*homVB5FO7nPx6`amyAdSWS;5&^G8izfuda#S=(oLtrDlYY;%;}`0 z1||kQ4vPh*Y3#^2aaLR*Xt(J#M!lo+Q_&sge>lhaboXaIVAZ#y9Z*za{|aV(v;Fk; z+h;+RcR|{gDSt#tdV}q~O%jpe5&X*?Dc06R&h=d5RMzdlTff@fyKgRRd-A$c+x{uD z7qVh6cT8lT+Na&;KFOsZxwX8BGaz()mhO|Aj>kr0M1nZ(!h5%5UJBEHKOA#Fe!&Xu z*iv!FwY+r`mN9qM7@ifcS|(Bzb!bxHcviS3X*@~Ux$`9JX|UV7IoxLjBziN{dkq{tPfnD-?qy9Eh5X!Uz4&rP)$Knkggm*sQ#Ke@~y)dByiIHi+u09 zg%)@MT@bI@l_Xxi*o<0pXnMinb3Bt)#VuK7p5J5OBC?ThxykXgfI}@Y*)=!!udf_S zdB57@3ujONsnR=EHTpx*$i!8<_eE3<+8qP#8fW%&v6Ov!tD_a zqGnjbr{^lo7|%OlpJws}PK?>T?(pTR)3kQF-TW2yyw4YTxWN*mcI+b%^{}b4>a_hL z;m{Ot;pTKsDgM>gZTqk4A;+CP#-4EdmCw7eLSOh(qOz$ns>WksYMg^oYP>|QaqmlX zsP!9L$)IO-zmfSHsq4JjwHAZ%&JuIsP?OkIit%fiu^w%(|J2k=l@R(BFXkHz1|qW3 zv53E_;iIpHNo)!}J(WWfpYJ!0d3!vj(xeuZ*fr_ze6=@~Mx8V%Ngk?$-DZKU4-uUw{kp5ew4~qb|%V{m5yd*q-TOl`SfX-ge=}f^nt1)v}>P z8=AcMe6XShbGZp3!8JBK&ljzJWwB!P#$`I`?`*O%Tfgd0{ba9pR?J74%WeBPWQq1- z3x*$U(U_CB&a#)Ury(!LL%CWW=>Q6x2?3nP_$PK$L~^k-`JdnRZm%>hT~@Vh$8Cj# zviva#Rgv>T`J_#CkQV?MTrsaJ(KR5S2nr+0Z1|T>HY*b63pdLXr7v&~EObG`k zKnk?;hWUwq-MG1E>ewoyh^nR^IMMW$`U5BZNsrW*2)M+4D2(_;FeD+n`KoqIs`lEj z`?;}Rr|r)>1-Hy?U2*vLs@=8;&fK;M*vnwIO9`i%#w^JWBd=|OYu03uk}t?>iEA8Q zWt~B)lJoF>aN7B2uAWoIIveNUib)NU49)!cPDx3dqs*C{9KU_P;4JMD!ujsX4#Y=9 zlOo<1dOgUqfUII#_jop#_7fT4Q%!>ckwyD4fBWD1`ug;8_KYvP%F^*(f^XBbQXjUT z4ap6)j1sSI$^4pJIlBJaiKthmE5{C2Wr}MAjo%len}u`Uq16c*9PC!b3QgMxT`m=N z6)FG91ctQhRrC-cDLjHeSDvp$MrR3>H0Q3`gHALNa4|)rx`_GV?%t5t@!v>RemDpQ 
z8+xTQVn)tM|6yM@d`CO`pP5K1t1nM{H)mCVtmn5k`-M7@`o@-D1b0H0(w1}w-qhT~ zQTt-6b%d(rjghuYxSnk6*h|#NoSO=;m!)k_xs8JNr7U_d5Zih9cH_n$kGoYU#`^v_ zCRTY=|FT#__U17?J?doG3Dsrix$nUI_{q+0E7Sw2dIFWwmCKjEc0Iz_1|B_B)P8{K z8n;8APPkSkH#EAao<=8bgZ0ZfkZqtAPn<*XNY+@7bBdDvlvtca(hWc-37TOnQ8Nkh zZ7P9O1z6t~Ak_LfMjsDcy+l>`ltktk7(m zC120b_n8Rp`x`z4!iE!~!jvB)bCE}#g*a_TZs*+6PagSUR#ZS+mFS zs_>}L{dzNNmZOWabLyS+=Li0q+&1)IRW(;^;{rPyOQM7BV9mghi;*gS!92`(?mH)s zX~AD1TWG;wDc7(&XF7jPp@^!Xz-FEa^&&T4Oks?u1c203JioP@ze+IDtYTT!niU8s z?45cH8vh|>Me)Z%tx?Nz;xL*X(7Ln(Eu;6;jhu@OG(_wnYWRb&{ygg3BzaQw#2cCJ zIq(;W%r5EBM}tXPeDUgqT3#ZLY-K(PZTx9dL)OPHd~*Ky)mb%m{e4s7ExGpSEs|N- zR@@$c%+@}&X6=1L^K3)&fN17>KEH2$tIS<`^m+_ko?MjY6Bw>-881VPDS6Z;2=r=d zTK=0Rr5A(O2IoTa7DA1$fS-8GKL8yxK=Rs1s1ZkHp{p($;Et^A{>#u+`pk1%SLywk z^7BrA_6okI>lDr#UmHo}a03@aBuMPm%gy)O9W+MM3k=U6a5cU4=nR+LL=Zn~$Tm4{oDSh<0x zAvvIce5*=FrGJ~!kX8oiKjMU#0=^(Me)M;3&?`u~##HG46HXyBNgLfe}f7O4J30GgUbu8J!vl;jVIP9{%qq(zo?@0b*u|sgN6t)(+ zxtMH5+a?urR8CA@_Nw%nLZ7=UBPWvOY{M=Zq<$rD=(oonl2`wDEoH=#S9W`s$(;5lzAxMs`QIrvlwn&Iy zzO?u8^_|W@XH^b_wipbQZKY0}SgQ%edbIHA0Ba)^-C0~*70tF|*gLjto}r@5WDvyA zL^0*}_$}1h5*mDeb-&i~n2yUy^3`|3?d)UIAGPoJ+;DO#)AIIiB@g*cA~j((8$NIY zpH*C2^(K&#mZ!pRF7Z6+>gp1|Vs2)3_4@U)f95V#T?A>e;@c%}zSsA;>}HxHcj%UKnEfMl!@a$E7T{#)rKb9%g^b{~ z-l}TY8DDsDshs=iw-=s-i1!>fiTu@$1WA_P>o!_OD{J{^937=-$?I-K@XdmRDn;8LM8+bdxsx2Ny|>(pA9Gd+p-_of$~Y=!(VCK zoIQ&`nPdM-FlE_wB3GW<0p+j3KR7zG7iCPs%Pn%w1hXav*zr`SD<>`z{!w3X4?DGs zU->zw!lQY~qiWH-3;%s|0$c7G@2#{)n*Q>MgNJIyQncH_d6xqC46;m*D^5f6%jJa+ zHde-=)5hPH7EXX`G+_K(u2_-s{a3*@I6FeJ=Dc;{y@pZHaWR(}uP_dlJIh4zq(&*V zJ!NM->oiFi^{EB5J&pegu>3-woN&aTO~A#za=5M1*y87Cgzau+uY%a2nZA;QO8ln1 zNjX0Vqcet@I6knOI3m9ruUSyEcgShSPF_%9pR7{=v5WdDA{(x^URI9x@G|-(=_v4A zlt4`bO5Y@!u=7T-n0E8xo`IZeTV_w2+-m@P5MZ!qyfQw+t$jYwpU zcCqF4$|=%Q#`@#goIsO6M8ukgmc>2C&s}&vAYT2`Pl~g?Sw-jWUGsRAh8}bORg5tJ zYJ~qbJy^(Ytw5DF1LQWSU%WVcUeyahWM<{BciW(!PrgS z#JIR;P5P7onWvnB^uqc+{^f!x*V|+!v#Y3+IUts^A6bH4vEOA73o>t?|7>D*o-BKI5%s+g zGM;5QJzy5zg6{<bcwFnvJT~}M-y}NOmWR%>knVXDR@IXKNb1+%_ebXc5}P)~_mA!;21%S(P~uL% zH|S`=0WceJvNnRFb(fYxInJ{6`D+Q~5U-`3&}F#h=JpLp!A~9DJy{OXujK*)MUB>9 z6kJTv$6_O%iGErrj2t1kP{dcr>(*mP0ZH@6#KbU8BR_)JmyPcrl$e%FNM!7aV&MpV z`W4Cj5B9~m@JO-cf!pHMHQ)8=FA6o(U3C~<5XLP8ymcj51PD{E}lb(IZ)-=%e zHcy&zgyA1z+u(cbs)WxNi+GbB9t}o0)fQi&e)5V;;0~a<6CeE}d9GI~`-?uxTWGO# zSsSB67>SQp+hF0Q+6Ndgas_GwjI2L)Hs#E=`F!kh|{Js|N4XuPTT?6{Q$9cOC0 zA(0Hi;vt5z9%3(S&9XY{Wq+NyX!5bkN>2qUQeFzbkb1dq+N!^Z$|>NDlp1e9I#cx0 zHUY@eSS_R`TI??wns1>nsmV5wD&e zFlhTbVFwpHcN<-^nXS)zz3rs#7?{|eDGb-(9rjOcNXmce%2{+O%5%!){Lq=)TYn%CVX&08x;BA{k6K)k8KF@PoVu8ub z=ZP}QOIgk)&%JBFG>_5}l#zLhO&Mz$hT-tIsD1f^w5c5*QHU0G`6aNVwpqz(Di>_@FnHAzD zPuO8)M1{b2Cov}oD$>^Xn!0UZwv($p1huX}R)ZuO+C2d_aZ>FI2Z{tNA*ZtqT1BXG zXjWY1`Hn=Qpq>EW>Xt&tkKiQItw$OAT}dDZN@b1ac0^ZG`ZHbaUr8h+B#Bcv@ab2@ z)=ymjM_C7xt0?b>Bw!oJM=hBZyw7C|MLZYUM~!DY50hmKvNGcJcg0Efcn$bdH8-Yz?~qS4}{Fe9=l+ zK>Bc7POg#YubT_b_@VZEwutuF9!=X4AoHoOcjB@Flkl?FXLDN}ciH}N$tte!yRe_K zZ*atKHoqnEYV*f{&cl@`u-)5q??lQR>aQVQ+Rj(v)dsD5UCLS|uD5l&M9lb(GnK=AtO@3M z;{GuGSjp5!Vc~XAko+oi+mfXe_f^t?fYt2X&iQi!6o02Js`o8)&~!o5$AbhC?} zj=&Nv;pi%I*r>)XZAQw5ZjwooTJ&)DMXTwDFE2!v5$r~R)gigB5PF2yH&^_LpO_c> zJ73ra?f$;sj(iEyJUl1;J_&?VP2IPPqR<|+_}1K8ir*SpFS>By@zk#aRwOl;hnKyh z6bTaOX6I^ErCnj9wuMwGC)(7cd(|`utgC&ZNgdUnD4$p6sQ|)Qj`o)FN(07 z$+7>Z`$xrQ?+^P@JE4PFI%VFp3A1fx<3Ia1sP3|R6?#tdSILGMvH27GJ${c$czqri z!!6*D#)={mRX0yq>e>b?PiCt=1Y-irrxVWpqis*+)jE;m6CZ7%Jl@i+Pde7atA$(T z>qgz@iNT6}%%vUtl|am~eormNx@jbI1I)>Adxe@*b-k z^o>sC@rek2Sa}I$4ui5w&fj(K=6wqxitbwCjHN+r!ZK%^h{g6=Pc3$Q?kAZDY&;Sj zHn!nN6VHAHhfpMFT=|4A>^nM&keT{b`@oxL&66%npZ<1<6-Liylj)+bR%qcx$?%R01b0}MJQB+0)` 
z^E}R$AAyR(eU!(;EpDRw^DXb0oH@4+*oJGHs7qulZ{j8j9UAeP|KTl&gf`ZlcI}5- z+bd@Zwg2Etjdb(*%T^aJ>L`2}eJQ<(k5YJn&#=I~=Zm!$Z@aViqfF$WOd7a@YvyOO zQh0ZsoUe^mBeZddLqI?Zqo+hnUcPwx$Co%0mQRn0rXN?}KbstsV`SK!H7-J(8FMJiM~nNeFz{jDG3-ui6X<#LLmR2Pk1P z7}QvWmZpaw@twz9Ma*`DR_W;@hX0=A_GgrQ-~Bb7jp>YK>Onj1?R9c6-I9s&@2^7U z!y~Pnw!<=(w!;~%78iSC2O2@m{E{}ZajpkH>_kgtd&ko$Pj9bF$&ibBE%4N6xo6`r z>tu3oT%n#7nn&8xy7od2G-30Zm#*d@di+nbuEJxTX}pj|=t0B_p;}uFe(3pWVJvId z!1qVLCR(GpFR_iyR zt=r;%qShRgkZ|%QhBj9l3QWLaY@mMoJ+_B?dWM8uDwK8b#!B~*w-h4u(86?Ge7fabTipT3*F^k2&r zmbFY|e8qEXckMa6?{ix9B;G)UuzzG|Jqx^1bVUCeW(Y3FfIRd(L(eR@ST$wiKj|R+ zsnqh1(xDkg%Bl|iSO(BTBiHz+6Y)!Qy9blK3qYXZdCogQSL9~Fl+uV>CVrZVEAp1D zcy#kaA9u~2b*M&%&4@GjomMU_b;z{ zeze#ULLQHtYj3y6=4$QBwJaJ;ut@IhjGg-qWyxLA(%(TbvE3M=)gogwvQ{iEwsmj>}RL%dLN@VrS6Z;(X=GZx~N$Y8q2vp&q;GOGh$%^3(1eUooxN z>NQby0}nea_-o|gYP^g-=luJR$9}W;{Qj^M&!bWMqH{LNL~q*4W%PuA{O0-tvyxiB zv4J?#rmuba2PZ<+Dae6? zsOrJ)0Zi%t_^=uIT~+W2-@E z2FB>-b7EXX+`~pj7R}*9zPbzzKFF>9I6Il+rXnAdLcWhOMPC~TzdWl}9ma4R47{bW zHzd#rh=vdeq_KMfY4HkUhrmql}BO2^p||4_B?XEO~Jl%=#4|0UvBrK+I%Jv zx485dBS_1NnPYz1lyPm#)0N3DlML%CBjb_yFp^c4GA{MLtOK z9-M#hz=26>?)b8%zT0>2Y7G8_Rwm{xsLwxm@F0h@E9QINR49|%<&i%*tjtC_=IpX< zLC=zpL@4tENZPp{*h|j^i!Hv~z2z$55Oa)(m@Kcd;MSQ!y>ri+@Q;>`KfhHttgE&g zbYup;(|UNTAa4S%I?@zqxJh1I{0~r?eiLG6}0l(*xQx%!n&ld(K@;7PO|?o z*&}88!nG@h(xfzsw!XG~P}a?jhB589$|pb6uI#gV&^^Na^vSCHPTkSgsNUXhN0z+q z?@BALT^X*-kEzNvy_=pCZ%`e3Wf%8T_V?_RJsjoH`z^G4ogYrb=ob2}39kkmU5*c! z+^}y;I=Gv zR^)7u)^8qA{NXSZ5m4}2$LQ>tkg>4MW+LDF`44oqpmX}kBYwp&$Wk`6}u zVC+NNY;RA`HIuGp4z`O~Rin*s*`%wWJcu8j`uz|1GPCauhgCJiWR5aAA?W^Qwis^c zg41ZRUYc4>TH^TGD5APN$B2Sl278`PO%LSL4^CND3)N`u7^@ei=fB^kNp0P2lB4}D z-*3C(;^HxiDZlxasYrL!Xj&H@1vaK82E}sLgvUBMI^4r|F@%Uw2XW!v_5^S=9y2go zrhfJyxc2yKz&AyWSSwol%=g!U!??!BXzMa2ZcH;W3S*ePIM? z82n`V6jH5F4ZxD%+a2-Q0ZZbu1B-Nd4k>`gj5@q~K}`=91`h~~-R1#ln+q|l(Pz@0 z01o|*Da&U5@MG3W5n*AaDa#x&;HEtR)5Wk+7>R>dLo^Vt?rtt?PeA)~9-|K1zpPE1q~1sU(+6i<56!5~af~{7b=8W8 zJLc|+S-vne_<7dAPTrV@XN6UxCkuGCFl-^P3AR`9>XEMBj|Am$K7Uv%BKi+fh|J)Ue5Kr3}7naJ_`o zP-0rZ$v0oHrbk;FLQ$O*M%AoNt@A?D%8f&zgPc_zWRYKC*{JA#fy2a#Hrdkw(Wp;JcU8QccJ!`^<|7@@IM|OLmVHP@YK^S)?G2_H~`(Fnk zw(o_?+FgvTmS}#Ev%Kt*%NDSnjUn&d4l(C^sU*kzj%PW`*Vz`&#^oshd(wZj-+we@ zsK)xrk5bH*;@P^oy2&UPf#2Xy40`pV}sB~3w9#c3ax##m1tm^oZ8lxe-|P1?jI`jZ;PKVv)=ny=rW`7^=-{o=h0~dfBv- zpZ@aafNfL9zC6t%UE`(6sEt(Q;hP zelN`;Ass-~Y8GWm8&KN7hJ@esnB;`vMSuQ)|M>X@7X&J-#s2Ss>Pg%Sv)a)`g@?|J ztW3a+3tg6ya!8Xd^E%YF8!lc@M>h7?i#GlF;><-#j2o*>xpm)tBG^g5pz;*=NB`y!Yo=J>A8n3)PpsX!VOLk>S^c5<~$S@s*zC zpXF^vmqP(G&pmLA0@dR5Dh=Zb&G3TKkFhu9A?Ig`@K5NMS6N<~)0pcUea!db|3il$ z+#hk*W3ucEc*U!eW^7&-p&l=Ts#I)yN{b#cN8xm)>ftSxt($AR>>*DHa+Lft*>k*i z!H~kV4LuB8cf*7tG_DAUVtj}Kebf`tI}4%TkBK3BFuk+{B9Dg9H?~H7Ro_M}`njFT zJ|#YuS-IM=K$Oc*;5`yBuN=bYk$RlY6Md^*BlOOP7N0+j1EfVRfFp2+c6=QiU?C(& zuHq8vCz= zSgK+o%ozv|ltqSD(bJbkd;UPQ(dRlwDy>8gAK4VRO=quocj?yd z(gW6SF0RHp$sd_AW3eaNQ23GP$l~qg1gE-RK)=1P5kyA(M?MDS8QHeQt1J}Ocf*X@ zj48AvP$7B-VN%k?tJL*4#FvPMnhkIwxe$)fIu~~CVTZT4Qi!4quB&;8^y+#r&Gx!N zp-r5ACfcOBy8K7erl=egOP;voraJ@$Y+R%Ab_P$!Y$0>qCqcF+`{@_2y+lZYq!NWYm z{64J0XRrlAn8mBS4P9|P?oLrrwk?Zjn#_o3n~OSmT$%F0dUUPI!WbCXSV!-qhTP=NJbrWRUISvtVh+Kyt2l20n1o%t zed-d$XZh!N+hI!{V*sWX{9DppkBPFSAwhm#a7K3!Awi00FRk@RggY6CN^I^Xq@~|#WK1eG&q>OkN9YKKb%`qR!VLT4V(9_bQ;alCTD&RqyqLw zLon~U$T(>^niGzsSatZJ(~dyf6SW!XjkfM5Dh^p4nj!x2NqAak9({BX%z$ad03d4w zm_1~ip@ zjD@kl57=PQTxy6R?IBp#W{mj6M`h9_Kagl#gQ%e5;~brf2GGVSS3PnWoJ{ru?*YQy zVEfZ6b%Tid7LMTg6fVZRJ?eLaQaTw zH}tL;+bag${;;ONtT}NHISUF_2$zXwSWb`l_L4b{xLzPjIZq(9RpFWJ8QylZ z*%q7SE7@0O;3tJ(G~2ED+he>iBou<6x)3ExMh+2+6)gSes(rBqdP!9YM&9SSZGz3J 
zQ2_2lVMRBtP$cX=t4vcO+gDu&W0jO5ci;F?X9u0Q`O2!x!y*#Yr$>J8?n92`(IS%q z@NjP6yed1W$N;f<1~0Wxa|v@nkgugWuKIOcu5}bug7oDHL%R_}nzZBur@MRNd-QxQ z%0pr!u)HVALd)!`b<|AZMvg(qUltBe$hx!62X=-|R1YFTTK4s;eSX;?0?=Hg5ZW@s zg2gVjd3Q@VeWPjsBF79jnZWSm2CoH@4t9G*>)qhZ*PUN%_A{Q8)48KLgzPugRuYa@ z4x41waO2or)Ke-^h%zvK8n3DrqR3bFafPNsSIffl~FlS1^dFq#2-e~z~~GPOPLM3ApLlIuV$nJDH1n`5|E1zjW> z3?2thSdTN5x6mMbKildfloahM*ub`y7~6>JY)=dAV^OXaqLt}D2FVi!gO~R5rQ_&u zcy#u(O02FK?AjfukkjfhGs1%C%!3)e0qwSSNT4f`%5Ffjao4`-Um`QRsb58Ae;Ka* zzx|h4LMG_+UW?g!AR!potYt8m+a*u7SHp#p8)BBnpM=03qLKH9#gQ@yzeV?i99%s_ zxt=0`*tQ(;FY^GbfLT+Z$6f5t3G|D@e5Vg#?q5WcE($*1`T>40Os;o7@gm8TE++XL z#2umZukRd@j6{dZF7uoXZ@LUD?TQ|N;Ip1Nh%>%8w zA$L8dFtT>Q(riJuSP^6}{PG1-nG{Z8$j>zjz}T;t`O0F8_6eoLyIhPcS0x;A*arN| z{tGiD`j$a{s1UMDW)J79@JQo?Mk8(H5ArdSK?Luw-erYnzbA?@-Os>ABKFk@m<`t` ziD@GkCRa@9~Vm{CxP;k{j$ zi>L>qI37JKJDh(?&`vl4C|We~6!;Ozc^|+#bF6=LZsuMf%M?c3gA56b*>Rff+aKGg zyV3CZk7zx}#h@Es^CHpJNMpO&Y!RUZ9V_;ufLU2p$7u+IWQrj-ol$Wl9gcqkdMk!H zJu<^zC<7!x?!1MjCi_`8+~}~_$~YZZN%roLWhF0ocl)sDK;1ojtX^7Uago zjLol3=XGwCra_uVaqo+9#T6pnPGT?%8`AxHFp~2wZY3?Qdb~ah+)D}M%ONkK!o!*5 zT8-f($-Yg8Mi&&~z3aku3?lkMm|;|MbNe~^r_sU8Q8BIZ!H!IS@!l632DbQ`_9bB2)Mf^w`vE+aiSu z#N$|_D?j#ynO1sZGX(n{qa*P8PcuY$;2sMBl?%Znz_F|X+-0aMI)VW47|OnT;hu$1 z`lX~EE=LZjQ{%zn$y^#&HxzH(0CDJio%2iFA&$5Pw4Z!j5?}1M=sNX#s&{g_@iT(p zG1Lqq;zJEbbN4wYSP!}%H17Xtw2HxS=Yyt50bb%ici-0ah|lfNwf33}+6`bM=sZ-= ztwkeEAOaim9%Sv1->wtIp*@i4Fib8%IwR%m33#buNE_c+d1dnoypb|_>mg_(6r#z| z94XV#t9a~XSU@p!FfI48Xh}0UBt6P&X>W6-;W?`; ziHTXA9(nfkQ0(_bq!9b&68~Ks{eEbw`^s&qxfiQ#0_7Bxiv1vLAPMx~F=m`y=K-s8 zFGe(Vp>480>FpH?&&_KrD(VqrjJ!1H-m)Zm;{k+Fx?6;g6#(RlWM~_IoCQ&8`a&rrRfe|PBkRX;t(b@pfSGyK9j{m zwjBn`moggi5w20c4Vla+#A}S{RQtdM)9vR^9Q_z1DF@_F$gb}_Up1E=TCcF!g?1`Q zi6AvflSALNWdfjg~=`BcI-x!jAOra@o$Z7Kf;e0sV5! ziq9KdV)DE5X#OQkNn_pPl`ixN*av)xm*@A}%-B@1h?T(TRYz5ghLDSUPRy_)g`KWatvo zYE#Ia*7wF)+BI{y&4g}RNyNcKKJ#kA19)l;j0f*pn^z?knl)@@oQ{&|iV{N|;W`hN z#;QI6Fr(7iV%b0i*o%Z#sNk(-$tX1>`C3-31%4Q1C)a^^i!4Aa$l@w5!Hs_;3Q#tq zS3Q02RAwPi9eED>be?^#K=4+x(XYwI{vt-|^$Exxc>s17;gYz)J1JK>BEXb(#g}>z z9#+E@ZOk8ONTB#fkuMPI!J%OQ;El6>IS{#BZzkKXewNm8c3{96bc_5HRO;$}-%#}U zkC~D_NZS;Xjb6SG*+)q)oB>}or4u~4@bz&`cfIxCkgb6iarUvcN;h~<&vg0n`3rgn z)FA8w8!OmKUpt-hk+C1Wik7U`97(uAqgwA35RLGPUUhGqRm>{2)50I z2UHKnxvVv?4N(jos-=yE46N^gy`sE}45PQZ1INbH#G_<7ckO!Et0@cWB~T|qrI?li7)WjBwr^zIP36klHa{rDS`onNrWe6M?J}xMpN#4jD)6%nxFz0B)6F z)AGW69D=8dJYVKG^mb_%F0^8Qa42SjM0Q#-PFKA`{8R}0QxJ~Oxv zw!t5IlB}>NXj=B>X@W_1jcQ0Bd;uYj<}2s;vKT~UcK|pq5Y|_OVOB{VhASvQsXWYZ z5+gVQ)LoVOX!<;@=`%CRdm@g^)U0vTPyT)P`qvz*ozpgJH4Wr9jk~v znV@>KD{|K8!2Bj@ip`4?Yw->K1pYwCH}Zvq!%!bCthiFoaMM0VFPw#=XdLNUlBmd6 z509-`*WDJA!yTd$r3qSsX8jK;hb)woHLdFj*>kpIQh;%`Apt2$Y@W|Y3#uNO1KUuk z>}?K<+6zDV3~qo&%YM#Nf*A=3$QXpYnHUzNa;ByU60kVk9w2;#@1NcnQH5&XR^h@6V>Gkq+pPTbgRdYJ6oMx&~Y=&^r{k%oTcdNHlEb47#+ z!IJQS8;(A{it@uI%$qdDknSsF)RSNTwkV@}W%C4vyD$dHSgR)(-Z(d7%NqD-4Z0!l zkcGX!J+!V$H1U@eNajJcy8!vmHM~|7s7DdF5(-h8={&jQSSZ(#T4JHd4MS=e^N^E{ z173K8{+{wyk*edjm!>sv2qZ0|_ktf{(Nj`@NDwEY-1Ufmx@JlUz{ed1CA<>2LmYz2 zMvR)yg;QV$RX{jKc63+)+GvCihGR2yg$>$DE&^2Q>uG>Jtx@mmg8xOd4Zp2n`fz^# zLqUgY1j_;u$j5#4z!`Szh_^vb;Dwx^PZJ}uA1$_Rjp_em=?E$$ZuBs??ZtMr2TO5g z=uC>@r<)^l)dM_Qi15OjW+9qj_9x38!=6jSV;(K+$82+NC_1d}Zx-8#I%ZBQibS;r z_5^#=(!zLcL4>sjK9zrT{RD>m!F$kVCUG4K9(+t6EjR(0a|#(D!A*h3GrPe=!%87? 
zQ|7HPISpP0kKI#wBMIYodH z;WrC59+yX~`YW9i2qG2l;yR=l@pdAE$&V4H4HV`Ps6)umV)KIa3xtk7wt5+b0N5Il zg*hUs2E=Q&xOjf>=7FFGCD*yi^7R_PQgI{lqvczGd$Dknu@8=}B*51G?Iy=R!p7a> z|Z)Zkf5;4hY}5xGL%CH_d&XZaIU8~SIQWVn06`>OPJ zB|&>0$LE;0Q9{U#$zwGn=C3ZlOa&4INNI*d->Z}8!l8I75(ru;5V6dGvZ#y-Q%dsS z@&!-&6Z8W3F{;Q6Qsom-PyWN{B?wTrH=4XmyIICy$S;8>(Vv9!hin0&8z1V)>Bj;; zL*!4WV=|^GJ%fwru<)Uy4{YP{j$VovG%9B>-mff0jO6sv0hV1CS?)vT6vk=&4_lVV zFY3eRaQKMPp04nh%#;2gv)qLi*yMi>>YmQKjIc&uFgMwkq8|=stfe@Q0`%t{oU%LU4@(WAyqhlj14`{MTxcKI8bOm ziWji0CPv;GrZnA6Mg*K`UP!PBi~~Dw?|jS5CJ#v$89~loDj5L3X#ilXUN@+PtH+Li zu#ALm5~$JzTUwY{3z301L@+^E4a(Q4PDa3HSDT8ma!-ULvSbVa;Rj&t+2OFYz>1Mo za&yaueBjR4$req0LppjV;@cjA!f zvnwbNoEfBxP>JBG1ReEs5uhOKV{$hgAI&^#!qhv6dw^1O4YzEIXCvlJSvp`e#CO5w zU$j#!+q}RxxQhgcc0ab>{h+|d3@UCQQu8KA8G!dbL`|L5wId?=Cy# zFSaP%&qja<8b@^g*feK}2NgMy35UMuATx(6HW&`iNy{dL^1x(0NnlL#Z#chWfC&)O?IC0@Z+MlQms_DoH4{;7?%E(9 zSNwk+=EN5Q!S|$)1rFY#J_8_jjh!e}y*hwDx5B?Tcif-EaAx(l=iIlO%3w|p6^kR` zvL-+ymXNo;2fIejinf$$J5gz!GH^3mPgeF@l1m`Zi0io(Y+fD0bBSa?rK>KKkAe_s zJKKdL@m_AxQeFu28k5Dw2Px&Fqa%>^o~8<~rfd$+tEihv|&XX9aL)O zK&>7YNkv9sq0rHv>l0vcJO~l2LeVm#Ys5ptOnR+!6Jq_S7d;5E+yL_(*aEZ6muL^0 z7zjJavP9*F4;{p#!?FMb*4iT%VpS7_Qi9AK(PYR*_+16p5)3tJ$ag>tDg!4P9vgws z=^n6E)T zgog4)&&;>$kC(f*z3YO|9 zaLb}7@!Bdf&T(`k5VdbqKdy6j-Z$6T-^jx-UTd8 zi*y`r(UR=Ts67d+)&o~c(FWF;KT#Yd$v~b!Dn?v$F-OJAy%`X}mbbrekdceYKZ<@JFZHQ{s&79GKfvke4eQ32>LDxvA6VJcC(G^b{r?fcg1^usy;7jgiB%pS~^ zQZzQekpyScpNhR1R=Wk>jZhdPg3Zh`!hMj%x0~m$OH?$r#kK4NF{qI~Giwkmpk);9 zjsX8vHa@81G`|Feqz(;RB;+ON9OkPgONOwy!C5dv^;ORAv~$UTlg(j992-AyJ*0vz zOGQ%@!IT-*R`oH=2~P$u+zG3C&+H(Dl~G!mRB5aoh%B!Q3yUY_PXk>~R$JGI9SUel z#qU|2nm^Y$uZzI)5K|5CgP|p+u4q`-v(!++W!GsOM|&MNAU5u>4=3D; z&ZynFu6+)j*X?lU7GU;D;!CTSl1J(2>Dr>yii{t)A%9w2>=;Xd9D3$ zoN?Og@Yk!DLc+LMvqHFkv&YNN^GJh4GkmT>_~biw1+E`6;1Mya@lr{dU?jk7j!|z> zc$j(v{wA>~$$3j6G)n4kAgeCg{PuT~nnpO@J&2mvVG6!i8c-D(eILM{Ky3RoTc-47 z0Kj(6bz6I&1hGPYLUFYS;2e|m4je02di-nclDc_rWV5I)%o@h!@rQX?x3O`MR8Tr` zk=J8m%j{ZGP-g6eX>NdPKgd+eRS}#uS}*8GKidrUF$lm1g1dcy2+Xk3^>8X`bw`vM zt=4zw-wyJ#3C-)lAoCTuDx>m!0~w=Gy5-Xv@@qu6>|;`El7fVQ;7)P}9JXs1+olWE zD!KH}U<4bHC8zL`p-5#`vmK$a^3H#)peK?O7lre_kByJ%h5#vY>4U~PC3 zFbQL?dgm?X81PDGbgwV+KEu_3z`X$1#shBKBx6&kLk(g*A+T-`UM&|zdB+?92sAGx zM4@5Y1(@FES~wBDuu4K*}LP2@$a1>nmNh3W0JJW>!m>Hz5}eJ#qf^fEJ37 zU~d551wvW+w?qk#1U0i5edn8i$o3WGOk$LN-&ONQCu0&r-?_PfsbvLNq7M68gfeAZ zm3|me4GuC}X!D7!QjhGeh*(~TgLI*hgBj_v#9Se=4oCWE?+w$;LTq3ODy@qFz;V== zAkCz+I%Oue4}9GwI@bhtx8nTt*K{~+s$IlAfphibEm?<&Br`eV4>RxpSc!tI8Pc3h zBNSs&y&&r?QsQH`M4g+#2rR_$@Ro#>vKr!#uosq(792ZDln*55gXu0}xV?6$ov|b- z{@T&2MX?OLAR(Yc2kJzO(TG^TugIr)NWe8JBmV@d3dNWO?*$k~mkO8!0K-}`Glx1^ zgWXNfRuhyB1i%lx&>tyrCxhN_mP6oYSH3}qu0BBdA3pdY6hDSEpdoeC$_{3;bO!lN zR(~VTS@_VJkIhBQE<>13#A*BXBK;jgx|R|~gH`w;HJ{%C1 zV$|N*HdS>G^afS>*YDQaWO5MrSY~$ye|Yo`xc%s#drc6GBef^QkZ;f)E~g*Ilgle^ z4Q&>2Wf1HaA-7hoCtOY!y$Pa7-3bYXX7hSFd_JP)La4dW|03JZP2?Wjl-V!Ge#Vop=oo1 zB0u4B)!TU)R3I4vn$n1SRD?s)N!1jTFr|fRGrEvnP6wyX+7d>+10c+{!u5zBC}!Uz z4m+&W;7{dqL>JPbl5xzR0b$YB(syyxbOcEbpI`|uaUA+ko6-UEXO3`+yh9=3LI@6g zKsDe<-`6=RkuqG({WfhuUp2e-L(n=7i&BvS$1R{1aiWsrD!|OV8bd00#Pa)!y0yOIDYr80sQSA5X38ye`4hq?TSm7zrLQ?TMp2 z%Bsb7$1=Qg5RMnbK@6i2Co4EM0>vh2&f0=bIRG6aMID?F$Un|f==atgMlMoI1J;va z+q;S#M>dc-MUmVL%cO^hUk({ghWQf2)@G=wr)MqbMW{Sll-v#TkdNfS8#`2JRSje2 zN3H#E7;>(Vlh^>KolgiHz2Vs26L4vQ zMzOi8lzQ+WRBr%rU=a?o;uDWQ4fsd_eO?B} zmkM46fO{*4UraHF{3w+ zEJ4;8NM{*WBt6fFsv>wyc=GTUJ#bL&6xEhMHk3aRA*p>=p)ZAHcm)}fCeR{6U-aTK z3i7V?MS-1F4SOXENKf|!4E~xz_=GbpEl~O-P*(C?%!Gmi%7`YN5_=f=!AR5U@vX(* zt@gQ!DzZ4?Sd_mt!qn#>iawNFD^hKE9q-4~a-8NTYOY6wIQ+;4qT}IknZX(aBA|i2 zi0q8agGD?>TswRYHXVafXetS6{BnOXyN1^54Wb^3%(<=v 
z7zUwQ(it}A-;wd41L-O&aHC^M5TTJQD;tMr^e|N6u?8OF@V>T&JQ9l2s|hT_;vI_U z7+@l5M!oa~kluA)XVrKsP2zX56JOT+? zi!i&^Cb}kACtV!weUJ(zaEzk!CSdB^K-BWW@-mExz)P4PO60kGVd1XGy>E5SwO+N?#}hZX{u3o9v-=d4iI<;WOfOiD zEMR|q4`HToFd*RlLZEAS=Ey)Y2pFB9($xWcFG75^Ke>av3l?Onv=9)ejcDkgNY&L4 zcS@LR5|lMEN+m6ookjKqE4g>os8md`&VD^{_ZU&fAKmD4{LUM|5)n zj#ID=-Z&6Wa@~&xaFc<672*^TMJT}C$WR@wF*Wu$Fy%lVjv*p|)jMDZ+Y;M7?wlm> z?W1WgqPJ26dmSSj>K9P#QEC~f{fMa84XdT45ACTCs*K_7m68Vx{J`z_8$`}|_ zMVP=&{fUlRivH^ogd1sDzXN`|&He%yXRPAagpFF$-MCiDaga#Ms5nZo|c+Do6 z2rLH=Hon9ksas~w4-Wi73757@_~8@mxbq7b`I&=$_5bKET~mb98GY+3O)`h4zD>`)oYkxpolr?l5KXg1apOC_T0 z*h@aVIlnJBpju8fMb&ZyEn2EX>(NqGMrgv3FXIXzjnf@$NSrrvFxjyv->9i7qoH0$ z%@;Edm@P;E1engvrk`6T{*Ug&|B~JQ>a%~kZpU!(gVZ7aepDG-Mq7+_&yaz@B$$fPAieuNW@7)p-9Zpoe=x|i&1Ea%%J4Qu^ zBMq46u2h%C;(<+G%WMI(zJ}QX8!E^QLrCnTOC(Y!U15}nqM7d8V0MjJ#M56InJ!Lb z7qRg7uK#g=DXlJkD^Z$ie=Qk6f zD%P}o8xUtX^0%Pyj^6>$Ps`^s>&bj|mIKBe2opy>h1fc3gtzLbNyk1~XfV&c8@VSQ z1f{Eh=y1>%=DQA~OJ>Is_y@;`dkkZa!dZ6gT?{)?EeG&EEx#Fe9XntKta-kBH;lkM z_xKlP57Uc{!OX|LBSgoULC}qRjH0WP?){Pn(P0{I2}4)C(jdF6i!uD8Mqineign0w-+EGNB%_4g1_8#w)0m8 z4uTua-gQEzbkvXwA7e*mh|h720p8(o0guMP1ksl43?<>rmYI%yIDJGT)KLQnD9EH- z0VfUJxlXz~|tuxC!X)48`M zMTh?}Lo7Tx9K{8gQ-BONqr<^o!q+_1Lpm*A{m5lu{0>aaBR71$ibhOyI7kum+%1r) zbGlEKojdN%DRzJO2W`Jtur)AwOC2?~muNvThMyS*e9uW2mBIMvaBLeKIZ|0JIP$F$ zW_6FVW2r2ps_aT!&**T}JaG_^#Ub1TbTS@JqLzfe8ME_$TX^;@|BFiwRt>!MhN8{4 zqr<^Jg{uUHfn5bp2ullgB|<3-glAq?1BPL%qeh?{>}w~6S0Z#{`**abWx^GJ`hf`g z-f=|JD2h+}m$w^O5h6px{Yv8|f$h7g{boFQLpv|BM3i(9u^}^KGF#B^Ai(68eu{uWz2eKmg4m{8Q?|abyLp=DGkLg;9uFx!(T+q~zO%eS{wVc=i zs^tjPkY%F-5I%9iF&``^h~BW#N3Y9G%LiHQ`m}rmXWrBDWiAbmc7dFSjU((*wH#Gr zd<}F0-wEVdLzOdHo1KNjZ$}OkSUI0({~_N+JW^FS1geYJbTEoI@((w?{`~1Ke2(1y^P=;= z%uJ*es6k0`75XQU#qgZ$77VbNR=KuX2bjyq0Nz{zE=y7^D&4s27P~@y7CLa8?iC z04x@b{p5&+jb9{X#{z-h5C1u4{6TSr@dsdr*H9yQbT}Bgana%XB!}-sGqvxJ<1T;N zXA4B~*dhPh&O;sg*VOPG<~nMGkL##W6;(%#XjM9D#NF0WL&x8CIE@n2p0Z=9_bWRV zWME_qXpO<8Ml~H@BPoYTVZ(o%WpDq!;0(sHuP!)`g@Mh4_eV%nBRiHTbgIe#yEhqJ9gHz0Pz z(}=UU+$S?V*@pWQ(gwx?5SS&UX(Y549gd>uIADe*S#?!q5Ui%<4}h2gW4#Z_*my=r zbV^)ucpO~pqu?>FW43@^fu|tKl8zc!LQcBzsGfat1Ajv=W0vA8ANDo$!rnhzYbG-c z5QdX(4T^cV=O{Hqhm*2IbU5lQ*q<@Xi;Z%OnRwTuRQF)EfZGNe0RR@EHORR5-UUK; zqQg<|LFD{k?!I~dGBh6pQ(y{C;(9~dR7Z`-f56m1lR%_^UM&1h{S5gj;C*uBv&o36 z${a7Krt^4)H4iOPr;a_8pSOh-Av3HsuHnT8RO zR!0qSnIrD0QQ{#THI&d{QP6dXI}3dce2wrCX(70xKnCuW$iP(t;Ya!>+8lfv)(F}a zus7g@>Zp-2gpL|WF9_n<;d|I)$MXu}e?zRe>VLZDWAPeVqXa(Xy<=bt>Ldc!)rj@4 zqXy{)tU5~mfJ?#Z`cx`It077yU@;-AhlxNQge+M{Z451;_K0cu3nlDM>8M3-Tu*=+ z1?Dj92SK&RALqO(*pL`e#;ICzFh3vncOiHoI%>pvV77n*2D@D%D~Mv%^S#CYm5}Ivb%C{50CrXsc?;CyJ%}yL3S*uaH*C< zJq|8@497#w1xF3sOdU1J!OG%k`2jXxipu}V5&4(T?t15cti<-8o5*}wwm(VJ{k^En<){C9nYaOJSWYnL_CZqgZ& N-nDmU&5^8 literal 60289 zcmcG$cRba7{|A2T$Vw`aRY;UoBH4*ZiiE_W$liNXSt%otA|sN@%HAtU%1G$gGucNn zj_vn)>*~I`?)(1UkKZ4^pNFgKx^$fL8SnS&^<1yd-AigpG*rw~7z~C+`J93V21D@~ zgCQB;K@L9&#NK=j|2pM#R>$eGt%;NCb%&dn3)h|OtZbdE%x`eG+;ng>x3xJga9rTT zQ4TXFCp$+8K|$+(y+FX$!Bmi5v#}K3gwpQZRYwel?mGI1=%q}WIR-O{S5}bIbbC78 z=c%W8bCcpn%acz}t_$=EhETnF<=)`=ICCFk`^ytE$=9^}UFr{Zee5;3W^ofgxwp*1 z{ARaq=8Md0ak|gW=692c_BEE9^p{ZYsHJY)Sl`4b2JSbZ-_^cSZXk%YOK@*?OmK9o z9zH!Vd+1yw*R9}xeDQv0==LC1_~%y)G5P1a|9Dw&HAZ6BKff^l@fwVfpV~j)h}4hB z>EJ)VFw{xA|M^BlhM4$&e%KDU2mgE{1q_qy;eWmn@&D(C)Vk*!J!#{U<7t^`a_wA% z0bdy>3H@sq@BDRezduj3_4So>jvcqx&OE!6bS8+lcx!XbezN^lVM0Imu~Vn^d#^6c znZ?QYN^fq=`W7|$$381BKR_q7K!4-Ljie3>VT*R&>F$E`)=l5#TK3QB+JYFr2IIFo z1==wCE~`$8(0j+GuT+knlT%%aSG>I zzBJz=Gtzu|Rv|_}KdIG>o|QFp*CEA0nXQ#@oVabp;$FqD19yvy#b0XjIpKTv!`Jdm z(ZD)(t7>Z_d(HHtp^#fe<;_of?=y>1wYIkIprcdL&|uKY#?G{+m6kHMrCn4?l=i0I 
zyO%s(Ey!nMb;#Ctjb+-UOljcTw{MERFQQda%M7qsY{~|yT=gfxs%c>x8ykT@8W!?U zMYuaVdhP33iDoxGJUDbIy#ub^X_x6AZpUO~*mv`&%BHEsCuya(X6c{qbsGuoEVPbm z$tkoQz*qW|xy<}{xBFtXv55P^sl?gC7%#YH#ha^ByV%&mUcThLf8Z1Yi*zCJA^0h#)V#S~`=dkKV5P#ftS<~(;Q<%+sBZoTTLZK}lRw=}&O9u;y?kS0+~>bP za~x}aqjPi_UpD*3qJwXpP_!!R?B!vcZNfX_hof-nBbl19ya)&y|;G=mc?5-nRSdhW1KS`Uo@3%)kdLq znVx{{S2yjDu>?=rneKvGKO&MJD`SZi(^d8(itpaPA1kJ_-3z`AbEFXe(X$o2l@2c z`g}{u>(?L0EbGHq&)mGp{iUsK7dw0S>(_i<3$03zg{~8$_X}e4_|QkFXZKYYYmOJb z{+1Ljoxe$3gSh~8;?Z`uerK?Y~C^(3zvI6 zrZSnt?Kt)x)xo5+23Kv&*fO28V)9v^Q+oIA-LhBZanojh`DcC|7K}58^`%Je&S;U6lv)DQj`YJrdG&D3m=b2tCU!6=BcmK(?_n59f)R*h? z-<$A0WAht&lZw!f!?%k=CsT*+3ri6^$dtsMN+002y_1)cvh2ScD z7YqY;I<2!fF;9rJyzeHp;w|o z%&~`&(Jj9vR`fhEN2Y#RVW^4A1j{Lh=j~;#w&5o;wfIxh(u&5NU#F*Yr?qt@8>vKc z!OOJZ#n+0O`?U1*js^q;op2#SWv`7;vHpnUU-Zf*D(VZ9eSG>UBcB)dHpC%N_E{~|jqoQr1qE3QR07;Ct= z6t>t)uT39UW;{DlGL|sEGMgf3l6N)a-o1yg&Bt039zEhIupPj@e)-a_sABQ!H3rNC zEM~%K6Qh9^sO-u(eb;y>8okzg_UwsNOM3{-V9>^MI;Wm1K%R4EplZMo{`*lqGv+Mtt! z)auEjXZ2$i-B1CAOMxms6eST6k@DP=>FMcnP<}ZiB$z!a*q~Hbzq*{J>tx_8M{jsC zZVu{Y>3PZl_$@r{V!T9vdE1MUPrL+tAIog{d{{k`bLw4O#??EhaIs1_L0!JpYUbZ( zQ{P+S*qV7w>}7|A!r8OeJKtRY(A4CdXj{~i{Y{` z-x`*eN5U&a1jTH6gWtRnqGmZsEqh_f6&ghBf|c~@_@Q2hPsh%kJ7-sv`r<{hCjr0w zqn0l2grZ}I_2r1f#PA??A0~Bm^&K=cbx?DH@=RMi7JWAsajmdvpeeqquRnDB`0mls zQS3(L)~08#Q`-g9OSIC@4pe&26;*9Cr5jxSx)dvFDP;DUV|>xWKQM5(+cJBRc?y~u zn@sp-`<6TPbHEWjMYi=&X{T+b+F$K9&MjPNlfRs>_3TWL-P%l1ihh|3binb>oI^fa zYXd3C$#478vaDR)+{6Jwcr1+HlG&UKXB#;n<8xq*FiR(%278k#H8s@@mWpKe@#Dv@ zpN#W<)V#zEy>kket)!-=R$Y1Z>ea_5%p=!a?Jq-Bahhyz81t}xX%o6Uc}+_z5M6&L zP-ooSid*JG6%RoROwxGOYfTkEj9NVowcOo>{nB)v$YoohLQJ z*E)-A*?BI+ep4S!HmLF$@5(#;HRI}tXjq?(l9tw9*tDMuEOCAWb)}S64j!KOBR=;P z4>>qG+P6p%I8K~s@ADb(Cdj3=k*)XPItguNCrZz+;V5Zn0@Tvx-IfNt=4r^Onfzc= zyEQt#^{4DCu-pgB<(qSpX`=UDDUXF}07q@@*S4``sg+*0D9twT9goy5lCgsBlUKhy zc!%!N)vI^WJ%+v_=)1BLRoeB{scg2jE{S-x`z&I#X9B5flF#0I10ZL8G28bnE=zxI zJSCjGy!l(h!`2c<)AFAm`O$~>`>a``w%X#mnOD|=_7=2cdP2XaU{=+phZn3FHlo@` zsMVlGbWTOZA*LsDXEo!VJwxz;K^!}v^pFFPeB(ZEqM9hps+h9tqV&r1dL6;a<%0_9 z?=D}IB1`X}QfCx4Z9afL6mQ8CsyN-5LjsHWE&MX3oz&8EY5MNMf>YJn0}|1j@f1)M z_V8X%QF$9@TY2{m1sz&>&PBG}`IJWQ@1L3#Sp^6sZ*MP5PRBOAs0+yLq; zrLBoNqZF|gMhMEDe^8K7^V3t}c7vFHw-K5Hr(GI1UXUeyzpx(w&{+!$J~r7NO%*nda1%crD@yWSTl(IwPz)4@L=U}!AS_x-V=-rJSlYd4_kUwWk}?zQ{|$JRmUb7imGT(vwG`OOt8 zv$=c-4{aHQBEe8h&yQS%?l7wEzWn9(f zC?OW!65g2n4$;iLCKUnpn>fKP9WgcP9ZLPHUN;Z>mG{uRv7&Q`+ke|ClPV>|3%pHw(!SWxsNo8YC zTE{9?DL>>B?03)9np&I&@N&|omzO94b!*qS zE~`iF7S1W$-5b$%*`71(TgyQ>OTZQRq}3_ZPM13}Sq<11Kk?!+=U_1@pNwnj+l;AN zKTFKv4QnZqt0O9V-&u9$(=HiG4eQ+J9|5#+?RQs-d>)(o5sv;5!|vUwsFgIn@=+dr zku?JV*o0s>&xI;9}5&n}7NGbtD4=!}G2@ z)ANx|cvpEN!W#6rGF*i!&qI9iNSNaJkLj9xt^){H&H8SYX(vi~E}a9+p^q)c6Rc`Y z%|)I(Z+Lk4{B4fn`gXGuU*L0W6LFicwkPI}OSt_ooIq>i(8cuku$s6rUtU~@TVHYn z)TO8QG%70UTPxm4BK5})yCdA(bt!#@Lg#cd0aea)=G6B2d~e*ZoQ*3hiZ-*l_fTTw zeW>F=IB&=oSae6PXls`l7!=vWlgfTm78~p6kD4`ASAP%%)niUFB_$;l z0Kew~i;Pb>BO9B*{!qp6CFq;5Mt~fZ^ymr1prh!oOu-K@JhDR#A(@}@eDO9sYIRq> zaN(?~Dz$|3q(35Rx|w?uy?#X$VLA6dpA(GYQeK7@Aq4yCTqNn8n!!y}y`YG^vBU|4 zI|4zf$mOZ6t(^m;w|2Db+Su##tlM15M0qW@Vsn=kKMCU0(>w6EDK8-HZ*()yoj<=* zJS%$&WA?M4IY>8C(Z(hVSAUyqXZfo1!sjz=(R8T(@-kAnlCM8HcFi!a>&Lf;7d?ze zWuU^tzI3y+FP4TBI%K zEU24W+sD{}wI}g7Rt{;=I8Rmzviq(J>j`M5;2J-D+WFX0-x^2CiMdD@AG6WoZ+&CQAU8Ia&G+t2FSYfrHW!h!Gpq$+T6d?_N$~@;gsEFT zgcV(}3oAYrCzyd~kW5TW=oHzo_PdX<=GzXi74^BW(6UQU;|Xx@lL63OexX_uF0&bo z`Yv>jcVVoj%kkbFudgfv-<9MmqDQ4v-~2Z`Z~pWp^?YlEr>mx>=3u;QznqUxWsA&e zV)*KsKFQd}8nIqi*f*YJ(Xf8K@E5%+5g4k=F{2DyDQMxs~$e5{T^Q30?S?2um<;&-1XDIHop5FWL@WuB)yE>n*mC}=u zk?|fmQo=<%e!+Va%A%r+%Sj+qSIR*IW0Lj~N5YpUzNDU2FHz(s6~b2tIk_!$nLdb& 
zyxn6{Mgz-3DR0Szj*`;+1g~|D4sq51Lam^*5e?+e1FN|{^fo}J)aeMweGk98G>PH{ zV=c3YE?tiF0pP?W<0FYcd$yPp(yuTPj~)eHv#rgWx4J27=?4A29}7**?|f_Sm7==wkptIxgpskdbXR(b7YM|zI) z)K{MD2b&vfWmT1Mazb{4vWRf$RQX8xRadQfWyZbmk^$nR1{Gi(YO;NzE4s@d{VgNa zBSXJzuWS>+9i#zt<%HqaF``@eiuIqL+`d0C?IqZpU`Cq)X$s@*uP#5tZ?na5hj-M! z1YQY2Wo6}a7cSI6kq84sN(z^!1^@xg!-jA8w$6l?YH4kzAT|-Y`h`aFa(pgyv`^(n zmr{44TW4D61bUtip=`kY=?MQDksI%J9604b3WrO(x-fAE6eboD3fgyYr}{G7fe5pi zHb0%S$hN!}9`0IX4LDV-&*^er_GRDRKwQZE`{jdTmU^#aa|?}(jB-2)HeNp)4kLxB za&?mbd1@-Bl+;FYi*4JB3&oSK(?NjEzUPzqXaTU%9w1FyKk8*R5#(|kzI*sH@VBQR zrpY$7T=emgF0|^p4SQi9dYKR?32^UUv<_%*qKysbHx()XxC&4on+!h`3|^Dqgj*6gP0+BZ)^` z|I#Hts0phxI1m@8nS`nJ}mik*H6i5LC@<=T5q;W&>wP}Jod2JA#5=e4=1REhHO#6C*1n5Tt?!n2yl)HHb z4YpQePUoHeJS*)x^+K(7b}YMUGo(d&jT$)pkMVu`_usu<8+g5^(3)-LxJjd|y815A zU!l&q7;bHD;KgE53CFxKYdr`xRP~8So%gJ>%z4w6ggQ_t_x@N@Mp~-VsoyR^>;-|C zk!Wpf%|u%oLNTtM3JU(GA8Q!sn~bOO2@CHMh}o$W&bDZlZ7^Awy6o{#4*K?jhtyNG z!y1<_d;0+UI%&TC;lnv&W8=ZZt@XodPsMK`N&w`gDb^AWDF+&GwqGw80Cb^>BeYr5 zeP#RV!`W?J9y!4MqTjuHM1Vza0C?NrgF1I%(WBnBB2Vl9qUbqyT)|j;rNkT%x2sM` zY%-~BO;PI0cAe?ogNcnw?a@r;y|KPL=LRIt{@aILlIuTe0f2AXtA91~2A$bziRF;4 z?&U<^c+2dHz0krA{G6ZPmQ`WH=@eQ$S#t)}GAfD@>4S(iV|`X`qVB&unV#M3Y2$2} zKg8S*L;;}8fT$PfF~xcUk|42+_mS-DBzMagLpO25YU)$NaK&c8XvO9FrKOT<_Y%Ymms;h62^jW6n=QGj< z)sDsOnszx2Ex%ffk2H1+mY83be%UaPRl@0JXzI5=e*EZnYYD0zHM{hIzB1RKwh09+ zmMxHa|8VnLX&^^%mQwf)=yv27)yG25l%u6VT;x<{;N#~fYS(;qiI)*O_1+}!I!Hek zR{L;2P67WQ`mym|w5$)WE?*g@m0kyZLl}e;D3?}Uxn!K2oOm%CRuDO05uF8Xu#bQ( z8vVGwGLKGvx~E8RP;Z--2sr;`MzxYDtEnjhvjwu$d$SO@XvoGIA?5k=cYU^?{$lsr z#>Co>yeGn7eK(xLm+J2AD_Snd*UiMmQGatdWmC1`f>dnjJ6Vf~X)>q))PoGBZQxy? zgep%>K?FzuMfTcq zAbDEC6@UNf(*xPF&W^9yPOMjcG!}7vleI}$0$mCJbMDx)moIm~>6UVREF!t!yQ!Gc z%74ynrT{0;^ROgQTVK)D+qvfBYRkg%+(>7YuWxlpD4P^BbWX5R#3Z{1fasf@U=BAm z6JmM;@)}`t8r6MpX3C{K2>4Kce^RT?H$>2`C@|#{QOq9?>Y&(ZLf*Zl5$~^S^q)evpR7&Jkio%K7;?>CVD}3#S*h$>}yJZq#*0v zQ8LK|-^_P&o?XweNfgh=On#?ktmUl!D78VAHqzmtc?lM;;mazpmgBH|k#+N2`62OA zW**cUPy)%30oQ8{Rr&?40vFwDkZ<-t=|yF>DR@m`;nJ%+Kr?ly;^aGel={`HS8gEL z_sThAbSgYVffLowR=)6&0sYM4gRuy-&#xmT3D7D2Qk(_}{Y6mYd^e^H9!qQXYS2=Eq(@PDm)hD z9UMf+X_)dkn@ByfEgA!WbBC~ZL{*fFk zpYj~MMR5s)O{m=|M)hIhF4IhV1+n)5TBSfa!TTIJa^!-K0HB8VfU0}tde_&z(9*{E zA!Km(?%mFO^WE(h1)c`et5DeZkx|eHk(RiAWrPTOs>l1*eHGWYN*l%itb9h>N=i zwbBSU6&9DVQ;XswpCCVfE$~Boz|3N$rt5J1gV3%8=hF{>Igs2RaP-5sDA??>_a&Ja zECf#Wa~y|#5iaWs_z1XrFz1;-j*}<%LUXVG7|n0Lx?oP&9C1c{wb!nehUt_87n{d8 zCqjT|Ckulch&*Jx&xH-2_h1~L9SR@*n`gk0qTa5quOA9BN?E-mEFB(z|A3K_K$%|# zRFz}ZC9*Knvk#!ZD}hDaF3B@|LBhGp6n6EDqy8_h_lOI)8>kOF;M}E>%JT8@{_tp; za8iM{h%R-R;X8GJ7Nn{PxJ(aAohA<6P3B#k9WYphYn1w1KI@0Y3lhuQfSncZNjWNh zyxoZ(fxlBu@9R04ytI=1@gu@FDAUc?AVy7^wml;O4GM$_NXq6A#X(Y|h7U zDR-B-W}kqYq@2=v4w}Rlz)h)d-^ynTBlQO=r1W5KTU#4Th0DeiE;9&Fs;SA}+Ws*EJ@BNqKTr4-pY`3jq&A z*pyE2B#z;lT5(Dni8b>Z?&emFfbm?bWLvhNl176}#^XHGeMKX^L&Mnk`3Vp-%t1)S zO9OR_wy9XuIwCENWjL!I5*{7{bxvRc^bhH=c1_gP+EbM1z*Bq$A&@VCDS1lt1QJ`a z@Tb;1*=T%ym%nt*`a}Cn)UdF4&A4yhVQ@o_96Ttma^XU>^yacrxQ|x)6+my1t4lNd z0fB)ka5+>|RiogB;1`cio;Rv81I;OVzZtBhKTW<=}^g)g{NS6{><93Ir{5X$6eD_BU#sjH#jj5Ry;LcO;s8QlvDgaU{4 zv}S2}Gvf_GlE)IxeAMK3PRFg^02PEC!676K*Qwmg)-9ldj0`U?Z*h{yf;K}??JZDM zky(R$xL7G#U+nfL1&=$_Nc|PWntYX&wPf+Bq2ZP)&pfZv25h7uNRmk1xmEBMG(z~# ziYvFrWSi#MC7i~&^8^XeuQM{JA%h^9?VaL1)Up%FNFXE$JpK!6Xtsb+n&UX8hC~f8 z>*B<0-$OB-`+SD(`pVByYbmMH;NP!RPzWhZz-+%kYwEW^78_Zh>j=p`R^{R1;xYv0 zv^ZR^*zB>wx5ui)76G9yF5%S+NYcfRGYDQ1Jg1JgwSD*g`lJ0I zshq5$B01`;~`m7*S@CIxPQ zN#1@a2hf%&Co!d5%5bR0qLX_MDl-7ZX$4}B8M~1=c8_S`yiOmTikTTN$@sB%HbrN^ z>pXEHY72Au^WcAfbH@9>KAnOht?e!pu4m93f{-d?h+{BjJ z$eO|VIm^40(R)vGDO*gL|4O0!b(;$wr=E`)MvAv?=KBcFGf+rnPOS8bT7H$KCxw_n 
zPOEgdPB)66*S<6Kh2!}T-A04fD(up{v(l<|lud^sB?ay_;+ClsBDL z+~lRMy+Co6wkK)l6h-f$f`i|3?-A~({<)bl$DrJSZM#jsbTYM2R1WN&0hGMJR6`d; zjxdV41%L_yd8>eco%@DS_6mTJF#Hu#&V`<+qNKtmXdsYo97%2jvSUyOL~WeSw=4H% zCr}5ZnoF)t=Y=MWIZ2?r00ORvVtxGdsbUpfB9P9-rMK1{Q6>tT-SYHQ+lDiMDuRo$!^Je+KuqCCRgzmHa{VFKH3XqGIoR`%mJ5ip zDWzOnnz;kgorwvL)kzkh4$nF)a%=~r$DJfF)rp%6he5cY1DVJlyq)XUIWx2@Q*f~| zzLiQR3kBg<&j8-gE`zJx9Df=qwXSihJZHdluiRL=R%O%-iVqYb#6`7bSulD6`|uS6 z?6#WQS5`ETgGqv7HCweMZBsl%o@g(D0cTtYnVh{YJs{fwD+6X@SE7d`1&9<5Jhwbq zf0i;yQ0EN>97;tn?6u?9B((LN_dp5#-m%$f6n=f;OX}m}rnErCrsj0R*`z=q*xVa$ z2|DUt0li>Q@`oX+hlqU3>IOKjb%0jYb##Iu#dsZLXox;ow3b}z*|}?Uobi=h+PTmH zpGdA6of+A?4GsdG_(Ov>LRZx-3TwD)`T~z{j&blAhnnPyNt#f)Z^P$bf0RDG(i1WI z1S*su0p+aL>_qa#L}>`}g<@wPD6!RmGqlm%puOUR5n`eSr$6}Ky?L{6_=Ck2lD}K` z_V8;Nf@lTEWRnaYbpSEQqmA6%$64GwIczj4B3qk2tn?E z41r_SSAcKj0Hwft5k$Tqs1}HpARf`0W297tvkBWvm$Z+O(GVIoGH9xO)W%9&A0oXH zKxz#z$qAdOzA`b;tsQJ~skYks;Jg8Hm-pwqft3aMDEDL37_j%oaoOwuG`}yZ1xj4# z+Lr`(3WWj2d|;b~3h0!2QQr0w3T3#?%$*h1u{JYhS9-~$(48FTZCn_msjF^oU;W)~ zNYhC13+Hj&DQoy7F7cI`^pZF&Cvz2GdGT&1aZ zu^K-m9&mrlzGPj1zuV35H$3@`Ab#T)_5%QMxRh^y8ij9S%*$`YOU?V+CLPrq$Os2w zUk~-_3&d!vre?L|o3{y>G%{KB7VkoS?P#<%T9}QGg-O7dM~XM{Ypwdr_oF3f6fPAA zrGK>?&!1g>TN~?MH`}{}@0-f=_cS)<0__x_b&v}RDmZg}`cxFw{m)`#ePq3P2iw=DdWf}-LuFbhQ}FAXAUW8kn+Vpw)J)>IRJdM9!!6)W`jtssusS8y);RtCMLs3TCuROsK$7BgM)*z zxp0f5_IANb>Dk#KW*-XwH4LW1+)5~gg~cMuMgi@uMq$5>bnOu!(CEJMs68D&?$+O4 zBI;`3_gxnp#YY1Alg*#EhVKa`wJy_@e_3g0YOazfc{roMv;v#Wp|!$tYhg*e)! z+HALlbk8l7xJ&aTSkF|}2ri;j6Bd=GlSv=G5n>(Wc>r784yk1Zx!eVumM}i7?l%Uq=F}J?t7Id~bqzJ%2j`!H7KuV& zDKl_$13|<^ivx0(C{xY0e;H#}&+3X&nLwc|QXF5se2Kw8X1cId`pT6nut7ioAe3mA zwoSZ)^o|UeaPP;MIy!EpORlxuAX)&7Xb4Isuq*LdN65PPrhv;1UQSh+HRzq7TJAw* z2bhr|oDnjRVLk4w=yz?HhND7u3i=toa?J*LvB=Q`9kI2y$6~1xLj&ng6hTK76OaqY zmSb~SA*&+Ck3V?wgc2?$oC9JA&z?PNs-%#pw<$U#CWcD!+zN>+w?iq{)6Cpw&vOMl z`*dfgx?2kB0YRuKS6^kZ`$~@4Jl^WeM|Q*MVDe;cr&MlF%gsV1^Rv9)LTCHr_Gt@c zc^>@5EB<}sBH|$}4vzd&aeHF09RSg0ra596smK)66#!nK?ga`I$LZ5>B6H+GY)o1b zLSdr~BK{bq!fn$0^e)m$!JDZEcs2(PZZa6zD9nn6Qb1o#*3QQAg4hNp+=|-$$SpX- zbE>LVOTAk^f@C-j9U=ida~AxQ&84EMXVBOHoCmMPf=WF8`56fShVajyFA+|Dbxux6 zd3K4{4pg$T@a)n_zHsSe-h<%TL6nOa=9{fK%g%xiLSirYC9SFF_hsnh z3`g=R>H7*2=2JEKR>IHj-1)-vHR)$8ce1y_V5KPcw+e!ZZKsx1{@Y2r8e`{bW9LCL z^XLKlKJITUT`3it_Bz2oBbT34Fp#yC9)4Z>pTzO^4L0Zk%aT;iPnGA6s>=#@e@7E5 z2Ic+X;1Q>TLWN2Yv~9@SV%m+}A-Fh##s&}*0C#2>H1s?0yRcQaHU_qa0M*<9G6m`G z96%uVmQ;~i1j8K2_vAf#)XDTYI2QeVV?cT%K8=Pp!&FQ7nyu=FG^{CbYpTI8fmk-8 zCP)Ky!s;0EmJH zeVF5G1I!*^RspH1>*?Kx1ygiC-vcSYK#NU-!AiC-+Zf5WyC*vTidU~rfMm0hB@b~>obl&0OZdjf`Tn0 z;;?V)}QWRkoVJ78^h7d!`1RHtcjo16wy=noQjSzClp{F=F8( z$jk+hlRDt|LiNe*BbQ$+&&)%>b_j0IPPdImUkGmmu%Lc3ZcFwWcf8i@y;0L z4U9;jurQiq@Ps6VKMYh10=u=&53ezlg&3>{1bIPB9RkhooSIr7+-e9RH_yJXxSJdd zk;Cg#Uo#(z+aL6E1b}hPSV3Dvjy_47|Cu&_E4SiU(QFCB&q|VM=X#(wqgPxy;~CVn zb?izH{`h`KSH0hi$FnJiS=eJwveUt+qnsE2U^EIu`R6eD**MSC50(YG03%C!Vvb(D zO~Z215Izwv4akRIJ4*rNVC`e8X$c-w=uQDEQ%jT%h2ae%XvAnhq~#PC)F_Yv1pp+< z0QqOK+1QE*5c-k#0u|-)DaRK$cb(-no^HSZ zaa06_-?_|Avx((#kC`D(l@G@MJV(a+x|y3fFkA(QPO=*%Kj0*cK=H?JTuhLh$f{@G zz55R28is&hpMfx2%iK+o{ixDGvi%I>SyxXOCV@EkBgK{;C}|)!j!HK-Hy2H59xwyCVb<|{^rcR@iM(vt6NWt1Zg3r|VinTp=q;#cOm<-+n* zfbymZ%g4r(2Y7!P$Ya%6e%PK!ufWD>)iM11^X6zr-rKuju5;KIHZ~BjFr^;YN}S4( zE%Puh;nYde9MlRWe!wkl`{O~;iry1 z8TI&R=%a@+*KP*YazFcQCihI(j?(>tKIICAh`^gkm3*7~+qRqjmkJ`_teVJ`%pa@D z!=aKUAEic?KRwnu%CRVY4^mf zhh57vE$=*MQr3NFFl)~!Jt=w5--kB-bJ(Gz+k?u94D`hduVwq0&;)C&T)KvAy2jvZ zo~VfImxZ1tIbw|#*-`?>anm@gaend#GaB(A`qW-lRhfNG+MYKHgI?)K|8a=j3A`ad z36Wk2v3!n@8%oBr9J zUC;KkN~z5jW%o_UziQB8xkTtsuP~t_#2P*9xMfkcyZ+N_yYi!O#Y6SNs@VGjbt;&1 
zo>iXzP(RtRgHAp7Pllz>DDs3F=+Ao?v$BKWRAGWz+(VEaIy$xmNHK=|6F@>`06`Z6 zd5LrN+1U~l92~2BB<7j^W9FH+?U=XgOPRT+EJ!B%Z!HDZyQKN^rTHD=HaT!ac#Tz? zb2t5rQMQYLoNMR@U-`c^ukm%r0fE{N@kjCaX7r)JV^ER^C|hj&u?qqcN!JfJm$@V0X{zbpNANO`V+UMVIrbBeK8*r#g&7#Rt)seM z0!-$Q!}4(=+f7O|#LV4o>Gw=Od3}?_%C*$c>~RfNFug)x9II!3s}6s=>-#(PJNuPr zHuJj4h=RX_usxZ{@hE8V13{-oMD&mS)o!vad19c`_XGtbdFupk3lmn3r6kjJ(V;O9 z#2#nZDf`tJ->EUK-R>{xj3SA*76au7X7M|s<@+Ox{;Z|%kfIvT4u$#h5%~UmN_VO# zyN&x-2AN z9R5wVEz#dX0;WNIiw6e?;PX(*# z4xahJ*3mx%ShC}aRJsj-@FQYkf}lHP7*th)=B?n=pw9n?9HY5gGCQ#bYqhY;PVB0> zNny=@yJczes%F+d^t0ukYx&CySibr-YxAqcCZ8w$52cdxU}*-(4R)cy@NHx7QRO|O zwvQV7WV&CDFs!oksLQ^_lZV5Y8^tYzC?1p0r|#){GP6i9%}(rMV|jfWo9f5$=Fd&h zI_gKXpcVP_>HYHZ^1=l2UFw(AKi)KB-+O!MI;YQ?awKV9(}ZXE(#O?XF8nGNw2HZ^ z4RdP@bFceePaVj)LiI%3uuuE5l>QtjN?nr$G1Mk^phc}#@j z;6aOPw>f0fG~heQG5@QrLE3rwCtcqQE|w{Mt|+YdGVIqe^tK_dL)^VkVo>s}d{pq_ zgNA;gaW=MOf7SOHE`I}<1>&BDh&r&>K^E@gODj&=<{ICHC`>44S-bz!;bX(_Ki7*!CVV;F(K>j6G#S>t_PFh7!YG*VpH!IAw6~K6iOJ- z9uW6~VR4Ks(7ho$@$%*kpu0euX9?1GgU5ez#!%w@TsZcYwZ)h83k_oDuiF>(#EW{C zX>0b_zwG?xsu!SY7ZaYnGrvA?mA2GUVQ(CO|KDF0ueG+)OMxPSmy0x>A#ja<_K2ZL zFgM7tpg}|!KA-5ojXH3`h#lm-Yhj9xBL){$BL$O4APj^~Pg_aOHy+21K`@K+*s-Qr zNfaNJgxEWTRxp^O*9xgYR(*GRw)_qn3d6jC$Y_<3-j1?9+Oj^M*7tNg&MgmG9^R(J zdG|aZF-y}l5U;>@5!Us6YMB-NdUkYSZL&smo{4O(s3tMe;X(X6yF{_-hg0gC6V zPnlhDcsFjL@~(bdGx%#deM)*)m6VpR4LjjnC<9eV#;8qY=>OpEhzqq|! zAOYF71vCjz6kk42Yq8y`ZMNeECuq0JVGK!0Y|vfP!m) z`(3(p$*zA4S?`F{L3nCYaSbLaM?kW82W$q36C$(s6G@1rrl5l$2QVIIphsH*KK=Ck@3PaL88MXrMV2i=XXdT3` z%T+dZ>%S_--+9QTEY$O~VE@dRNavJk{yy!m#^o`w)^^Ios!Oqh>*mYhVd;#yu}7D> zBj;Kr=JZzFT+4!awBY27cUz#MsSkB6swztcw$d067S0_E_#5w-8*;nRQ(a5>zY2UXw zO|xX1oiA?aTx^%yd;8(&t@9s(0(f_&6MC~N@KZV!-5bts!3-i-KW%w7{oj-niG{XT z_pKhicFeC5ykkfW7=a&!KM5`*c&RDpu4M1}Py}YSmZTQ9)7qY4j~fAW%2&n}k9t*{MY1-@DTx_+9*GIa)yu2{+c!%&p&WY*uHlvgKT7g4Dca2D9CVXO$v zt}TH*UqQbon6XBDBv4f@8%fh z6n67Si)q7hAkp%rg5k){p88vKDuw~mZ5P1D+>7c_m_aJHM4BXr%&_Z3COi-d@|B{60$_+G}hj z!Yp<8)Tt8Xk(S58ceoL^=#Jpn@%a_w=eqIFh*D?QS=5$ZJee{OF(htj?-6xh&^)3G z3;xKY-O#r#3jlGfc5K%;)ak|h-sVpo~55yl71t)E;=VBxmQ#vcJ@YUYbVHWc4sY(ISol(ypJ}SSmQJ&nwzx zK{EA7kz}SWT1&I*>2HRol1|)wNP|Cte_yt(SJl!RdT!ENe{#lVieHcWIi~s#CL(%` z6?eLq z;*4%IF??&EwcJ>8EJ5n)3U&TIu@5#t^lHC54LJpnrSTsn8UszFoM1t)KahGwPT)^C ztzYu%ol2=c7!xNu@hMDb7Qcw!s?-YF9SHOo&rw#Fe&5R;4B?=i(R(TQj?u2`-dK{M z00OZ>myf$yFr`WLDB@oaE=vAH`|C`#nWE*xXoon*>Dh*h5>YOMpVL6*VPCz^OsWaL z9z$=v_=qp9MQY>ed-`ch-DOg`X}4cS6#RLTqU^ukN=}8I)cxjXVU6S3xKcx{%-E1-~0|7FAHh+m0+4h>eBl0D?lOT-~S;WIPDVt z^$aJXBi}^cO?Cu#q>~^@X!tV!**jzN6Dbt}J$+Evus4<5qgjJOE`ae0v@k`lJbR;C z^x)R9}hA>ZSEd`hXsITH7z6knMS^22g#hZAT6s_-eoHhgq+Q3PKYd_P{gXP}tUh(3(J=dT zmVufy+&kSPwhcXhF9WSF!$b>YJNFs_CP57!GAsj?XjnbtY91QwhiT*?(6HY_*%yYv zSq#!%gE4pvkT67%dlZ<>_WAEldX76f`qls-!V=NM z@ExC0;Ik&7&(N&ZC3(C{ig;#>coy6USjTA})9qiE1Fewn2=Tb(B6`D#?_HorOhkBl za74r7j(2ttxi!(#=muH#*+YG10Spwob7YYaka7*@oOaU4ui z!EDB8um7IVi|+sb5RQT6J4m`sJ_(c~_-lvpeS+0&RdyYI;iDnJUC%tAY2tS4uwUzN z^#caS>Qf5lXT$F=Io@(<+2|=qe)>m263E@VGYR4os__zg(9^B39uuj48(T0omjY2M zl*%p3gmioVZcSczoKKnG2fd?%immkgD5n-TP(~2u0_ERePD%!y*q&?~KbJ!)I24=V@y<6HL#onTWik7e~Z?E?5I=Yv3|l(S1n%8*kCq>3su(8_)lxSF|e}+bi4B*!T{n zM((50OtQ~1E8ljbL=w!JA4X3-a2o$i6fU*I#OC!wF5Cwuoa7-*2YIPLTGkM7Y{?+J zh8<}z7P0!DrY9p%2{bMXe!5gvbZl%0dc4$CqXfZi8r9E-EB(?Tr6hfkibs!tVDGDA z=25aY{F(3WvZ&gaR@?WnQFJXEN7`vE^f-f8xU1JQA6M|iS`CCkajaIr$jvK0idO~$ z^smuBQ$MJCxb)7nvw_l-W^n6WQEQJ~px>GlwA~D?n*DE)yp1~hXDyc_xRgsjE5i;W zM~_RRqN0MQf;|9P01(AmABlcu*2L@WYM|Xs6w|)oSF9J2q)qI>P0y#YBa={or3A0oKSb1;Z{=*c99mZBD|xTRl`8=t zT(iI^75IEov-5)DB!AP7tkb;xj3DB})X{sGnj*rWv?~m;N)eAQo=+!0ejQk)90>Z- z>p#xGY)ao)F)R`^7=s=JC!k+ySQoM3@pf#8 
zgU7o7&3S)+KMA*GoJPd&nBxZ+kenBEcyPjs>*FzjsK-`9nqQ5fk>=r|BgGzD@n#p+ z9--tdA{sr3G1=N`%DB?dax)K>!B?Dd-u801hsP!7A7pg6I@NoEOlq_8zeY3n1+|)$ zC&4-YFA~GwO_=pot_iv0vrqJY;?md3jhraVy!5;BH}D~r4R;DAYyXBw{{ z>5)8Np2rM>jGvR_w&_WJ*}t(O1%>vnnpCHTYtPWL|3WuFo$=Kk_NjHINfkB&E-|E0 zvDUnzJ#VVse3|V`*3-%pE6bKG%fi0V&%UkO!2B;W!)Z?6?tgQm7GRS0wKQUe5P^Iw z=`P~4JVa3r&!=(&YS-tT;4yjRKD!JXoF9_sv2H_Ta8r6=umw90SvGiL!Wme@kOSHY zvN$})j}y%L6s2%QD1peq%$|oaFD1n_6lnoT2%;tjfriUANzZ_<%_Qk21WBq07@7f5 z0ls#6x@{NT81a9Xa)+##$n0O2iq70;Cr&VX{n*judNV!)62?idKSPgBuw;m`V#t&vg zG9Z+WK|{^(6pZcBWXKqzAt^M+Yj1DgNH!RMx1@F>QQY6FIYe1jiJ9Te>HZeCw~0UVFS4-)u~v z21&+J1%R07=r`X}fS>sSGUh6}r&yJybpB;30pC{HCRjkR9O3zvmkJo3S#X!WA75W` z7T4#sl&4&YRN)68|1Zg%BqoCH3jgHIoOBkNawQ9e4}ZVecy8Tad1(Dzj1K#6ELs3$ zCR)>-ZdN?rHPua1V9@hafaw@5d>^2^B##x?4RSS!Nnw59>t}mL|MnZ5?q2`v5vaGr zbdM;douiRBOD@1^yWR2Sa;h4aRQNx>wz4bbV#MM5*r4V2e`ENWEdN!b=wEQm=l8=` z+jq#()4xzZsG9PwBj=ZT{;}}_Kc{W3#3gIjVYjP$!eXwFJyj}JoY-%SdqvNpOIR$4 z{0S}eZ>CcEucx#YU{du0i!X@|%fofZ{!IkzDR3;G>|BevqO`lzu{Z0V-FP@)zej|q zRbt@DG)>R{!`zq0)x37?Z}ZT>;UF@nC_~X)8t4c`5tT|48Z?O}nw^7#k|=~qPKxF< zYc{31q+vIxP%4comFC~IHsL(yd4I!yKcAlSdQ*FU_xF3>>%P{tu63=rb)%gtzh$Ay zzYn|3VEtWreaXd{T-$lC{`w)~TxuTo-vL!rWFDzUUiY0X{jG7|0a@vUVu5bq+wGKn zihl>>$|uHKz?wh(>#AP2rSo5 z6S<5bLif*Qum3vv`$r7OV;}dJJxCuB#Q@X<<~@q4Z29DuqhlE*W$*sXX*rYA@$`Nw)Ws+JFl{3=lF#+-gz?4=1e`C1GXexAv-Txzs{C% zhR1s#U%%fbTSahba?)2}OL$k+y%kdk`%~+C@ghL)DZ_p} znsc5(aQF$TvxHuiuel|k9o4q4}N0I{UOVdwKgTfoiZ;*u5&6OTWaw zaG7Cuo3Y)*31Mi7eoCwA_Z?@QL>JG!cI|5TrH=QBSZ7jkYti4^vZjC48XudXC7T}j z+I7E`_6)oAzpPpB{57g<(EIphQQFL6iSMf^StVYc;e>hw^`64B4#z@$Vkhhjq2$;= zT1W`j9;0hN8iEe$@y=}Yf}|0`$Rp_g6timlh1vk93-hxEM1v%cM%&uwR^}Wum!U0v zzgxjK+h#s#Z0>~t+tn|Ebs6Me)d?l8nt`d%6P>ZgKUIZ9PRtm11%kqU!I94`J0^8p z(}V`}8_T4r95(G0+sv7dAZtY{qNrXSBwg=MWehEYNZ~x(wN|fLGnKLc*eVo_B2)Vv zx-JOecMeg1?}7#A(Wma;bi2Tiu8$fF%oA%Gdk0~;M9B^qX=*FZ&dGWD_+?`%3q;QQsvmoxneUssCv!t+|Jnzp^olXTRG!c-ieZ%z5T&(x=63J%Nr znSZQSMDC{6#?Owx_E8jl`pL*ETSBh(X!x`wBSZmM5pZ0nGlTm7CD@H#mREe)^ z%F)W@@1)Jg4uKvuCJ^?wv{MvM1@h5@CJ`Evmx4u-^*wLCCTGGD9_ZO$0C%93aHwQ3 zfd8Mvf3pikvbT^iPGO+&Ys=daA@7>w7ix}Y9IKz^mE4rwB>(i>&z(QsuamvUt3+`Y zlCf#dGwe_o`b}N5(!?VIsg23R_BBf=oXek>_HL5$dV`DQA-_VjE8S`;!;JrED!ucg z%06EGji>ud@9+iL>@C&&XQz?NNv-Cd`RHoXJjOQxZmg>X1m;m|JkH8uwCHDWzj5eo zIS;#ncMzz5L91{vI(H@eDvnaGw^N@Cpb$CFLMTTK#$xuLdFcP8-dc2BDBiiha{6-a zH|8Qsgg9nVN1t-J6u4N3xyU2a$9{-$2FEus{iqsQ#1+AgDBAktelb6#rGSzv96wDE z;ustH@-;LT-^k>hux86X<7t;gCcL!QWsYebo)%u%lPAf{=5Cf-slvN0YO-u=`ocf< z85A)8=Dep|s{iyG|DsJct|w63Atw^(4V#>t!wl|4;Tb{>F1u~pHm{7EOe|db=K5LzDf@d4)kL^ zx7B5grM^VtVJ9@7uR8p`jW!iZBs84AEi1V56rfY#rcMOXAb8 zQkmi#u0JmUt^OvJ%B7obnblUdty!^T*| zV>Dwx?a9BZF5l9WL)>SaVVOr2W&l(Zo40B8Ef7v2jx0hO@iYd^VHikBGQlM{PJK0D z<-cl0?>z@rd_gCrP^;v>(~xi7YFlsZzqHAGdh*MGPAD=!FE4k96@3$i+R*;kSxVmL z0ufkpwXiN+%YpIP*O-HNNeF#gJo5M)%#m*H@rOomGLqb&~^^ZDHq;_l;_t#9;#Qe{d zE45a0YOM}90Pcll_qBSo!tc@osrhZ>^PUqv9xAz@d zlYT9~&)!^9HLQLA&ac0G@R8a;#_lv74{4{t({aZCO@9Y*^NU%cZ_dDd`zV>)qEq@4 zG=%TL0ew#XEAN9W+TUR=7;W3G2|K9IEoE#_@{@oay`$uoJ?QI?iaR)snk+c!WTJbY z3eJ%D=o=ZWMlb*`@Xag*dj$>S#q&JA6;hgG9;ou(Q{}z7D2NbH=*Cr@YoF+x()y=1 z#GR*qb_{b!LQU7Ja(U2ie_xLsS8NQ}=A=qhkC%>MYrKq3MyQgH!mzXXO_Cr4Yt(%a z)$?D~b_+o{3fZ@lwNPm=JRni}XKV$9L$X;YFYg5gP9~QJ5?mlsI)ZV#=*IRaY3bK! zH8%;Ei!+WBU&*3>pG*DwuwL5XM>VJ;6}v)-r3%@}4Yhab#&73!U(+n^ly0}lf~x38 zE#pslpDC|;y1ON>H09qui~QzvZ_0<8hfcudRLD3VWy3FKD>mt!sq0~7HJ)|SSQC0V z-^3aLY5~{tzKYyrC-$NCkuF%dByRB@$c*yxMXujA7u%<@SZ{W(LiV)jk4<0fUbAx? 
z5-`R(zjh{HyvS#2gI*8U*}yHSRXY5Sk8hJVP+aKnJiiYDxi+O4M(Z|+9heKioDg}= zG0n0oePX+^=G^Zkie&w7>_1l8wr!B7g)EpGvY`0BA8;Z{9zkn->YS=}lRoP|q?GKF zSMFauxN(_ozm59zaljrg@?$~-$G(3;PBSYJo@EwYxyPb-S6Q5e+-bZ(2gEM=%cGqp z*Ll{>UpbaEy?wRN#o~~;5$xl-z~J+r#v;dy40q&#GgW5d%lbGnw^&t?`Jyp9ky5y} z+?m&n+KyPes;thH-ktrY<4SK_^(eP9jNK`kZQ}9z5-w=t@cgeY6uER98_%5^Kk@dS zt=E;#qk8{MK6*GVc9zQ;+hp^{b-7{EH*|vQIEW=KqqwI#-Li-F{r$%9Yb6J`?7|S90dZ5mo{W zVtJpj>92aw1=Q%p1f+%rn4hhVub%gaJ zfc2MsDbt%RW?SIg9#F(^82JUln(4PMe}jtIqElZT`Y&3xuhF!$MJQn63}xh{#>0IO zOWshs(D~N>Y#XFJQ(5|nzmT0ybKB#bbu(`=lB`ZfN~B0hn$Dr%J45lT>z%R$uP+a; zYAEsl0?bH^cnIjs`eQARdJE_US>!?P$b(uf<;4yeoc){6Cru=!=I!c~v{J3#NSGY( z_%+9g^sC&M!)9o~swN;tccRu;3C3O~p6Pj2YA^AZ%Ro_GXrsP?dxJBnNyi zOEwpO(~v?YR(QlB+|dzjswhdCDX}ZvI9RV@_jaTzKH$yzvr@_L?zThJZNsTD6G^Nz zEA+)bLL=diX3m9(vK;NAaF9mJ9P@qz4gr`yjoI}k?Hd>vAjz6-$D2#kLIY9?0`q`_ z8IT7Zfr}y<3AU5)`E=J2E^hAl3d!-E+WdZhuE9k%)zy3|%N|iphG$8g`d#ZOd$)J9z$|22f`nO z*k9xWvv%z?YG6Y*!mmq~urZ*kl36<7LXn{Ju(ovPDS|9EQl%-d&p zvZE^IB(M?}vuK7r$^m3m2rMole#RO0mymP) z9-&4R6zV5LEB}dFCkFgj4BrP* zgGX6KtkIl&LqW1OLR-jT+)wEtLqkJOC?jF|jc&e0TWmL=Kgh>vjx{;#e=Wh(tG4(z zD`fsV0cmh6cqbc@w8kp!g^E|s@l9o0BJ-?8H4Z0896mLJCw8KN8sn4yrN{%kl7bbc zf>lKZNnyW6u+I%z@OSF2q5rrEqwhCyxU*Z2PMP*o2bOGvn&?34pg{>XsvB)Wbb^o- zEG%Yf8>DA(!&?L?D)}&xc?0^XwdQe$v~<=xr=ttZ<(TFof8KGsjq$E2RzK_Db%m`< z0%T>a>p#@Ds#N<@O))Z2CWU#oJ-!_^?lobP@U?UIB%`kF$FARXh!c%5ayYl+oR;mV zOH}`Sgyh<-mXM zPpVTa`wa$Iu*ii)QdFyufyH;7ZoqE2i#%U>-@!V*eV$x6(>l)Bk1$e(&?gwQX30{% z8Af4kI6ShRa(dN8e-M~(O*0xr1;vz$i~KnLUbQh{5;uh>WiPEyE`K}-415dw!0uyt zaAgx_YMs@HH5BGOJ)42BLrrzCj;d~xh5c+Q$>Ze9G0E*ozZ6T%oq1vD3h$ANk2m!u>7}FuIu=B$W$Lj&g!W^$(I|!gZBj#K z7%x3mc)$W5eySYQbBe|SG_ghYv>eES{1-tHl^%6yl+pW$K`ZAeKZjc(oLEV5m{urw zd(ZO4?f=YZ7+Y}!Kv7FN3!GTca8HA03;wY8eH#Ce7x}DWsxEi$inzGEec1cjDbxuB zt06xU-7lTqQE?2-?<^_NnKSj%6ox53X67AZH$B^Cvc0v)AcZ-lt}OQf_Xr6vls_5Y zNHpLWuj+uGq4@^n-*@ZQBAPk^gO6osBPk5qy9{2Rl)qs`{VG>nQwxDO^b?s);*3Wa zP6N}PTqj>YzgY~@^T9EY!GMwUHVg$IgKgNf1-OijlH)jp+{`wkaX_+tz8TdielNQ? zOLbbCX*Bh?sOy|B>P}c4ofR*3sODa{qxqw>+{W$kHJ4Cx7t>jII>Rc_&MQ08CoqsEw^~H>{&6tc@H_c-&kw!gx1a9sX*<*7OR@o_iOa6yuCzA zofE{hg5xCi8QZ))cvk$aB&U#qhg8)Pm<)t0fYM{*$Rc`B!8cAG9mhpLM&ZURCZ21D z@d&}X9d6H8XLt14w4V&gnR;ROv&wZ_pnPG;&&`M;UZNkvEP_qki8)N3Rnxy>Wo_4= zyln#R5muJHKAlZD!1QvVg)U@EcJVc*uBJU*-hoDKt$BKz%9c(GxXyO5d#+i3?r~<= zy@R}q%&4GeBKIS`m^)W^C2AL?uJ~%6kXDyI`yrcwYC_eS2xD%oAfOZ?Y*!cTRluTrP=t2&d69D>RwggyUWXf%u9 z>7N-hlxyNq&Psevw$dvcT>1mJU%aS!DfsH&PU_REE}Wm-cF5eyzAdlAqR7^*W=d?q z3@hUW&MbHqtocqbEY11JxFTyDnI&)dUb&#lnlbaAwjId#mg`4(m+2gddDd}V4?^TP zu@iwW+D@5mV{i%wY{R*mJqua!MT zI{2rq-?K8p^(z&UemVKK$&2%$lltN-{CFZ}B`j%~^2TXO^;b%3T+Vm2RdwceCR?y* zf|0{@wJFN0zT)J7^}nF4=Gf~%dAQpA4jD9f+`96TA8i#THqe7Ed-nUQRgzqKb?ba4 z{iU!(Vb9H~HrqOf1n*nnkG}7?;HOJZw4HFDw^_o4yFm^*Q@*2AQu?2(Gqe}^>^PMF zILCt?ukjM?>aA)i1vQN)Op=z8e5!;!I{OZ#Ayw8D^DO%|ufA)++NaalL$&2=g;H!;4 z{PUKCfa7&vs-&uy6dgafN+sgk)3**W_WF;GorlHF6uRJrCRH^qu4{IrR^)$+>pZ0Y zyST2HC9eCL9TaE&RGNI)U(I}H{dah-+Myk_V|Uwegr9iKHJ@W6kC5iX!rxW0P5x}_ zjjb}xd%5!*&!@1tVrpvV``&3DQ_Vk*IZ7rQBSxc+{}9-H&vh2|N6Qv}xt?fV{TF&) z5i&jcoQC>Vty?z}g?i~C!G9(?t-Tna#0iF+${DcILT&0S6$jw+aM9g;3Io$#R={+( z^$l9-CjxLPCLg5S^6Q^uYn~rE(0Fkz9^pe$eP*Ji)v-0p&P1_{qYBjU%sxVX3-{}-8$kG0JB(m_D_@IxlfIP8oP zy;9LV`?KMFNA6F_a?4Zx;Ca8QL-t0&rK3TrC><{#X~whr`i|D-YC}o&R{nD5!_Rl1 z>$5qxICR2ivR8%Q6wD#aEg2&uarE%O$c>lyTNh@=fq{P-=D%mnb1(wr_xc8`0Ewe8 z#Lg9BlB0lA?o*2@-+^|ok&Zvo zA@qa|Aj)ZzNQa7zgjuM(WI<$~)|qBZu6q9#$8G!d+?s*|Fc^?}g}w(eqtHVJKs`Ar zb*RXlg*7J!%{21z-}Y~HT)qTtL)4w#SxcP!^mdyg%380E2`Q%k+yKHZKXHT7lVj7? 
zwQmhl3?9)uQ><2P$ZjGtNh!qGMQ|Ep!aSjwD^+GR%gwMy0ypt*g&FqI3ECPZkq0Km zJeZ>h5!3sBm!I_S`NLxa^U%;AbY64W>8M*0y?LSkO%EsjZ#|si?7O;Vz5SN`Ymtqi z@d)YAbT+o<=*{-&E#S6js!bA~`hT((yziFQ!7UHH^nIPa!KCoMd0w-ESOq2?VWy*) z_L)lMo|6x%#_FmK-~<%mU(Cm;@UZ)fmUkC523{_<4b7}$i7)xT^&BZMoKS$~;p2OW zXi#Gg*Jk2(V)SQVxRXPXr3{4l94Fr;8s-sR*W1E)$GpXsj z33BsCL(;gv(0}+NvRl<;UF1CSY~n#_8*vcR|E z46IIws3t?$7FEPV)$iMsO{(v~wPHJt8jLc8S4e4ilG}INgpQaF5HaLfgA(5z;Wb~Q zJN>u#!&WfkK?hbTJbWfR!7_I#w5y>e6I~l8=s@h7nXA@iY;MiSgR*l ziIwXJ>xNB!)oU+>{_OhV2W2L6G2`mEE^av$I*?!`53$SvVNs0=4LK60!o3%U9F!=e zM!vI5qWL4pfyiYLww+|ujVzw}9BCKfp-49w3EY;9se&bvD=b~8I*i5mKlvqaFQ4<% zM>E*F%6pYs)Q)iJPim3KlhKUu)c_#g-*jRdH|zjr*m$H*^N!m`)KdryX}GzTaz%@>wU59|Ey7Kg7EOav;;{Pg~a~1Jm-H_ zsIl8u|A_b7x9uFa43K4<8)!F;-g0=;K+&(rsxu+-dKv|(I)9SZ-t zjRea}r}EwYHdE#65gfqw_uK`Le63uS>+(q3lk1kbQpVzTascBUP*@Ls_nX{>C~%Jx z?lEhEb@<;dk6>?|Qmcyg4b*V{zKWo}-I{k?rbH0!;>AB*zjiK#%x_)_xza&&1#P@? z27JktWqdPW2Lfr=$;TD*^_D|?`gz?FU77nR22XdNp0wv!P8Lpc>7M~VuA5|MmyPQ>{1LJ<3Bk3e2F}Tggi1%KrJ$ zgKy-t{`p2i^$9EcGF;+1GP!d19${VYgO%?9QH3;NJTQ6bq3d~$H$D0nm@!=~;Um%P zn)Nf>pIgkm2b)Di#l2!5q${k=Qn$F>hWkTnZuUXVrH{n5q49Kw)N?|X+IoYgi7mYf zFC8YoF_pP+vHRv@8>A2a@p=nnzc1N|g!Q8o=xB<7zRr>A9qdt`B=V8ZV~v$)r=L52VouKe>e4nYglE_~zM zhY9PLrD^`L$Zz`7by-a}9DZHG@m^Iey#wf!mF07cOB}z2iGiShuEy{yF9oZXT)&wp zt{ogF1~WRwuP5{^uGMuxZ`b>0tI4T_4tvg8cC(~>0QlCUKPA;z-~kI)O`rJLgd1NH zJJF_fgKNf1lW7Wj)aKvxxi(bL-&_&FUZ^HsIHgKaZP7Yv4gi84=h)vrvAXn%M@pb# z$xZZ+CqwGTgN!>s^XGIMq)Qm8Ft9FsCZxc%BY5AS+c}@V*2spa*!S9$O)YALuG%}Z zc2izjsTt@hl4}aoU-e`Y7)GOZt&T;*iQG4erQW`^U)~uts!xe+>OzuRg7zzMZfeN>o=OaS;D5> zXaT1XCv5|5TWUX|>8m2<9JQc-6YkFZ5vws?argK+B2dbYi=N-qUnSW65OS*^(@ zrZx1l56($IGRzqt;IY!-xn=jda~F7!yC!d^5Am;Z5`BUIO^ECG%9dY5AKC3K6*~MW zX(Es?y-f>Gxa^2x@{+ zfNeDD8=?|M<12*zZ*mOJ2La^wbJ;n>Dvra&(Ik>B^3a=Ywz>u%+!sz8uhk!(A(jw) zwG2ndj2LFxg9Do@lqSdNF>3S$4#VM^H5UE#ZvOZRaX=rj-m|5T#)4pA&#$068D~hjhXUB?Ccjgo{%Kq0B$}pI zPaF2lxB7EC*I#>NFY^9%XUe6dzY=$uhOXi{!Y7>iV7_qKt2-rqK|!u2#r^?}cFO~J zJb&fd{`>Ydw|!%sT>`~zxHxm<%U3m}N3CbqaA@y!?5_+9TQMZ=(CG`;ZFx_aDmvG5 zu$P0K6+{-EX*)cq-&a&zO*PE+5;=ZJ3~Bt`%8BgI<%~AlIm4JiQtOiLkV#a$(sC;9N zw9g&MH4gAF@N)oK7%)#<^GoT?{_<3?s6B8cDc~8 zeYRD`50N3eq_cesO44 z!O!(~G{fAENUW7eX-}xu%Ig`R#&I$d75{MkfQdfL=w)C$`dM~akB^q!hZ{-oe?Oq1 zaa+GUrQ%TBt^O%qQ(#}%&+MId&-&X(zt(eiF#BJ3hV4O_%c!;a)xZLm(c!Gix%%Dg zIWHFH1m0e7TS$<;#E$l59MpgBlAstBo=vxEFxGhSNTp+HY}&SKQ6rorzT@^;8uqz|G4^mUJh>%w zv7q4|7;*VRUL*-2qNZlzaj7gC)mQlY9I5N{T{z{Rx{F?X+({WoEPG|8BDpYRa_@c( z4e_k3j8knDC;v!yO9N+(8*xwoLv6z!J)23kxxNtVosJxu%^}1CNmVW!5=t-x*bKK6 zZJz~N{RU6-eZ>V~@92_HY3L&^-Lm4Q%K!Fm*_gIm&UNhe_qY}{pw#zq;PmUumxqe_ zlUxMs>>m_{=UxtVz3g)rU-|ZPMwq6RfroDzmvJq_TInk4)2%E|SM?Gy)yk-{Z5nxx zI^+w@nvC)(gJ`9&%-@{VfDs0JV-4$urDn5P`t!TQ4QiT3+<+8o6D+86uomv1lv~&< zy*?stKm3RPBK<8edQm4V4=pnLg&hYR-mt4+7mj!s&g~Hl`fuz1$0CSY;=V*w$>(Ol zYo|(!+Eclo`A2)Mb;YK8kJRS~HhxL(s~nRyH&lI)ADn-E@59ECy%_^X8xDz@U+;^^ z==N)mxbwiNb!;?t#6?=cS$!DXTrx)1S!Yy-UMi{Nj;kM-gUhI zraQHCu8t~j@(hF-jE2DtRSfI97^PiAQ)QQ8nnoqY&h%guNfSoK=E2=F{gp=)$SGs^ zJdeVQv*?Ei zHkUkvHqE2enEi4KjXfU%y_D{l&Q0IcFYk#FwM}riEF^P2W;J>ty`W>}4~t0997Tht z5w5cmy(lFRoey_=XJ`dyF=2xg4u!EDXogrQ2e{=y40NOD-Uw@1t>B{*S)b*JXE>^Q z5qCrs!XCK4ju;X3ZpQ_04%6}%aw^Zt-g~Fd-(~dS3IE}`YC{z@gV%BV0>)MBc1`sb zeAU$=7LrWMG{Y|G*7}AeqACf@Q+No-J^h5s)$hMdz|mxQ+u&wMt#jsrgAHggEP_e8 zSRFphZ%SgT5QD5x9reUKx=m;`HKZ9CX_)?d#RhG}1{apu?t=9>K2l;JMl5My!ZAe^ zBMg5mL}l5({>ql)SB4btN+!MBQia;!ZNy|l*culqtdF?PB@Q!~Sj^cpf?;P-e|mqK zmTjfMZ2<#?*_g3;emG>p*9Vys9+RdR`_i~1{G`40G2tR7w}9*yA-WCqN$FuueLJ(q z5>Jl>gl3-$!GQUe{L71m3QRRZu-m4~f`!>oo?f>p*H{aFzygKy z$4@+FeEn4FV(5-Et`0a^m2k}WhPU6TN}XQ+?%|9&rcSKZShq};;_Yj>p`7(H{ykj&jO@sQ;F&h#G^v 
zwmx_VW8Ahoq891n>|bPiV?vO8Li2?M)wcEbo&XNN%!n?l*t) zINJ1uWaQj<@x5BB)3uowq28oR9}r+pw|e2}?0zdPg2z=;rhoUK@Ux}LJp;p$x~oCA z-bBxZ+L&X;$AR!D)=yfwe_{Q~a9c_sUJ0i!3`MvL?g(4CZ`#SF(H%8c*J)!3UZbn5 z$x1$TM}8y6;VbIbnHQ(dTK76sUhYnu$<+nb2@0`>?HRUpp`o1qd+yws^HMSYOOcV` z`~0b89|mvjUE5H9sYJlpUZA6gA2{T*ikezueb;Y2T}BJ+!n$0ThlH){_sSJx02JDqq8C*K67wTUL zj{nml0uH|O%Xz*>42q0)LAf5^N9$%-qBhQFInN{Tua1niAV~#R zCfrBW(#ZH2--nzwBH9Ac06%Y4By}*Y9oqQ~p7UWS5Eb1C!vP&G-sNbaY%zydti!dy z3Vkm{leQFW8FqPB3{eZSuLi_9$i-W(8_!uWjXg?Ib>em8~q0z*>p3I%_ z!Je`4hu4ar9K$JJjFXjPX!*$(bMgIr!6JO$dH739la;Nr^8CyhYkl`|@hXYEJJ({4 z+H!F1DL4$3RRiYcbIRj)c9v$FLI`{<5aMR*-UOHlKiajQ;(&0UIo=T8$D8*itdD#g zfbU0VEf?-<2ZMbrkfx>Ke!`^`_YI#XJh~Iaz7`iReNfz=T6@!Lfz4>?c6WjgS3BMe zm3vhS8l#Dui=GSJqjicRm4UcFIB3eM>9*qjpaqCokqDTB4}*IjUfTyR*#+{hXqhgp zIE6d5(Q`g>iI7^v?1mUJin~?}P7Ov2^FjpR233B62Padyn;w9oJqx znn%BL+Wpo@T|A96)?Pi+>V>z{66{13wFNH2J>flpOpez1E_NqXPPX4)?P@hP8Wm8o zF1Am*>7JEqZ(({=vwgR=TvnR1G8Zogf9(F^4}mG6V?wPlN?5ZaUwT4gwN{Us9&Wl= zxvUkFVO-BE=k9zw>1$BmFoQ!Rcvp2}Y;9#tsK)(ay_yA~7DGcx8wCy3EgK8daKcwN zxaEcpXSfIz&KbSFAw#8S*s>_z$m#lU;K#+?{c9HA2s}`2sOz;|JNV1qlA^>{-!Q2o zWcC{9Wp!Nonsab^|IX!hT$mJpltc7*+kGX+rTf#q1bZRbZG@Ih2}aXS=Z2V){?fCJ zb5eHfxg*4jMi<@DCSZiYBnnL%J~A=S$CZ^+9KNuOBD<#4k7&n-XouLFG16{PiGO~L z&^KT&-P(#k7Tut=+Oq-3CmZ9%ZsC|n+Ogq9dgR=?G}9MIw0aUVMm7Q=^~~*Pyx8L8 z{n_*dLOkIaLvL+cd0i{47Y>Mt5kT$g6hZ6xa_=ZI>KBb94sa0g#mbec zH1O6A^?1jjJS2VujRwZpQy8zbufF%sfuOBp3w^Zj@wa*9`5LlcieI#+DU=x- z#<2SajFgFJ4SI6K3dku4H~pnQkbPNij9&B*7Hc0}7TS&H*>!?j4W1q; zLG@cavL8@xG17T${#grskkI%d8U2V$0SvkeTB-p`DJA9kHfciY;Xpdqk9tUnAY)^_ zSk4-s225Ou!i#bCC`zd+&lj)1EchBgL&&055@#o|0{D~~TFoYqXHhLPV51lZN+(ZjmfYOI8; zK@y3B-Edb{t3B^NqbpK<6=aOCSlaP4#^>NI9;rKGM%7-jXHBUaiL|dqEWQi$+(c*b z^BYg!3t|*&-0>O-SmPKG^V8CfJW0p4;_wB$y|~R|3H8tERlsl8U|rwzP-euKpa)kD z4w=_DfN7lRK(U9j%7w59f9#cecQJKd)>FfPvveB9)Fi=}f*w2_o;GPq)!QJiyzXaP z#Y^}LpMV$I-@AC>hiK+QyQxz*$OLAur(X16b=~Mqv^vT-bR-$kUBh64wHX5i7A^J~ z`|jWrIKrZvKx5AhXXrmUAmhL4)4FxeP-T@>&Dk9-tGYlKY0qjzZY zPg@uUCP$cPFv8uCG1JuQB0`bOygxHrkk=IM=1*%FdKWAzg0V$(b2Km@9Wd<{S{KDI zhRY7fDIT{wInsNYTPE^YFJ$c!Su0!|m85!0Y2}M!d6XdkCdg)!CaKq zAD$dxu^Oq}xf(FIvc#A*1I+v`jB@S`OV(MqK?G8uBd|mD!R`^or&GWXy>5KiM$G)R z;Se^tjtH8p!?^(?mg4i)g@t=!YG)~3@yF7TlBF5HKCUte8yK9}avnJ~{~XCrTW3pu zd7G>=bAv6qbC1wQ9d6z!lZ(%N8$p2|_*#poRC-`)Qud#-G56^Xcp4)>cRNHh0zcMh z^PpH(gXGwoYXGB5w2uxE>C{rq<2`Yi-VD|Ga#tj~vk7k7L8b7TO6fZDN-u$|fkP2~ zcY>QDI#ar_KnBSP($H18&;+)Goj)-H2CV~<~ce-8?%w;pKMTWys zjl~61rsUFM*L#wb+R#31iz`vGNt>_UMl>l-mmY&5T7!7YD{7NJn4q0n!lvaqI;)#j zjD6W@TVun-C2g>UrA{7w3{dZn?5hD)dTfoAkEgd`^N3G#BM7twB#|ZQsBu{cCoo)9cZ0 zq-+1&QL_ccTb6uD4W3J_Uu|QZwWD1gE`dC*EE0M1k?l&7T`$m$x_;X7(|Y3v3`KDY zz8}#9imJxVlD#rgL@fh9sGV|)%=(5=9zB-o&R$(<5*E=|kq*YH2{{FoIBLId*i7%xsh8Dp> znZaU@ZtOb)9Izh|14OFLVtw)HKZ2a1%M3$qy#ArDr2++TgtHHzn~GLlSh}AT2-Al; z^5{SW*jXPI9-}~^GezEO%?C{UC;}6C7bnL9pruNr>Xt7T8zJ1Sp;%{z;lAa*mbtu( zk-Odm_1=b867PRwMFco)m3eigbQIBp!ZXOhiE*<)qJ_y;uSi#6v3k4{0aI}huJ#gtH%pr&Rx&Sk_O-ev(-BNxLjKlAZD7eHqSe zA+w4{k>1L#oeg=O}tB;2tWRG-SbW=>$FA3|UfKw^&aRrqJKsFDeF2DN|8espfA< za^X6R)9L}|sE^yd_ZyB@bGcBgJ2uD+PinkFW07IOo|cO9M)*p9k;|yyw52DuVlK~J zx}$X4079;VCM>unwdFn_2JH|GYpU`E!HR6)*3mlPh0TPi2zV3TqrD|yQW`wPKAb=o zF`KKehy~am0s%Q&X804-pAu9ho49Na<84YCj){DwE$!C{CIMm6>lb27B@j)a%@}Ug zW*@T{d3tTyhIGCh^~WUGCJ;yY;BS0^R1CqP=NCEm#nP#uX6gs*K<9QF-0;di*m(}a zJY5OK!gOIuBig@EK6oMn{GZ0>uVz~{V=64GY zd2~@pIQ6{p*Tv0|XufBSv4e^}B8oV@dR)f(U99xo@qtjiI;5Fkb{pBV2oAqDvIX|E z!tw|+F6TfX?86EgZdfamgDvl)JKF6{WfeGBGaE_|?2TA%R)pXW+lQ4nywx~_ZuM?C zNEI*~<9j%!_UxB?AKkXV|4Y1c0^* zZ$es7fLV!mF&CzRfIncEFC5k-51B>F={OI3z_`@o>C2h;mM_&K;>*;+qDLI+hID${ 
zeZXEn8?9h4(hpEcFwUfG9f3H_c#|7oeRzy~Pwv3HgIjKV$-ytvVL)F*#01_)pNM@b ziecE6B%FkDX!YoqZ@GmcgEx+X-^4$^`aKp{;sf%-0<4xh068D<%j(VXNL<1OKt;!Z zon%(jQjI3IP{L|>|60aYCm}p4YbK%-S)2ql7M_k^ zJw3#S-dETjH!$&N9pPe6eY9s5QSASHwm26?lpE4P$d$8y_r@Fkc)WI$E5jgQjrY<0mo3#oorcUwd1)!Num7Ggv`Oq?L`77-9cw zrj1OK`LYchxfe!Igud*vqp~dEu2|NVH(LNhxCVg#np(vejNN|%*6_mVCu(#E3ahQf*qt`)~mWoKFzSnupR@k_C4xc(c6oV?m2UJ^hgXcU|m;a3KfM+Vl9I zDfx(O^mpu*7pXdSpDPD|>pE9Rcz8a>6<$Tw8<8xz*AwF(#kaIQ`rSQ#7&FNffp;)k z@9JLZfxy8cf^)ErX@t|p-Oemwq>&u_lZ!-#9PWaqlc>v@$oLXD)cuCoC7L&03H5jn zNQEX8$HSv72wr>n4+jo*(k`IeP1RQd;hqhMvQ}z`%;SNJ!d|{fKuWMSXJUc5uCRmB zP_jnh(rJuMVM1du_peB-leYwkc+jGiXzA`$9#@d7E;)#pz5zTY`6sK*6Saw)8@W>p zw`4zDmH}K}gVU*iV2NeS_enENX_}+A-87^=pFem?%2O`Lr2%@NDcOm|t7C9#3FU62 z1weXm=;#>A5?u0>DF84VqI7?E^-x$f!Jq)1*RYjJij2ov>FK%ZnQwpX^zphJp`ny} z>BF1?j3=HQ$Y_g0hZlu`T~@N&4}df>{$7`d64ms1;npDcOKe7EudcCy`^w5j?;EfVF^r_Eq3i z{m3josVsCpI%q&+&~%#l1=GcopP2U2+52ECmI#u7$6Kr20)j{TDF_lG0L5~PYJ-^P zrhw;7(Gx}0jh{4elJ3WpIZDVQ2k@_VI*;O+^J3qea199F+*~xe>lJ^-lI09Nc}hit zaAxCVP-MkhG$7JRN@5B^4s2415jb1{i+4BL4;^$_Y_%yK2 zdx^V!thB150|A_jOJvsVk*-cRPv*xnodQ!N7(M2ALR6v_JpJjd@5z^o%c?N}K<8)% zoiQkPr>i|A zJ@N29V1k2bLd6lcmUc{^^KBhPf9_%MT>I;_o6-Hh+mixqPIyu~wud|ttj573=H@_U z9LC7rV7Yl7=hQv>A*iSY;DXCh&0w^Ip&@{I`oaJ%2|l!{Qc(qph`Fy37>Jp085`}9 zbOyen+_*s5J_AdhEme&LR{&+t42TRZd#~%f)83Pc(U9K2nc&UItYsA5!V*0eZRP|< z-s|q<^6Bh~KpYs0B~O1^nzhlD6V$d(XD?Z45>pll;R;6f6ODYL<_-KxQ`0zB}l(X2q16hG_M#S?rPm|qo~yz7YMOX63>;5LPJi% zM~q`R4uS`ieu(`Ln3SuO$l^HBYCbxNv4J-dIX8z(2Hw((3&G?$vz9XVO4QYQFh-G^ z>eovXU9fb^#?nzRYsAr*4}&$ew04kqV^to`{_Nwx?TMo)mJ~f2E!^hY;#P(?F=vpP zO{|>OmJmIL(XITH!4i<7nj~oLYzox)AZndVSe;U0MNz#QM3^B|ZM%^h3L@#GESe5q zt7kLT<}(M?)DNFuike+gbATHq<7SVZPEEqqbtBPfLS9+Vblxt7Ep3Rq$U5DyLS;}_ zw+8KI347N3E;hGVn~pc)#5t0ZF&ujvaPbf=3CbZ!$Ku(Bs$&u!wiW2M4+x}%m=}MC zqCa>u`hE}IX*GD!QQ0UO!B{}aC@kq3F<7JkrT|tbKN}&)6@Z4yc$GDUp}wDZEhKTf zY2VVU^Ca-fv4{XhAUp0hjS%rFIsEB)B@S|{>{S<7$2Iz<@1WGiOSA>WJFXmLa-M{~ zWst4*0CGG4{!e=M*OJ$mwKzp5Yb96XKDM}YJbszo z8{<_7P7{|#`Eakx9tNY7oe(MYK@)-~pomJT0G_=CEE&Cl{*PZ7@zyj686NG=D5C45 zb^8&8aw2`{T=jE-7Y- zgNLL2`pZ(WO$4$cuCOs41wO_Tr3|(KA4Lggnk6!@#sD|I!Iwiez`?W$C;&E5M4|0R z+`~uE*?f=R{@F4DxxqE;s!cnW*{JYggFraIIn)-rm)O_ygXee9vR7S@$s8FJXKa{C z5LySfK|EcQRg<{ITJV_C-c;`)qlDUYtGzg5w`3hszazxza=5Gj4UDnA9cEZ~HnA&N zMT;|bpXPQjHi(c0Yr?dpS7DgZgd-x4>Jg{|G#JSYRZd)HL_2)b;b)Tl8i?7%M^z(E>>>ukB(v3etJB*puO@sc zUjjr&AKI=X7NnFxNYKr7#&QfseiRWVcN0%xhk-%0mk*o3xF?dTG_7>nkK)Yhdq^+8 z{i&Ne)B23uFT`2rc`&(5@H@_09*v{hdR)fFT;@@nx@p7&l;bkTQwLbn(6= zlrMpu*$`j4-?%tHj@TL6)mCi>m6f~VS-L`Y#0;-@V z-D~aMLfqgRKvtCODdN zuQL(&&Lge5MwEkq7Q2a{Uno3;6t8e>v~$c5d1pSw3YtvH?=rC4`Sbl1ACT!EgpyN| zIYen{#+OX~*Wd`Jj2+&%0|#{_ejb#0RKttk_U^$Jqq>X%gk|||{OPxVehYg>>Ak5W zL|O_89Njek2^tVPTDEX#91E_s04}J+oURfO9ll6%j6st|f~9UVBN!YOT86iZw>gea6@ummIvLFcb0<%wNwKXAteF#RfY zC<`Y5SA4)vTDS}y>P;Yi<`^Dg7tSp;3m=Hnq9Np2i1VRCG|~z;4AhO>W^g(KPRAHq zFXf+VObOUjH-XU;B_f!|q$(#QGN39FU$qXBoa9yiNGRSe^%yMvxCLS{6 zGq@&S0>BqnUkN@rhMGLW4etQP(CMU^V@E0hD`lK)2bqs{RBaQpFjTv#s13fUsQ2iO z71-ibC`!Wlo!<4Oryio%B2rwEW|IO;#1$JLgdo(L2A97fTXK*a^<2Y)-yhtl#c*85^Ps zLTDn%)QPrsUwlf3*tw%u#Me-<3mMmDrc)#`7ebbJj&`0J{v9gwGqw7)5vxn?{gKS3 zZmp%vSa=QlUz}b7%;bTiLnI^R8mRJM?lE*qEs+@;C<;duw0hH**6GBLp&_<-)+&BT z@|>7>4nQXN2K(gR;iy%B=)W%gWgnjNQkzm`am*p4NRb(N0v(4#)p0QS)HY zZMP-ux+kt^#UWJ)e6%I&?J&Iwl(=@-czyBxej_UchzE&IlXF`r?+MBy`enLqO=oN0 zNGJ%wE&j04qWsO`v=C#f_U5{>D+99`!4@Whv5u8E*#t1ng4#31KRub`#RZP{P_vCT zWRM!zE?8$IMWzLb3YGEGY@9LJ_g)p@4amHWA_4 zKxZkcxsOPK076XhHAedk_d$L2@@Q#ek~-CDkwi59$;OwBpYSH4gA!MeKmIJGZHEIu zK@$p;d2?L8Jg1Y6Xf9$SY035E6E`^uNNmx 
zB_R`F12O(k+ZNzQeW$5Hxf)mW|>}&}hs~P=sIzQLu`o;@>g*~w#n>l^(PbEBJ;3E-xw)=FTs2O&@#dE4mA+aFP*jEH^!1n1bE4&_TF3dmMCcN@n> zGRC}#4JCaBRaNW^H)LPlA_~3g9%={CLV8j6#qUF`2bJ;5D$?;^1LOp5pG~hY(iKpv zxd`$;Wbo+abhAEU%3Ru=kgI;A^_$*N2M!Lg*bs4CxQ{3dw4;d@`3}I^>a7-(7vd;V z#{$)sJvN7_6d18xSEL7iZibkp=wk{S2WgimZMR+AWkpcUI5~o z2L|H~8OawAgiW%9}jjC2}0F0=nbr|IA!4{APo>h z7V|`M?CeBFutVdPo*J{pEeyQ`0&7%+ayLgtpyeqi8tYhwb(7JRk_jUFJx5UY!Ms8I zjT3?H6yg10v;*N(Qw2YfGK|)nB4|2BgpR-YvV<9pV8<#;`JMC!#cJVuh@M9(;sn5c z_0q(4U>+*nQ!wVr!S3<~qu7L|m|gJT3+wk8k?MA<54*q!T|jUgo9jCqN6k(h^sgI; zxkoS4Q9{un|M=1hTcApa+)<)Ux35O&R)b2gVn~id6P@r+nP{@qChb_%$uX?KriMFu!!U~TFa^7oj^2wsoJrL9cI!@xnG+XfIL zQhFhgwHeo282}zK6ITjYE`bBg4dYm$G{>e`fHh;g8F&vDg>sP^xPsKv0Ni1TQoub( z3y=v#){2ng8>@x~Gr^v>m}3*mG~U|cA<+_yga?2$h72r1`_oWB2efAutRm7qHO>md7aQU!eAk{x0WX%nq?AkFD+Dc5DrNWO1yy?9sy|vsW`Zb z2niCOwTz%ta+T_z^4(yw8j+S1>~$F|K^2sbL{E?wpi$G}B5X_>X(bYcCGK2r4FWWz z%jd3cJot@96zpztU=m-eMD#k9vfm|S;`*Bqq-4F(_d!YO~?MA1cH?eC%&e%ufY?QIawRLZ0%V?r7Hyz^zG&I*XdMDkFp0noQ49o5v!X@^}DCUdO` z1sX+AEivCDI%8G!+Qi~vAyL;L^_fN0>dcoT#knMqlz79{-(!%HHsQ%<6DY*}6k)Xz zqywa=FGrHal>>NBWF-}Be1u}GOVhLmk#qy&Fs1;~) zrv(BW?uJ0Mp0ET+M@%D_5=Lj#LTl?a*@%vSz!gdskj)-QkRJuAg+n4ca4Q zBS+jxquDN7gi%Q+k1a;OG@{98*6q+ zt86;B>p&(ckR?uqNAbG!*41SI zK+zvCZ>`-}{qdGU5`Ed}jgMEao7?&w)X6^&h1FR*y^`Uf@=dX6A8i2aUOu*^nJ?zM z4|F<{(QwThJ~H=f3Ci&XhH=l_f1Qi`-+wcEW!2P_;#Z}=g@Y05lWkI2*4yT!fEu<< z?mfeZ930cL%!nKqR$poyhTe26nc^I4-|!)!Q|JYXk`I3Ar_`6=$teGuDV&OJ!v2;U zg_MGFqf>1<^UB?|hnHSl|LOjs#m~84#x=iBfpje3?-VV@Fh5O;0XFlr*!Pze=V&oz z%v>!-5qwySVS}5d1t2l~(Y}qt9}H2D%8kQF298BlQ!4hqGYAgDvF;kXl+P-FwQ{UA zsn@RE|GFzH5!e5+r7SyVJo{ot7B4JV3}wd!gvE5M0ASOx1c5mgN5T>X;VT?#aBXp; zV*w8*YB5&MH;;NeeXoEqEk@Ixti{9!uLU6M{L%SXCw$Q=3IXpaj{fK~4P6@Z&Ms+~ zSzX**v(HD+pK*geph#3JT@@>}u8R3tS2<@&*VW*G5NgzXU6oR#tFnlttBg?nl?v#D za4&{wHvhFem{Oizk7|c|Ig*j|KDeZ z1ZMBtyjMU^)zn>&{$SoGryMJuexji|77r~N?#3PQ)B_vr&GI9rAQVFD&Vh!Qu> za~EitZkuR=motpR$Pd6rY>Zk=tUZp!AUm1ZH@1-A;6xnoemYC)ua3TM^YD@RbO+=I z!x+Tu?O4#I+Z_untATz2S6Xh2KAY_AM7<+~35_Y-meMl#Bb@P6MD$x-MTwzV3M=V> zX_1U{6(0BDJ1d2|a4a!qNpTnDUsq9}`YQQ57k+`_fLzbe$CG<(lL6Q;I_-b$bF2E* zE3K3Sc+s-N{3mt}FKF@c4Tl1eXN!rfN5^MmqZUoo0wFnhCLBb{S!bKjcW>Dy8#zpq z7~Z&RR*z+lY&my6+0Ii^KC_8`ezwpFTyrQ8#}b#Q785pAi{U^4700nL(lyEdT+caN z^Ej*%8jXNI(bimyPGT5^d(+F~DehRJp918Do?LjtVm_p}<-B7-R0F>`FT}^hT?MaA zY3E$rJW_h@xRqmg=DM86psQ>sXDa!d71NBaes9cg`y`8a z0;O~);vIIz+;bPs&lXJ+sEtk!CdPzHwAh;F6+3vz4i18Z zvdvW|PR~NhqnG2c?xWum=YXjSMOb2)|QNxYd zrVxO(iI&g+c*JG`m^d?e2Nh(`3*VHP+#ML^qd9}u@U}6=FI|i~Boj(T4JA9b8}<%C z&vs-lt@f@s@e!}y)apJ;J^UBrjp%M9&d7_8>FSYbCBKF!aXFFQ&)=S2c-R2w|Aah#hGgDdZ69%nEUR~P;V@Z4n zYhp9L+-PEwr&Mrk0rMg}cd5~Yu3Bo8rOARBpn+8|U6nN!T@|A~kb?C%U1d0%=8bEI zUMy+{;WzUTe(iTp)7ki6Snx-Vz+l!A|D16cPRp_8;AeF##3kxDRy5*P($>%h_;Epa z1d}#DNo$tIjT4&8MmT{+K~{N0SL3C(VMp5nT@_2VuI@bHa&3v2D?QxM-(S;;3;?D^ zh1OL-A~_5@X0WA0v$G_@^9wqDmy8}8ut_x3LH;` z61%G}`oY%SNscuoWnl-U#p77<7=eW2LIx6+rUj~(j$pdQjOW$sD%?Tb4j^NHbQqc( ztfo8r`a&olmTii~P z)H-4ang^#ZW;Vy-5Jb>is29#>FaJ@7m)j-=A=@^oP4okoyPH&3 z@_f!cY&R!R%*{--T>ITatH$SLrEGslBzklbtSwrAl`UWN3@Zs;mPDA5%8Wtm%R#gj z)|2M0pcPnhdQ4~*3>=`0wO?4Uo`X%>WTs=pV#R}~;dSIYvUh4{-hg5~g1YKh$2J#G zl$XrSck6PzH@nrjp2Om38Th@S*IlF*m-fR=UsuQ{B9b0zx-)SfEMLzKZ{qz;qbFT$J6`!E zS3|wObwXB7kA$3hU%Dr(F7M(|@Z-MXNil0~@vhUvc2TBpd($XuH!}FzhlJ$EC9ho~ zwUReVGAk0dLb9SH%ctjBOZJLnpG}m7B&cXdP-}0!wRUsSPmT;ZFV`k}^v_1E|MAbC F`4^T*=>Gr! 
From b6d1cea9a864aad527d7bcc7aa9669b19370cadc Mon Sep 17 00:00:00 2001
From: charliebudd
Date: Thu, 25 Mar 2021 01:38:49 +0000
Subject: [PATCH 096/457] Conditional Random Field (#1806)

* conditional random field implementation

Signed-off-by: charliebudd
---
 docs/source/networks.rst           |   5 +
 monai/networks/blocks/__init__.py  |   1 +
 monai/networks/blocks/crf.py       | 140 +++++++++
 monai/networks/layers/filtering.py |   7 +-
 tests/test_crf_cpu.py              | 450 ++++++++++++++++++++++++++++
 tests/test_crf_cuda.py             | 451 +++++++++++++++++++++++++++++
 6 files changed, 1051 insertions(+), 3 deletions(-)
 create mode 100644 monai/networks/blocks/crf.py
 create mode 100644 tests/test_crf_cpu.py
 create mode 100644 tests/test_crf_cuda.py

diff --git a/docs/source/networks.rst b/docs/source/networks.rst
index f5d498a363..7d12a94fc2 100644
--- a/docs/source/networks.rst
+++ b/docs/source/networks.rst
@@ -20,6 +20,11 @@ Blocks
 .. autoclass:: Convolution
     :members:
 
+`CRF`
+~~~~~~~~~~~~~
+.. autoclass:: CRF
+    :members:
+
 `ResidualUnit`
 ~~~~~~~~~~~~~~
 .. autoclass:: ResidualUnit
diff --git a/monai/networks/blocks/__init__.py b/monai/networks/blocks/__init__.py
index 4639630c36..cdf7bc3f6d 100644
--- a/monai/networks/blocks/__init__.py
+++ b/monai/networks/blocks/__init__.py
@@ -13,6 +13,7 @@
 from .activation import Mish, Swish
 from .aspp import SimpleASPP
 from .convolutions import Convolution, ResidualUnit
+from .crf import CRF
 from .downsample import MaxAvgPool
 from .dynunet_block import UnetBasicBlock, UnetOutBlock, UnetResBlock, UnetUpBlock, get_output_padding, get_padding
 from .fcn import FCN, GCN, MCFCN, Refine
diff --git a/monai/networks/blocks/crf.py b/monai/networks/blocks/crf.py
new file mode 100644
index 0000000000..27556a2c72
--- /dev/null
+++ b/monai/networks/blocks/crf.py
@@ -0,0 +1,140 @@
+# Copyright 2020 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+from torch.nn.functional import conv1d, conv2d, conv3d, pad, softmax
+
+from monai.networks.layers.filtering import PHLFilter
+
+__all__ = ["CRF"]
+
+
+class CRF(torch.nn.Module):
+    """
+    Conditional Random Field: Combines message passing with a class
+    compatibility convolution into an iterative process designed
+    to successively minimise the energy of the class labeling.
+
+    In this implementation, the message passing step is a weighted
+    combination of a gaussian filter and a bilateral filter.
+    The bilateral term is included to respect existing structure
+    within the reference tensor.
+
+    See:
+        https://arxiv.org/abs/1502.03240
+    """
+
+    def __init__(
+        self,
+        bilateral_weight: float = 1.0,
+        gaussian_weight: float = 1.0,
+        bilateral_spatial_sigma: float = 5.0,
+        bilateral_color_sigma: float = 0.5,
+        gaussian_spatial_sigma: float = 5.0,
+        update_factor: float = 3.0,
+        compatability_kernel_range: int = 1,
+        iterations: int = 5,
+    ):
+        """
+        Args:
+            bilateral_weight: the weighting of the bilateral term in the message passing step.
+            gaussian_weight: the weighting of the gaussian term in the message passing step.
+            bilateral_spatial_sigma: standard deviation in spatial coordinates for the bilateral term.
+            bilateral_color_sigma: standard deviation in color space for the bilateral term.
+            gaussian_spatial_sigma: standard deviation in spatial coordinates for the gaussian term.
+            update_factor: determines the magnitude of each update.
+            compatability_kernel_range: the range of the kernel used in the compatibility convolution.
+            iterations: the number of iterations.
+        """
+        super(CRF, self).__init__()
+        self.bilateral_weight = bilateral_weight
+        self.gaussian_weight = gaussian_weight
+        self.bilateral_spatial_sigma = bilateral_spatial_sigma
+        self.bilateral_color_sigma = bilateral_color_sigma
+        self.gaussian_spatial_sigma = gaussian_spatial_sigma
+        self.update_factor = update_factor
+        self.compatability_kernel_range = compatability_kernel_range
+        self.iterations = iterations
+
+    def forward(self, input_tensor: torch.Tensor, reference_tensor: torch.Tensor):
+        """
+        Args:
+            input_tensor: tensor containing initial class logits.
+            reference_tensor: the reference tensor used to guide the message passing.
+
+        Returns:
+            output (torch.Tensor): output tensor.
+        """
+
+        # useful values
+        spatial_dim = input_tensor.dim() - 2
+        class_count = input_tensor.size(1)
+        padding = self.compatability_kernel_range
+
+        # constructing spatial feature tensor
+        spatial_features = _create_coordinate_tensor(reference_tensor)
+
+        # constructing final feature tensors for bilateral and gaussian kernel
+        bilateral_features = torch.cat(
+            [spatial_features / self.bilateral_spatial_sigma, reference_tensor / self.bilateral_color_sigma], dim=1
+        )
+        gaussian_features = spatial_features / self.gaussian_spatial_sigma
+
+        # compatibility matrix (potts model (1 - diag) for now)
+        compatability_matrix = _potts_model_weights(class_count).to(device=input_tensor.device)
+
+        # expanding matrix to kernel
+        compatability_kernel = _expand_matrix_to_kernel(
+            compatability_matrix, spatial_dim, self.compatability_kernel_range
+        )
+
+        # choosing convolution function
+        conv = [conv1d, conv2d, conv3d][spatial_dim - 1]
+
+        # setting up output tensor
+        output_tensor = softmax(input_tensor, dim=1)
+
+        # mean field loop
+        for _ in range(self.iterations):
+
+            # message passing step for both kernels
+            bilateral_output = PHLFilter.apply(output_tensor, bilateral_features)
+            gaussian_output = PHLFilter.apply(output_tensor, gaussian_features)
+
+            # combining filter outputs
+            combined_output = self.bilateral_weight * bilateral_output + self.gaussian_weight * gaussian_output
+
+            # compatibility convolution
+            combined_output = pad(combined_output, 2 * spatial_dim * [padding], mode="replicate")
+            compatibility_update = conv(combined_output, compatability_kernel)
+
+            # update and normalize
+            output_tensor = softmax(input_tensor - self.update_factor * compatibility_update, dim=1)
+
+        return output_tensor
+
+
+# helper methods
+def _create_coordinate_tensor(tensor):
+    axes = [torch.arange(tensor.size(i)) for i in range(2, tensor.dim())]
+    grids = torch.meshgrid(axes)
+    coords = torch.stack(grids).to(device=tensor.device, dtype=tensor.dtype)
+    return torch.stack(tensor.size(0) * [coords], dim=0)
+
+
+def _potts_model_weights(class_count):
+    return (1 - torch.diag(torch.ones(class_count))).unsqueeze(-1)
+
+
+def _expand_matrix_to_kernel(matrix, spatial_dim, kernel_range):
+    reshape_arg = (matrix.size(0), matrix.size(1)) + spatial_dim * (1,)
+    expand_arg = (-1, -1) + spatial_dim * (1 + 2 * kernel_range,)
+    return matrix.reshape(reshape_arg).expand(expand_arg)
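A quick sanity check of the two Potts helpers just added: this is an illustrative sketch, assuming only torch and the `_potts_model_weights` / `_expand_matrix_to_kernel` definitions above. For two classes, the weights are zero on the diagonal (no penalty for matching labels) and one elsewhere, and the kernel expansion broadcasts them over the convolution neighbourhood:

    # illustrative check of the helpers above
    m = _potts_model_weights(2)  # shape (2, 2, 1)
    print(m[..., 0])  # tensor([[0., 1.],
                      #         [1., 0.]])
    k = _expand_matrix_to_kernel(m, spatial_dim=2, kernel_range=1)
    print(k.shape)    # torch.Size([2, 2, 3, 3]) -- a conv2d kernel over a 3x3 neighbourhood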
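And a minimal usage sketch for the block itself (the tensor shapes here are hypothetical; MONAI must be built with its C++ extensions, since `PHLFilter.apply` in the message passing step calls into the compiled module):

    import torch
    from monai.networks.blocks import CRF

    crf = CRF(iterations=5)
    logits = torch.rand(1, 2, 10, 10)     # (B, class_count, H, W) initial class logits
    reference = torch.rand(1, 3, 10, 10)  # reference image guiding the bilateral term
    refined = crf(logits, reference)      # per-class probabilities, same shape as logits

Note that the output is already normalised with a softmax at every iteration, so no extra activation is needed downstream.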
diff --git a/monai/networks/layers/filtering.py b/monai/networks/layers/filtering.py
index 1bec725c7e..7eca03a280 100644
--- a/monai/networks/layers/filtering.py
+++ b/monai/networks/layers/filtering.py
@@ -93,6 +93,7 @@ def forward(ctx, input, features, sigmas=None):
 
     @staticmethod
     def backward(ctx, grad_output):
-        scaled_features = ctx.saved_variables
-        grad_input = PHLFilter.scale(grad_output, scaled_features)
-        return grad_input
+        raise NotImplementedError("PHLFilter does not currently support backpropagation")
+        # scaled_features, = ctx.saved_variables
+        # grad_input = _C.phl_filter(grad_output, scaled_features)
+        # return grad_input
diff --git a/tests/test_crf_cpu.py b/tests/test_crf_cpu.py
new file mode 100644
index 0000000000..f6e82d16a5
--- /dev/null
+++ b/tests/test_crf_cpu.py
@@ -0,0 +1,450 @@
+# Copyright 2020 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import torch
+from parameterized import parameterized
+
+from monai.networks.blocks import CRF
+from tests.utils import skip_if_no_cpp_extension
+
+TEST_CASES = [
+    [
+        # Case Description
+        "2 batche(s), 1 dimension(s), 2 classe(s), 1 channel(s)",
+        # Parameters
+        [
+            1.0,  # bilateral_weight
+            0.3,  # gaussian_weight
+            5.0,  # bilateral_spatial_sigma
+            0.5,  # bilateral_color_sigma
+            5.0,  # gaussian_spatial_sigma
+            1.0,  # update_factor
+            1,  # compatability_kernel_range
+            5,  # iterations
+        ],
+        # Input
+        [
+            # Batch 0
+            [
+                # Class 0
+                [0.8, 0.9, 0.6, 0.2, 0.3],
+                # Class 1
+                [0.1, 0.3, 0.5, 0.8, 0.7],
+            ],
+            # Batch 1
+            [
+                # Class 0
+                [0.8, 0.9, 0.6, 0.2, 0.3],
+                # Class 1
+                [0.1, 0.3, 0.5, 0.8, 0.7],
+            ],
+        ],
+        # Features
+        [
+            # Batch 0
+            [
+                # Channel 0
+                [1, 1, 1, 0.5, 0],
+            ],
+            # Batch 1
+            [
+                # Channel 0
+                [1, 1, 0.5, 0, 0],
+            ],
+        ],
+        # Expected
+        [
+            # Batch 0
+            [
+                # Class 0
+                [0.976472, 0.973789, 0.951958, 0.882982, 0.876651],
+                # Class 1
+                [0.023528, 0.026211, 0.048042, 0.117018, 0.123349],
+            ],
+            # Batch 1
+            [
+                # Class 0
+                [0.963642, 0.946892, 0.858650, 0.633639, 0.617334],
+                # Class 1
+                [0.036358, 0.053108, 0.141350, 0.366361, 0.382666],
+            ],
+        ],
+    ],
+    [
+        # Case Description
+        "1 batche(s), 2 dimension(s), 3 classe(s), 2 channel(s)",
+        # Parameters
+        [
+            1.0,  # bilateral_weight
+            0.3,  # gaussian_weight
+            5.0,  # bilateral_spatial_sigma
+            0.5,  # bilateral_color_sigma
+            5.0,  # gaussian_spatial_sigma
+            1.0,  # update_factor
+            1,  # compatability_kernel_range
+            5,  # iterations
+        ],
+        # Input
+        [
+            # Batch 0
+            [
+                # Class 0
+                [
+                    [0.0, 0.0, 0.0, 0.0, 0.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0],
+                    [0.0, 0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 0.0, 1.0, 1.0],
+                ],
+                # Class 1
+                [
+                    [1.0, 1.0, 0.0, 0.0, 0.0],
+                    [1.0, 1.0, 0.0, 0.0, 0.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0],
+                ],
+                # Class 2
+                [
+                    [0.0, 0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0, 1.0],
+                    [0.0, 1.0, 1.0, 1.0, 0.0],
+                    [1.0, 1.0, 1.0, 0.0, 0.0],
+                    [1.0, 1.0, 0.0, 0.0, 0.0],
+                ],
+            ],
+        ],
+        # Features
+        [
+            # Batch 0
+            [
+                # Channel 0
+                [
+                    [1.0, 1.0, 0.0,
0.0, 0.0], + [1.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Channel 1 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + ], + ], + ], + # Expected + [ + # Batch 0 + [ + # Class 0 + [ + [0.000008, 0.000008, 0.000008, 0.000003, 0.000003], + [0.000008, 0.000008, 0.000003, 0.000003, 0.000003], + [0.000008, 0.000003, 0.000003, 0.000003, 0.000008], + [0.000003, 0.000003, 0.000003, 0.000023, 0.000023], + [0.000003, 0.000003, 0.000008, 0.000023, 0.000023], + ], + # Class 1 + [ + [0.000023, 0.000023, 0.000008, 0.000003, 0.000003], + [0.000023, 0.000023, 0.000003, 0.000003, 0.000003], + [0.000008, 0.000003, 0.000003, 0.000003, 0.000008], + [0.000003, 0.000003, 0.000003, 0.000008, 0.000008], + [0.000003, 0.000003, 0.000008, 0.000008, 0.000008], + ], + # Class 2 + [ + [0.999969, 0.999969, 0.999983, 0.999994, 0.999994], + [0.999969, 0.999969, 0.999994, 0.999994, 0.999994], + [0.999983, 0.999994, 0.999994, 0.999994, 0.999983], + [0.999994, 0.999994, 0.999994, 0.999969, 0.999969], + [0.999994, 0.999994, 0.999983, 0.999969, 0.999969], + ], + ], + ], + ], + [ + # Case Description + "1 batche(s), 3 dimension(s), 2 classe(s), 1 channel(s)", + # Parameters + [ + 1.0, # bilateral_weight + 0.3, # gaussian_weight + 5.0, # bilateral_spatial_sigma + 0.1, # bilateral_color_sigma + 5.0, # gaussian_spatial_sigma + 1.0, # update_factor + 1, # compatability_kernel_range + 2, # iterations + ], + # Input + [ + # Batch 0 + [ + # Class 0 + [ + # Slice 0 + [ + [1.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 1 + [ + [1.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 2 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 3 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 4 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + ], + # Class 1 + [ + # Slice 0 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 1 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 2 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 3 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + ], + # Slice 4 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + ], + ], + ], + ], + # Features + [ + # Batch 0 + [ + # Channel 0 + [ + # Slice 0 + [ + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 1 + [ + [0.5, 0.5, 0.5, 
0.0, 0.0], + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 2 + [ + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 0.8, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + ], + # Slice 3 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + ], + # Slice 4 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + ], + ], + ], + ], + # Expected + [ + # Batch 0 + [ + # Class 0 + [ + # Slice 0 + [ + [1.000000, 1.000000, 1.000000, 0.999808, 0.721122], + [1.000000, 1.000000, 1.000000, 0.999758, 0.666025], + [1.000000, 1.000000, 0.999979, 0.995894, 0.577459], + [0.999787, 0.999739, 0.995725, 0.934170, 0.488704], + [0.691645, 0.641028, 0.560127, 0.481718, 0.431180], + ], + # Slice 1 + [ + [1.000000, 1.000000, 1.000000, 0.999743, 0.650416], + [1.000000, 1.000000, 0.999999, 0.992747, 0.108034], + [1.000000, 0.999999, 0.998541, 0.402370, 0.007122], + [0.999711, 0.992109, 0.391941, 0.003358, 0.000440], + [0.615523, 0.097120, 0.006599, 0.000427, 0.000365], + ], + # Slice 2 + [ + [1.000000, 1.000000, 0.999975, 0.995241, 0.543122], + [1.000000, 0.999998, 0.998394, 0.381981, 0.006586], + [0.999973, 0.998313, 0.370238, 0.000596, 0.000034], + [0.994611, 0.361317, 0.000573, 0.000001, 0.000000], + [0.505392, 0.005862, 0.000032, 0.000000, 0.000000], + ], + # Slice 3 + [ + [0.999692, 0.999639, 0.994364, 0.919713, 0.446683], + [0.999626, 0.990123, 0.347190, 0.002895, 0.000390], + [0.993872, 0.336665, 0.000525, 0.000001, 0.000000], + [0.910704, 0.002676, 0.000001, 0.000000, 0.000000], + [0.413964, 0.000354, 0.000000, 0.000000, 0.000000], + ], + # Slice 4 + [ + [0.574496, 0.533306, 0.469335, 0.419446, 0.390403], + [0.524093, 0.072180, 0.005087, 0.000354, 0.000318], + [0.449362, 0.004875, 0.000028, 0.000000, 0.000000], + [0.393431, 0.000331, 0.000000, 0.000000, 0.000000], + [0.362417, 0.000295, 0.000000, 0.000000, 0.000000], + ], + ], + # Class 1 + [ + # Slice 0 + [ + [0.000000, 0.000000, 0.000000, 0.000192, 0.278878], + [0.000000, 0.000000, 0.000000, 0.000242, 0.333975], + [0.000000, 0.000000, 0.000021, 0.004106, 0.422541], + [0.000213, 0.000261, 0.004275, 0.065830, 0.511296], + [0.308355, 0.358972, 0.439873, 0.518282, 0.568820], + ], + # Slice 1 + [ + [0.000000, 0.000000, 0.000000, 0.000257, 0.349584], + [0.000000, 0.000000, 0.000001, 0.007253, 0.891966], + [0.000000, 0.000001, 0.001459, 0.597630, 0.992878], + [0.000289, 0.007891, 0.608059, 0.996642, 0.999560], + [0.384477, 0.902880, 0.993401, 0.999573, 0.999635], + ], + # Slice 2 + [ + [0.000000, 0.000000, 0.000025, 0.004759, 0.456878], + [0.000000, 0.000002, 0.001606, 0.618019, 0.993414], + [0.000027, 0.001687, 0.629762, 0.999404, 0.999966], + [0.005389, 0.638683, 0.999427, 0.999999, 1.000000], + [0.494608, 0.994138, 0.999968, 1.000000, 1.000000], + ], + # Slice 3 + [ + [0.000308, 0.000361, 0.005636, 0.080287, 0.553317], + [0.000374, 0.009877, 0.652810, 0.997105, 0.999610], + [0.006128, 0.663335, 0.999475, 0.999999, 1.000000], + [0.089296, 0.997324, 0.999999, 1.000000, 1.000000], + [0.586036, 0.999646, 1.000000, 1.000000, 1.000000], + ], + # Slice 4 + [ + [0.425504, 0.466694, 0.530665, 0.580554, 0.609597], + [0.475907, 0.927820, 0.994913, 0.999646, 0.999682], + [0.550638, 0.995125, 0.999972, 1.000000, 1.000000], + [0.606569, 0.999669, 1.000000, 1.000000, 
1.000000], + [0.637583, 0.999705, 1.000000, 1.000000, 1.000000], + ], + ], + ], + ], + ], +] + + +@skip_if_no_cpp_extension +class CRFTestCaseCpu(unittest.TestCase): + @parameterized.expand(TEST_CASES) + def test(self, test_case_description, params, input, features, expected): + + # Create input tensors + input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cpu")) + feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cpu")) + + # apply filter + crf = CRF(*params) + output = crf(input_tensor, feature_tensor).cpu().numpy() + + # Ensure result are as expected + np.testing.assert_allclose(output, expected, atol=1e-4) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_crf_cuda.py b/tests/test_crf_cuda.py new file mode 100644 index 0000000000..4decd433fa --- /dev/null +++ b/tests/test_crf_cuda.py @@ -0,0 +1,451 @@ +# Copyright 2020 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.networks.blocks import CRF +from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda + +TEST_CASES = [ + [ + # Case Description + "2 batche(s), 1 dimension(s), 2 classe(s), 1 channel(s)", + # Parameters + [ + 1.0, # bilateral_weight + 0.3, # gaussian_weight + 5.0, # bilateral_spatial_sigma + 0.5, # bilateral_color_sigma + 5.0, # gaussian_spatial_sigma + 1.0, # update_factor + 1, # compatability_kernel_range + 5, # iterations + ], + # Input + [ + # Batch 0 + [ + # Class 0 + [0.8, 0.9, 0.6, 0.2, 0.3], + # Class 1 + [0.1, 0.3, 0.5, 0.8, 0.7], + ], + # Batch 1 + [ + # Class 0 + [0.8, 0.9, 0.6, 0.2, 0.3], + # Class 1 + [0.1, 0.3, 0.5, 0.8, 0.7], + ], + ], + # Features + [ + # Batch 0 + [ + # Channel 0 + [1, 1, 1, 0.5, 0], + ], + # Batch 1 + [ + # Channel 0 + [1, 1, 0.5, 0, 0], + ], + ], + # Expected + [ + # Batch 0 + [ + # Class 0 + [0.965345, 0.961201, 0.920527, 0.772525, 0.711900], + # Class 1 + [0.034655, 0.038799, 0.079473, 0.227475, 0.288100], + ], + # Batch 1 + [ + # Class 0 + [0.897615, 0.816166, 0.500186, 0.158644, 0.133245], + # Class 1 + [0.102385, 0.183834, 0.499814, 0.841356, 0.866755], + ], + ], + ], + [ + # Case Description + "1 batche(s), 2 dimension(s), 3 classe(s), 2 channel(s)", + # Parameters + [ + 1.0, # bilateral_weight + 0.3, # gaussian_weight + 5.0, # bilateral_spatial_sigma + 0.5, # bilateral_color_sigma + 5.0, # gaussian_spatial_sigma + 1.0, # update_factor + 1, # compatability_kernel_range + 5, # iterations + ], + # Input + [ + # Batch 0 + [ + # Class 0 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + ], + # Class 1 + [ + [1.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Class 2 + [ + [0.0, 0.0, 0.0, 0.5, 1.0], + [0.0, 0.0, 0.5, 1.0, 0.5], + [0.0, 0.5, 1.0, 0.5, 0.0], + 
[0.5, 1.0, 0.5, 0.0, 0.0], + [1.0, 0.5, 0.0, 0.0, 0.0], + ], + ], + ], + # Features + [ + # Batch 0 + [ + # Channel 0 + [ + [1.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Channel 1 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + ], + ], + ], + # Expected + [ + # Batch 0 + [ + # Class 0 + [ + [0.001529, 0.000798, 0.000323, 0.000093, 0.000053], + [0.001365, 0.000966, 0.000422, 0.000178, 0.000281], + [0.001405, 0.001007, 0.002425, 0.013078, 0.064707], + [0.001239, 0.001263, 0.033857, 0.665830, 0.951172], + [0.001534, 0.004486, 0.263298, 0.973852, 0.999018], + ], + # Class 1 + [ + [0.230989, 0.025518, 0.000764, 0.000057, 0.000029], + [0.037540, 0.008348, 0.000381, 0.000055, 0.000075], + [0.001987, 0.000665, 0.000363, 0.000499, 0.001170], + [0.000187, 0.000143, 0.000805, 0.001361, 0.000533], + [0.000131, 0.000286, 0.002139, 0.000410, 0.000069], + ], + # Class 2 + [ + [0.767482, 0.973685, 0.998913, 0.999850, 0.999919], + [0.961095, 0.990687, 0.999197, 0.999768, 0.999644], + [0.996608, 0.998328, 0.997212, 0.986423, 0.934124], + [0.998574, 0.998594, 0.965337, 0.332809, 0.048295], + [0.998334, 0.995228, 0.734563, 0.025738, 0.000912], + ], + ], + ], + ], + [ + # Case Description + "1 batche(s), 3 dimension(s), 2 classe(s), 1 channel(s)", + # Parameters + [ + 1.0, # bilateral_weight + 0.3, # gaussian_weight + 5.0, # bilateral_spatial_sigma + 0.1, # bilateral_color_sigma + 5.0, # gaussian_spatial_sigma + 1.0, # update_factor + 1, # compatability_kernel_range + 2, # iterations + ], + # Input + [ + # Batch 0 + [ + # Class 0 + [ + # Slice 0 + [ + [1.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 1 + [ + [1.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 2 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 3 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 4 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + ], + # Class 1 + [ + # Slice 0 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 1 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 2 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 3 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + ], + # Slice 4 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0, 1.0], + ], + ], + ], + ], + # Features + [ + # Batch 0 + [ + # Channel 0 + [ + # Slice 0 + [ + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 
0.5, 0.0, 0.0], + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 1 + [ + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + ], + # Slice 2 + [ + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 0.5, 0.0, 0.0], + [0.5, 0.5, 0.8, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + ], + # Slice 3 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + ], + # Slice 4 + [ + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 1.0, 1.0, 1.0], + ], + ], + ], + ], + # Expected + [ + # Batch 0 + [ + # Class 0 + [ + # Slice 0 + [ + [1.000000, 1.000000, 1.000000, 0.999884, 0.769625], + [1.000000, 1.000000, 1.000000, 0.999851, 0.714004], + [1.000000, 1.000000, 0.999988, 0.997150, 0.614165], + [0.999862, 0.999832, 0.996976, 0.945058, 0.497088], + [0.720345, 0.672450, 0.590360, 0.490120, 0.416671], + ], + # Slice 1 + [ + [1.000000, 1.000000, 1.000000, 0.999848, 0.707997], + [1.000000, 1.000000, 1.000000, 0.997064, 0.127893], + [1.000000, 1.000000, 0.999469, 0.591574, 0.007791], + [0.999812, 0.996663, 0.582521, 0.006041, 0.000427], + [0.637809, 0.107586, 0.007432, 0.000437, 0.000333], + ], + # Slice 2 + [ + [1.000000, 1.000000, 0.999987, 0.996994, 0.600095], + [1.000000, 1.000000, 0.999441, 0.575839, 0.007303], + [0.999986, 0.999411, 0.587268, 0.001117, 0.000033], + [0.996210, 0.550023, 0.001114, 0.000001, 0.000000], + [0.520757, 0.006334, 0.000034, 0.000000, 0.000000], + ], + # Slice 3 + [ + [0.999834, 0.999807, 0.996617, 0.940887, 0.482334], + [0.999799, 0.996410, 0.553696, 0.005287, 0.000376], + [0.996193, 0.546801, 0.001047, 0.000001, 0.000000], + [0.930515, 0.005142, 0.000001, 0.000000, 0.000000], + [0.430705, 0.000371, 0.000000, 0.000000, 0.000000], + ], + # Slice 4 + [ + [0.665227, 0.627316, 0.550517, 0.467839, 0.406319], + [0.617408, 0.098325, 0.006247, 0.000359, 0.000278], + [0.524800, 0.006229, 0.000030, 0.000000, 0.000000], + [0.443054, 0.000372, 0.000000, 0.000000, 0.000000], + [0.388126, 0.000305, 0.000000, 0.000000, 0.000000], + ], + ], + # Class 1 + [ + # Slice 0 + [ + [0.000000, 0.000000, 0.000000, 0.000116, 0.230375], + [0.000000, 0.000000, 0.000000, 0.000149, 0.285996], + [0.000000, 0.000000, 0.000012, 0.002850, 0.385835], + [0.000138, 0.000168, 0.003024, 0.054942, 0.502912], + [0.279655, 0.327550, 0.409640, 0.509880, 0.583329], + ], + # Slice 1 + [ + [0.000000, 0.000000, 0.000000, 0.000152, 0.292003], + [0.000000, 0.000000, 0.000000, 0.002936, 0.872107], + [0.000000, 0.000000, 0.000531, 0.408426, 0.992209], + [0.000188, 0.003337, 0.417479, 0.993959, 0.999574], + [0.362191, 0.892414, 0.992568, 0.999564, 0.999667], + ], + # Slice 2 + [ + [0.000000, 0.000000, 0.000013, 0.003006, 0.399905], + [0.000000, 0.000000, 0.000559, 0.424161, 0.992697], + [0.000014, 0.000589, 0.412732, 0.998884, 0.999967], + [0.003790, 0.449977, 0.998886, 0.999999, 1.000000], + [0.479243, 0.993666, 0.999966, 1.000000, 1.000000], + ], + # Slice 3 + [ + [0.000166, 0.000193, 0.003383, 0.059113, 0.517666], + [0.000201, 0.003590, 0.446304, 0.994713, 0.999624], + [0.003807, 0.453199, 0.998953, 0.999999, 1.000000], + [0.069485, 0.994858, 0.999999, 1.000000, 1.000000], + [0.569295, 0.999629, 1.000000, 1.000000, 1.000000], + ], + # Slice 4 + [ + [0.334773, 0.372684, 0.449483, 0.532161, 0.593681], + 
[0.382592, 0.901675, 0.993753, 0.999641, 0.999722],
+                        [0.475200, 0.993771, 0.999970, 1.000000, 1.000000],
+                        [0.556946, 0.999628, 1.000000, 1.000000, 1.000000],
+                        [0.611874, 0.999695, 1.000000, 1.000000, 1.000000],
+                    ],
+                ],
+            ],
+        ],
+    ],
+]
+
+
+@skip_if_no_cpp_extension
+@skip_if_no_cuda
+class CRFTestCaseCuda(unittest.TestCase):
+    @parameterized.expand(TEST_CASES)
+    def test(self, test_case_description, params, input, features, expected):
+
+        # Create input tensors
+        input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cuda"))
+        feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cuda"))
+
+        # apply filter
+        crf = CRF(*params)
+        output = crf(input_tensor, feature_tensor).cpu().numpy()
+
+        # Ensure results are as expected
+        np.testing.assert_allclose(output, expected, atol=1e-4)
+
+
+if __name__ == "__main__":
+    unittest.main()

From c5be1adc4b3907c5f886b219ff2f12b66c72938d Mon Sep 17 00:00:00 2001
From: Nic Ma
Date: Thu, 25 Mar 2021 22:35:38 +0800
Subject: [PATCH 097/457] 1827 Fix CUDA affine issue and thread-safe issue in AffineGrid (#1836)

* [DLMED] fix affine error and thread-safe issue

Signed-off-by: Nic Ma

* [DLMED] update CI tests

Signed-off-by: Nic Ma

* [DLMED] update typehints

Signed-off-by: Nic Ma

* [MONAI] python code formatting

Signed-off-by: monai-bot

* [DLMED] fix flake8

Signed-off-by: Nic Ma

* [DLMED] update according to comments

Signed-off-by: Nic Ma

* [DLMED] make cachedataset to be thread-safe

Signed-off-by: Nic Ma

* [DLMED] remove inverse ID check

Signed-off-by: Nic Ma

* [DLMED] fix flake8 issue

Signed-off-by: Nic Ma

* [DLMED] restore CacheDataset and inverse transform

Signed-off-by: Nic Ma

Co-authored-by: monai-bot
---
 monai/transforms/spatial/array.py      | 30 ++++++++++++--------------
 monai/transforms/spatial/dictionary.py |  7 +++---
 tests/test_affine.py                   |  2 +-
 tests/test_affine_grid.py              |  2 +-
 4 files changed, 19 insertions(+), 22 deletions(-)

diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py
index de9bba8e95..471b171312 100644
--- a/monai/transforms/spatial/array.py
+++ b/monai/transforms/spatial/array.py
@@ -961,7 +961,7 @@ def __call__(
         self,
         spatial_size: Optional[Sequence[int]] = None,
         grid: Optional[Union[np.ndarray, torch.Tensor]] = None,
-    ) -> Union[np.ndarray, torch.Tensor]:
+    ) -> Tuple[Union[np.ndarray, torch.Tensor], Union[np.ndarray, torch.Tensor]]:
         """
         Args:
             spatial_size: output grid size.
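With this change, `AffineGrid.__call__` returns the resampling grid together with the affine matrix that was actually applied, instead of stashing the matrix on `self.affine`, which was the source of the thread-safety problem when one transform instance is shared across workers. A minimal sketch of the updated calling convention (the parameter values are illustrative, not taken from the patch):

    import numpy as np
    from monai.transforms import AffineGrid

    affine_grid = AffineGrid(rotate_params=0.1)
    grid, affine = affine_grid(spatial_size=(4, 4))  # grid plus the matrix that produced it
    # the returned matrix can be inverted to rebuild the grid, as `inverse` does below
    inv_grid, _ = AffineGrid(affine=np.linalg.inv(np.asarray(affine)))(spatial_size=(4, 4))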
@@ -988,21 +988,20 @@ def __call__( affine = affine @ create_translate(spatial_dims, self.translate_params) if self.scale_params: affine = affine @ create_scale(spatial_dims, self.scale_params) - self.affine = affine + else: + affine = self.affine - self.affine = torch.as_tensor(np.ascontiguousarray(self.affine), device=self.device) + if isinstance(affine, np.ndarray): + affine = torch.as_tensor(np.ascontiguousarray(affine)) grid = torch.tensor(grid) if not isinstance(grid, torch.Tensor) else grid.detach().clone() if self.device: + affine = affine.to(self.device) grid = grid.to(self.device) - grid = (self.affine.float() @ grid.reshape((grid.shape[0], -1)).float()).reshape([-1] + list(grid.shape[1:])) + grid = (affine.float() @ grid.reshape((grid.shape[0], -1)).float()).reshape([-1] + list(grid.shape[1:])) if grid is None or not isinstance(grid, torch.Tensor): raise ValueError("Unknown grid.") - return grid if self.as_tensor_output else np.asarray(grid.cpu().numpy()) - - def get_transformation_matrix(self) -> Optional[Union[np.ndarray, torch.Tensor]]: - """Get the most recently applied transformation matrix""" - return self.affine + return grid if self.as_tensor_output else np.asarray(grid.cpu().numpy()), affine class RandAffineGrid(RandomizableTransform): @@ -1094,8 +1093,7 @@ def __call__( as_tensor_output=self.as_tensor_output, device=self.device, ) - grid = affine_grid(spatial_size, grid) - self.affine = affine_grid.get_transformation_matrix() + grid, self.affine = affine_grid(spatial_size, grid) return grid def get_transformation_matrix(self) -> Optional[Union[np.ndarray, torch.Tensor]]: @@ -1309,7 +1307,7 @@ def __call__( spatial_size: Optional[Union[Sequence[int], int]] = None, mode: Optional[Union[GridSampleMode, str]] = None, padding_mode: Optional[Union[GridSamplePadMode, str]] = None, - ) -> Union[np.ndarray, torch.Tensor]: + ) -> Tuple[Union[np.ndarray, torch.Tensor], Union[np.ndarray, torch.Tensor]]: """ Args: img: shape must be (num_channels, H, W[, D]), @@ -1326,9 +1324,10 @@ def __call__( See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample """ sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:]) - grid = self.affine_grid(spatial_size=sp_size) - return self.resampler( - img=img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode + grid, affine = self.affine_grid(spatial_size=sp_size) + return ( + self.resampler(img=img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode), + affine, ) @@ -1434,7 +1433,6 @@ def __call__( See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample """ self.randomize() - sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:]) if self._do_transform: grid = self.rand_affine_grid(spatial_size=sp_size) diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index e356a51a2a..86c94302a1 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -572,8 +572,7 @@ def __call__( d = dict(data) for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): orig_size = d[key].shape[1:] - d[key] = self.affine(d[key], mode=mode, padding_mode=padding_mode) - affine = self.affine.affine_grid.get_transformation_matrix() + d[key], affine = self.affine(d[key], mode=mode, padding_mode=padding_mode) self.push_transform(d, key, orig_size=orig_size, extra_info={"affine": affine}) return d @@ -588,7 +587,7 @@ def inverse(self, data: 
Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar inv_affine = np.linalg.inv(fwd_affine) affine_grid = AffineGrid(affine=inv_affine) - grid: torch.Tensor = affine_grid(orig_size) # type: ignore + grid, _ = affine_grid(orig_size) # type: ignore # Apply inverse transform out = self.affine.resampler(d[key], grid, mode, padding_mode) @@ -717,7 +716,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar inv_affine = np.linalg.inv(fwd_affine) affine_grid = AffineGrid(affine=inv_affine) - grid: torch.Tensor = affine_grid(orig_size) # type: ignore + grid, _ = affine_grid(orig_size) # type: ignore # Apply inverse transform out = self.rand_affine.resampler(d[key], grid, mode, padding_mode) diff --git a/tests/test_affine.py b/tests/test_affine.py index ea146e0fbd..1b6c19596b 100644 --- a/tests/test_affine.py +++ b/tests/test_affine.py @@ -78,7 +78,7 @@ class TestAffine(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_affine(self, input_param, input_data, expected_val): g = Affine(**input_param) - result = g(**input_data) + result, _ = g(**input_data) self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor)) np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) diff --git a/tests/test_affine_grid.py b/tests/test_affine_grid.py index 2906cd18b6..24772b9a21 100644 --- a/tests/test_affine_grid.py +++ b/tests/test_affine_grid.py @@ -92,7 +92,7 @@ class TestAffineGrid(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_affine_grid(self, input_param, input_data, expected_val): g = AffineGrid(**input_param) - result = g(**input_data) + result, _ = g(**input_data) self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor)) if isinstance(result, torch.Tensor): np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4) From 7e7cd944406e7c5c12dc258b84d4ad6e90984db6 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Thu, 25 Mar 2021 12:27:53 -0400 Subject: [PATCH 098/457] PatchWSIDataset (#1835) * Implement PatchWSIDataset and SmartCachePathWSIDataset Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Remove label preprocessing and adopt new type of inputs Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update type hints Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add init file Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Change grid_size to grid_shape Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add a unittest for PatchWSIDataset Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add more unittests Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update docstrings and make minor changes Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Convert labels to numpy to match the change in dataset Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update location from center to corner Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update unittests locations from center to corner Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update docs for pathology datasets Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update type hint and doc Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update docstrings Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * 
Format docstring

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Update length of the smartcache dataset

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Add unittest for SmartCachePatchWSIDataset

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Minor changes and fixes

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Add unittest for OpenSlide option

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Add new line

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Remove SmartCachePatchWSIDataset test to fix it

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* move init docstring to class docstring

Signed-off-by: Richard Brown <33289025+rijobro@users.noreply.github.com>
---
 docs/source/apps.rst             |   9 ++
 monai/apps/pathology/__init__.py |  12 +++
 monai/apps/pathology/datasets.py | 158 +++++++++++++++++++++++++++++++
 monai/data/dataset.py            |  33 +++----
 tests/test_patch_wsi_dataset.py  | 136 ++++++++++++++++++++++++++
 5 files changed, 330 insertions(+), 18 deletions(-)
 create mode 100644 monai/apps/pathology/__init__.py
 create mode 100644 monai/apps/pathology/datasets.py
 create mode 100644 tests/test_patch_wsi_dataset.py

diff --git a/docs/source/apps.rst b/docs/source/apps.rst
index 1c4f4c3dfb..4c45a5fb39 100644
--- a/docs/source/apps.rst
+++ b/docs/source/apps.rst
@@ -62,3 +62,12 @@ Applications
    :members:
 .. autoclass:: Fetch2DSliced
    :members:
+
+`Pathology`
+-----------
+
+.. automodule:: monai.apps.pathology.datasets
+.. autoclass:: PatchWSIDataset
+    :members:
+.. autoclass:: SmartCachePatchWSIDataset
+    :members:
diff --git a/monai/apps/pathology/__init__.py b/monai/apps/pathology/__init__.py
new file mode 100644
index 0000000000..bbdb812c03
--- /dev/null
+++ b/monai/apps/pathology/__init__.py
@@ -0,0 +1,12 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .datasets import PatchWSIDataset, SmartCachePatchWSIDataset
diff --git a/monai/apps/pathology/datasets.py b/monai/apps/pathology/datasets.py
new file mode 100644
index 0000000000..f9ce0bc62b
--- /dev/null
+++ b/monai/apps/pathology/datasets.py
@@ -0,0 +1,158 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
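+
+# A minimal usage sketch (the values are illustrative, taken from
+# tests/test_patch_wsi_dataset.py, and assume a local slide "CMU-1.tiff"
+# plus a working `cucim` install):
+#
+#     dataset = PatchWSIDataset(
+#         data=[{"image": "CMU-1.tiff", "location": [10004, 20004], "label": [0, 0, 0, 1]}],
+#         region_size=(8, 8),
+#         grid_shape=(2, 2),
+#         patch_size=1,
+#     )
+#     patches = dataset[0]  # a list of four {"image", "label"} dicts, one per grid cell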
+
+import sys
+from typing import Callable, List, Optional, Sequence, Tuple, Union
+
+import numpy as np
+
+from monai.data import Dataset, SmartCacheDataset
+from monai.data.image_reader import WSIReader
+
+__all__ = ["PatchWSIDataset", "SmartCachePatchWSIDataset"]
+
+
+class PatchWSIDataset(Dataset):
+    """
+    This dataset reads whole slide images, extracts regions, and creates patches.
+    It also reads labels for each patch and provides each patch with its associated class labels.
+
+    Args:
+        data: the list of input samples including image, location, and label (see below for more details).
+        region_size: the size of the region to be extracted from the whole slide image.
+        grid_shape: the grid shape on which the patches should be extracted.
+        patch_size: the size of the patches extracted from the region on the grid.
+        image_reader_name: the name of the library to be used for loading whole slide images,
+            either CuCIM or OpenSlide. Defaults to CuCIM.
+        transform: transforms to be executed on input data.
+
+    Note:
+        The input data has the following form as an example:
+        `[{"image": "path/to/image1.tiff", "location": [200, 500], "label": [0,0,0,1]}]`.
+
+        This means: from "image1.tiff", extract a region centered at the given `location`
+        with the size of `region_size`, and then extract patches with the size of `patch_size`
+        from a square grid with the shape of `grid_shape`.
+        Be aware that the `grid_shape` should construct a grid with the same number of elements as `labels`,
+        so for this example the `grid_shape` should be (2, 2).
+
+    """
+
+    def __init__(
+        self,
+        data: List,
+        region_size: Union[int, Tuple[int, int]],
+        grid_shape: Union[int, Tuple[int, int]],
+        patch_size: int,
+        image_reader_name: str = "cuCIM",
+        transform: Optional[Callable] = None,
+    ):
+        super().__init__(data, transform)
+
+        if isinstance(region_size, int):
+            self.region_size = (region_size, region_size)
+        else:
+            self.region_size = region_size
+
+        if isinstance(grid_shape, int):
+            self.grid_shape = (grid_shape, grid_shape)
+        else:
+            self.grid_shape = grid_shape
+
+        self.patch_size = patch_size
+        self.sub_region_size = (self.region_size[0] / self.grid_shape[0], self.region_size[1] / self.grid_shape[1])
+
+        self.image_path_list = list({x["image"] for x in self.data})
+
+        # normalize the reader name so "OpenSlide" and "openslide" behave the same
+        self.image_reader_name = image_reader_name.lower()
+        self.image_reader = WSIReader(image_reader_name)
+        self.wsi_object_dict = None
+        if self.image_reader_name != "openslide":
+            # OpenSlide causes memory issue if we prefetch image objects
+            self._fetch_wsi_objects()
+
+    def _fetch_wsi_objects(self):
+        """Load all the image objects and reuse them when asked for an item."""
+        self.wsi_object_dict = {}
+        for image_path in self.image_path_list:
+            self.wsi_object_dict[image_path] = self.image_reader.read(image_path)
+
+    def __getitem__(self, index):
+        sample = self.data[index]
+        if self.image_reader_name == "openslide":
+            img_obj = self.image_reader.read(sample["image"])
+        else:
+            img_obj = self.wsi_object_dict[sample["image"]]
+        location = [sample["location"][i] - self.region_size[i] // 2 for i in range(len(self.region_size))]
+        images, _ = self.image_reader.get_data(
+            img=img_obj,
+            location=location,
+            size=self.region_size,
+            grid_shape=self.grid_shape,
+            patch_size=self.patch_size,
+        )
+        labels = np.array(sample["label"], dtype=np.float32)[:, np.newaxis, np.newaxis]
+        patches = [{"image": images[i], "label": labels[i]} for i in range(len(sample["label"]))]
+        if self.transform:
+            patches = self.transform(patches)
+        return patches
+
+
+class
SmartCachePatchWSIDataset(SmartCacheDataset): + """Add SmartCache functionality to `PatchWSIDataset`. + + Args: + data: the list of input samples including image, location, and label (see `PatchWSIDataset` for more details) + region_size: the region to be extracted from the whole slide image. + grid_shape: the grid shape on which the patches should be extracted. + patch_size: the patches extracted from the region on the grid. + image_reader_name: the name of library to be used for loading whole slide imaging, either CuCIM or OpenSlide. + Defaults to CuCIM. + transform: transforms to be executed on input data. + replace_rate: percentage of the cached items to be replaced in every epoch. + cache_num: number of items to be cached. Default is `sys.maxsize`. + will take the minimum of (cache_num, data_length x cache_rate, data_length). + cache_rate: percentage of cached data in total, default is 1.0 (cache all). + will take the minimum of (cache_num, data_length x cache_rate, data_length). + num_init_workers: the number of worker threads to initialize the cache for first epoch. + If num_init_workers is None then the number returned by os.cpu_count() is used. + num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch. + If num_replace_workers is None then the number returned by os.cpu_count() is used. + progress: whether to display a progress bar when caching for the first epoch. + + """ + + def __init__( + self, + data: List, + region_size: Union[int, Tuple[int, int]], + grid_shape: Union[int, Tuple[int, int]], + patch_size: int, + transform: Union[Sequence[Callable], Callable], + image_reader_name: str = "cuCIM", + replace_rate: float = 0.5, + cache_num: int = sys.maxsize, + cache_rate: float = 1.0, + num_init_workers: Optional[int] = None, + num_replace_workers: Optional[int] = None, + progress: bool = True, + ): + patch_wsi_dataset = PatchWSIDataset(data, region_size, grid_shape, patch_size, image_reader_name) + super().__init__( + data=patch_wsi_dataset, # type: ignore + transform=transform, + replace_rate=replace_rate, + cache_num=cache_num, + cache_rate=cache_rate, + num_init_workers=num_init_workers, + num_replace_workers=num_replace_workers, + progress=progress, + ) diff --git a/monai/data/dataset.py b/monai/data/dataset.py index 813008e3a8..9a4e932160 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -582,6 +582,21 @@ class SmartCacheDataset(Randomizable, CacheDataset): This replacement will not work if setting the `multiprocessing_context` of DataLoader to `spawn` or on windows(the default multiprocessing method is `spawn`) and setting `num_workers` greater than 0. + Args: + data: input data to load and transform to generate dataset for model. + transform: transforms to execute operations on input data. + replace_rate: percentage of the cached items to be replaced in every epoch. + cache_num: number of items to be cached. Default is `sys.maxsize`. + will take the minimum of (cache_num, data_length x cache_rate, data_length). + cache_rate: percentage of cached data in total, default is 1.0 (cache all). + will take the minimum of (cache_num, data_length x cache_rate, data_length). + num_init_workers: the number of worker threads to initialize the cache for first epoch. + If num_init_workers is None then the number returned by os.cpu_count() is used. + num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch. 
+ If num_replace_workers is None then the number returned by os.cpu_count() is used. + progress: whether to display a progress bar when caching for the first epoch. + shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch. + seed: random seed if shuffle is `True`, default to `0`. """ def __init__( @@ -597,24 +612,6 @@ def __init__( shuffle: bool = True, seed: int = 0, ) -> None: - """ - Args: - data: input data to load and transform to generate dataset for model. - transform: transforms to execute operations on input data. - replace_rate: percentage of the cached items to be replaced in every epoch. - cache_num: number of items to be cached. Default is `sys.maxsize`. - will take the minimum of (cache_num, data_length x cache_rate, data_length). - cache_rate: percentage of cached data in total, default is 1.0 (cache all). - will take the minimum of (cache_num, data_length x cache_rate, data_length). - num_init_workers: the number of worker threads to initialize the cache for first epoch. - If num_init_workers is None then the number returned by os.cpu_count() is used. - num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch. - If num_replace_workers is None then the number returned by os.cpu_count() is used. - progress: whether to display a progress bar when caching for the first epoch. - shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch. - seed: random seed if shuffle is `True`, default to `0`. - - """ if shuffle: self.set_random_state(seed=seed) self.randomize(data) diff --git a/tests/test_patch_wsi_dataset.py b/tests/test_patch_wsi_dataset.py new file mode 100644 index 0000000000..730519ed52 --- /dev/null +++ b/tests/test_patch_wsi_dataset.py @@ -0,0 +1,136 @@ +import os +import unittest +from unittest import skipUnless +from urllib import request + +import numpy as np +from numpy.testing import assert_array_equal +from parameterized import parameterized + +from monai.apps.pathology.datasets import PatchWSIDataset +from monai.utils import optional_import + +_, has_cim = optional_import("cucim") +_, has_osl = optional_import("openslide") + +FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" + +TEST_CASE_0 = [ + FILE_URL, + { + "data": [ + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [1]}, + ], + "region_size": (1, 1), + "grid_shape": (1, 1), + "patch_size": 1, + "image_reader_name": "cuCIM", + }, + [ + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[1]])}, + ], +] + +TEST_CASE_1 = [ + FILE_URL, + { + "data": [{"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [0, 0, 0, 1]}], + "region_size": (8, 8), + "grid_shape": (2, 2), + "patch_size": 1, + "image_reader_name": "cuCIM", + }, + [ + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[0]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[0]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[0]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[1]])}, + ], +] + +TEST_CASE_2 = [ + FILE_URL, + { + "data": [ + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [1]}, + ], + "region_size": 1, + "grid_shape": 1, + "patch_size": 1, + "image_reader_name": "cuCIM", + }, + [ + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": 
np.array([[1]])}, + ], +] + + +TEST_CASE_OPENSLIDE_0 = [ + FILE_URL, + { + "data": [ + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [1]}, + ], + "region_size": (1, 1), + "grid_shape": (1, 1), + "patch_size": 1, + "image_reader_name": "OpenSlide", + }, + [ + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[1]])}, + ], +] + +TEST_CASE_OPENSLIDE_1 = [ + FILE_URL, + { + "data": [{"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [0, 0, 0, 1]}], + "region_size": (8, 8), + "grid_shape": (2, 2), + "patch_size": 1, + "image_reader_name": "OpenSlide", + }, + [ + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[0]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[0]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[0]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[1]])}, + ], +] + + +class TestPatchWSIDataset(unittest.TestCase): + @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2]) + @skipUnless(has_cim, "Requires CuCIM") + def test_read_patches_cucim(self, file_url, input_parameters, expected): + self.camelyon_data_download(file_url) + dataset = PatchWSIDataset(**input_parameters) + samples = dataset[0] + for i in range(len(samples)): + self.assertTupleEqual(samples[i]["label"].shape, expected[i]["label"].shape) + self.assertTupleEqual(samples[i]["image"].shape, expected[i]["image"].shape) + self.assertIsNone(assert_array_equal(samples[i]["label"], expected[i]["label"])) + self.assertIsNone(assert_array_equal(samples[i]["image"], expected[i]["image"])) + + @parameterized.expand([TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1]) + @skipUnless(has_osl, "Requires OpenSlide") + def test_read_patches_openslide(self, file_url, input_parameters, expected): + self.camelyon_data_download(file_url) + dataset = PatchWSIDataset(**input_parameters) + samples = dataset[0] + for i in range(len(samples)): + self.assertTupleEqual(samples[i]["label"].shape, expected[i]["label"].shape) + self.assertTupleEqual(samples[i]["image"].shape, expected[i]["image"].shape) + self.assertIsNone(assert_array_equal(samples[i]["label"], expected[i]["label"])) + self.assertIsNone(assert_array_equal(samples[i]["image"], expected[i]["image"])) + + def camelyon_data_download(self, file_url): + filename = os.path.basename(file_url) + if not os.path.exists(filename): + print(f"Test image [{filename}] does not exist. 
Downloading...") + request.urlretrieve(file_url, filename) + return filename + + +if __name__ == "__main__": + unittest.main() From 8c13239ee8ec15ae03b769893a7718f12d7ffddc Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Thu, 25 Mar 2021 14:42:27 -0400 Subject: [PATCH 099/457] Fully convolutional models based on TorchVision models (#1845) * Implement fully convolutional version of torchvision models Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update networks init with TorchVisionFullyConvModel Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add unittests for TorchVisionFullyConvModel Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add another test case Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Upate docs for TorchVisionFullyConvModel Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Make torchvision import optional Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Skip the tests if torchvision not available Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Improve the model based on comments Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update and add test cases Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- docs/source/networks.rst | 5 ++ monai/networks/nets/__init__.py | 1 + monai/networks/nets/torchvision_fc.py | 67 ++++++++++++++++ tests/test_torchvision_fc_model.py | 106 ++++++++++++++++++++++++++ 4 files changed, 179 insertions(+) create mode 100644 monai/networks/nets/torchvision_fc.py create mode 100644 tests/test_torchvision_fc_model.py diff --git a/docs/source/networks.rst b/docs/source/networks.rst index 7d12a94fc2..15d7cb80b0 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -395,6 +395,11 @@ Nets .. autoclass:: Critic :members: +`TorchVisionFullyConvModel` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: TorchVisionFullyConvModel + :members: + Utilities --------- .. automodule:: monai.networks.utils diff --git a/monai/networks/nets/__init__.py b/monai/networks/nets/__init__.py index cd00ea1aa1..6876293bdb 100644 --- a/monai/networks/nets/__init__.py +++ b/monai/networks/nets/__init__.py @@ -22,6 +22,7 @@ from .regunet import GlobalNet, LocalNet, RegUNet from .segresnet import SegResNet, SegResNetVAE from .senet import SENet, SENet154, SEResNet50, SEResNet101, SEResNet152, SEResNext50, SEResNext101 +from .torchvision_fc import TorchVisionFullyConvModel from .unet import UNet, Unet, unet from .varautoencoder import VarAutoEncoder from .vnet import VNet diff --git a/monai/networks/nets/torchvision_fc.py b/monai/networks/nets/torchvision_fc.py new file mode 100644 index 0000000000..4fdd0d64ef --- /dev/null +++ b/monai/networks/nets/torchvision_fc.py @@ -0,0 +1,67 @@ +from typing import Tuple, Union + +import torch + +from monai.utils import optional_import + +models, _ = optional_import("torchvision.models") + + +class TorchVisionFullyConvModel(torch.nn.Module): + """ + Customize TorchVision models to replace fully connected layer by convolutional layer. + + Args: + model_name: name of any torchvision with adaptive avg pooling and fully connected layer at the end. + - resnet18 (default) + - resnet34 + - resnet50 + - resnet101 + - resnet152 + - resnext50_32x4d + - resnext101_32x8d + - wide_resnet50_2 + - wide_resnet101_2 + n_classes: number of classes for the last classification layer. Default to 1. 
+ pool_size: the kernel size for `AvgPool2d` to replace `AdaptiveAvgPool2d`. Default to (7, 7). + pool_stride: the stride for `AvgPool2d` to replace `AdaptiveAvgPool2d`. Default to 1. + pretrained: whether to use the imagenet pretrained weights. Default to False. + """ + + def __init__( + self, + model_name: str = "resnet18", + n_classes: int = 1, + pool_size: Union[int, Tuple[int, int]] = (7, 7), + pool_stride: Union[int, Tuple[int, int]] = 1, + pretrained: bool = False, + ): + super().__init__() + model = getattr(models, model_name)(pretrained=pretrained) + layers = list(model.children()) + + # check if the model is compatible + if not str(layers[-1]).startswith("Linear"): + raise ValueError(f"Model ['{model_name}'] does not have a Linear layer at the end.") + if not str(layers[-2]).startswith("AdaptiveAvgPool2d"): + raise ValueError(f"Model ['{model_name}'] does not have a AdaptiveAvgPool2d layer next to the end.") + + # remove the last Linear layer (fully connected) and the adaptive avg pooling + self.features = torch.nn.Sequential(*layers[:-2]) + + # add 7x7 avg pooling (in place of adaptive avg pooling) + self.pool = torch.nn.AvgPool2d(kernel_size=pool_size, stride=pool_stride) + + # add 1x1 conv (it behaves like a FC layer) + self.fc = torch.nn.Conv2d(model.fc.in_features, n_classes, kernel_size=(1, 1)) + + def forward(self, x): + x = self.features(x) + + # apply 2D avg pooling + x = self.pool(x) + + # apply last 1x1 conv layer that act like a linear layer + x = self.fc(x) + + return x diff --git a/tests/test_torchvision_fc_model.py b/tests/test_torchvision_fc_model.py new file mode 100644 index 0000000000..2c65f0d32c --- /dev/null +++ b/tests/test_torchvision_fc_model.py @@ -0,0 +1,106 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
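+
+# Shape arithmetic behind the cases below (a sketch of what the tests expect):
+# resnet18 downsamples 224x224 inputs to 7x7 feature maps, so the 7x7 AvgPool2d
+# with stride 1 yields a 1x1 output map, while 256x256 inputs give 8x8 features
+# and therefore a 2x2 output map:
+#
+#     net = TorchVisionFullyConvModel("resnet18", n_classes=1)
+#     net(torch.randn(2, 3, 256, 256)).shape  # -> torch.Size([2, 1, 2, 2])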
+ +import unittest +from unittest import skipUnless + +import torch +from parameterized import parameterized + +from monai.networks import eval_mode +from monai.networks.nets import TorchVisionFullyConvModel +from monai.utils import optional_import + +_, has_tv = optional_import("torchvision") + +device = "cuda" if torch.cuda.is_available() else "cpu" + +TEST_CASE_0 = [ + {"model_name": "resnet18", "n_classes": 1, "pretrained": False}, + (2, 3, 224, 224), + (2, 1, 1, 1), +] + +TEST_CASE_1 = [ + {"model_name": "resnet18", "n_classes": 1, "pretrained": False}, + (2, 3, 256, 256), + (2, 1, 2, 2), +] + +TEST_CASE_2 = [ + {"model_name": "resnet101", "n_classes": 5, "pretrained": False}, + (2, 3, 256, 256), + (2, 5, 2, 2), +] + +TEST_CASE_3 = [ + {"model_name": "resnet101", "n_classes": 5, "pool_size": 6, "pretrained": False}, + (2, 3, 224, 224), + (2, 5, 2, 2), +] + +TEST_CASE_PRETRAINED_0 = [ + {"model_name": "resnet18", "n_classes": 1, "pretrained": True}, + (2, 3, 224, 224), + (2, 1, 1, 1), + -0.010419349186122417, +] + +TEST_CASE_PRETRAINED_1 = [ + {"model_name": "resnet18", "n_classes": 1, "pretrained": True}, + (2, 3, 256, 256), + (2, 1, 2, 2), + -0.010419349186122417, +] + +TEST_CASE_PRETRAINED_2 = [ + {"model_name": "resnet18", "n_classes": 5, "pretrained": True}, + (2, 3, 256, 256), + (2, 5, 2, 2), + -0.010419349186122417, +] + + +class TestTorchVisionFullyConvModel(unittest.TestCase): + @parameterized.expand( + [ + TEST_CASE_0, + TEST_CASE_1, + TEST_CASE_2, + TEST_CASE_3, + ] + ) + @skipUnless(has_tv, "Requires TorchVision.") + def test_without_pretrained(self, input_param, input_shape, expected_shape): + net = TorchVisionFullyConvModel(**input_param).to(device) + with eval_mode(net): + result = net.forward(torch.randn(input_shape).to(device)) + self.assertEqual(result.shape, expected_shape) + + @parameterized.expand( + [ + TEST_CASE_PRETRAINED_0, + TEST_CASE_PRETRAINED_1, + TEST_CASE_PRETRAINED_2, + ] + ) + @skipUnless(has_tv, "Requires TorchVision.") + def test_with_pretrained(self, input_param, input_shape, expected_shape, expected_value): + net = TorchVisionFullyConvModel(**input_param).to(device) + with eval_mode(net): + result = net.forward(torch.randn(input_shape).to(device)) + value = next(net.parameters())[0, 0, 0, 0].item() + self.assertEqual(value, expected_value) + self.assertEqual(result.shape, expected_shape) + + +if __name__ == "__main__": + unittest.main() From 5e4a0abe1c028fd2660fdb63346244465da8de82 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Thu, 25 Mar 2021 17:04:20 -0400 Subject: [PATCH 100/457] Support multidimensional labels (#1851) * Add label dimensions to be compatible with changes in model output Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update test case outputs and add new test case Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/apps/pathology/datasets.py | 5 ++- tests/test_patch_wsi_dataset.py | 53 ++++++++++++++++++++++++-------- 2 files changed, 44 insertions(+), 14 deletions(-) diff --git a/monai/apps/pathology/datasets.py b/monai/apps/pathology/datasets.py index f9ce0bc62b..a2f7b17ffe 100644 --- a/monai/apps/pathology/datasets.py +++ b/monai/apps/pathology/datasets.py @@ -99,7 +99,10 @@ def __getitem__(self, index): grid_shape=self.grid_shape, patch_size=self.patch_size, ) - labels = np.array(sample["label"], dtype=np.float32)[:, np.newaxis, np.newaxis] + labels = np.array(sample["label"], dtype=np.float32) + # expand dimensions to have 4 dimension as batch, 
class, height, and width. + for _ in range(4 - labels.ndim): + labels = np.expand_dims(labels, 1) patches = [{"image": images[i], "label": labels[i]} for i in range(len(sample["label"]))] if self.transform: patches = self.transform(patches) diff --git a/tests/test_patch_wsi_dataset.py b/tests/test_patch_wsi_dataset.py index 730519ed52..1d470374bb 100644 --- a/tests/test_patch_wsi_dataset.py +++ b/tests/test_patch_wsi_dataset.py @@ -27,7 +27,7 @@ "image_reader_name": "cuCIM", }, [ - {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[1]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1]]])}, ], ] @@ -41,10 +41,10 @@ "image_reader_name": "cuCIM", }, [ - {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[0]])}, - {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[0]])}, - {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[0]])}, - {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[1]])}, + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[1]]])}, ], ] @@ -60,10 +60,25 @@ "image_reader_name": "cuCIM", }, [ - {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[1]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1]]])}, ], ] +TEST_CASE_3 = [ + FILE_URL, + { + "data": [ + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [[[0, 1], [1, 0]]]}, + ], + "region_size": 1, + "grid_shape": 1, + "patch_size": 1, + "image_reader_name": "cuCIM", + }, + [ + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[0, 1], [1, 0]]])}, + ], +] TEST_CASE_OPENSLIDE_0 = [ FILE_URL, @@ -77,7 +92,7 @@ "image_reader_name": "OpenSlide", }, [ - {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[1]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1]]])}, ], ] @@ -91,16 +106,23 @@ "image_reader_name": "OpenSlide", }, [ - {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[0]])}, - {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[0]])}, - {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[0]])}, - {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[1]])}, + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[1]]])}, ], ] class TestPatchWSIDataset(unittest.TestCase): - @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2]) + @parameterized.expand( + [ + TEST_CASE_0, + TEST_CASE_1, + TEST_CASE_2, + TEST_CASE_3, + ] + ) @skipUnless(has_cim, "Requires CuCIM") def test_read_patches_cucim(self, file_url, input_parameters, expected): 
self.camelyon_data_download(file_url) @@ -112,7 +134,12 @@ def test_read_patches_cucim(self, file_url, input_parameters, expected): self.assertIsNone(assert_array_equal(samples[i]["label"], expected[i]["label"])) self.assertIsNone(assert_array_equal(samples[i]["image"], expected[i]["image"])) - @parameterized.expand([TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1]) + @parameterized.expand( + [ + TEST_CASE_OPENSLIDE_0, + TEST_CASE_OPENSLIDE_1, + ] + ) @skipUnless(has_osl, "Requires OpenSlide") def test_read_patches_openslide(self, file_url, input_parameters, expected): self.camelyon_data_download(file_url) From 126768f226198c3f7f92b0e0104d013a03ad5fe3 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 25 Mar 2021 22:50:53 +0000 Subject: [PATCH 101/457] identity init. for global net (#1849) * fixes init affinehead Signed-off-by: Wenqi Li * fixes unit tests Signed-off-by: Wenqi Li --- monai/networks/nets/regunet.py | 6 ++++++ tests/test_globalnet.py | 8 +++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/monai/networks/nets/regunet.py b/monai/networks/nets/regunet.py index 25455c2df7..4a33ca9dcb 100644 --- a/monai/networks/nets/regunet.py +++ b/monai/networks/nets/regunet.py @@ -263,15 +263,21 @@ def __init__( if spatial_dims == 2: in_features = in_channels * decode_size[0] * decode_size[1] out_features = 6 + out_init = torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float) elif spatial_dims == 3: in_features = in_channels * decode_size[0] * decode_size[1] * decode_size[2] out_features = 12 + out_init = torch.tensor([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], dtype=torch.float) else: raise ValueError(f"only support 2D/3D operation, got spatial_dims={spatial_dims}") self.fc = nn.Linear(in_features=in_features, out_features=out_features) self.grid = self.get_reference_grid(image_size) # (spatial_dims, ...) 
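+        # zero weights plus an identity-affine bias make the head emit the identity
+        # transform parameters regardless of input, so the initial prediction warps
+        # each image to itself (the identity check in tests/test_globalnet.py below)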
+ # init weight/bias + self.fc.weight.data.zero_() + self.fc.bias.data.copy_(out_init) + @staticmethod def get_reference_grid(image_size: Union[Tuple[int], List[int]]) -> torch.Tensor: mesh_points = [torch.arange(0, dim) for dim in image_size] diff --git a/tests/test_globalnet.py b/tests/test_globalnet.py index 19e9db9137..ed06f5fb0c 100644 --- a/tests/test_globalnet.py +++ b/tests/test_globalnet.py @@ -5,6 +5,7 @@ from parameterized import parameterized from monai.networks import eval_mode +from monai.networks.blocks import Warp from monai.networks.nets import GlobalNet from monai.networks.nets.regunet import AffineHead from tests.utils import test_script_save @@ -64,9 +65,14 @@ class TestGlobalNet(unittest.TestCase): @parameterized.expand(TEST_CASES_GLOBAL_NET) def test_shape(self, input_param, input_shape, expected_shape): net = GlobalNet(**input_param).to(device) + warp_layer = Warp(spatial_dims=input_param.get("spatial_dims", 2)) with eval_mode(net): - result = net(torch.randn(input_shape).to(device)) + img = torch.randn(input_shape) + result = net(img.to(device)) + warped = warp_layer(img.to(device), result) self.assertEqual(result.shape, expected_shape) + # testing initial pred identity + np.testing.assert_allclose(warped.detach().cpu().numpy(), img.detach().cpu().numpy(), rtol=1e-4, atol=1e-4) def test_script(self): input_param, input_shape, _ = TEST_CASES_GLOBAL_NET[0] From e0feb53074183d2e1267ad56d7c750d6659ce425 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Fri, 26 Mar 2021 14:34:38 +0800 Subject: [PATCH 102/457] [DLMED] fix SmartCache warning (#1855) Signed-off-by: Nic Ma --- monai/data/dataset.py | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/monai/data/dataset.py b/monai/data/dataset.py index 9a4e932160..06a29574af 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -582,6 +582,9 @@ class SmartCacheDataset(Randomizable, CacheDataset): This replacement will not work if setting the `multiprocessing_context` of DataLoader to `spawn` or on windows(the default multiprocessing method is `spawn`) and setting `num_workers` greater than 0. + If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer, + otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training. + Args: data: input data to load and transform to generate dataset for model. transform: transforms to execute operations on input data. @@ -800,18 +803,6 @@ def __len__(self): """ return self.cache_num - def __getitem__(self, index): - """ - Raise exception if didn't call the expected APIs in SmartCacheDataset. - - """ - if not self.is_started(): - raise RuntimeError( - "if using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer," - "otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training." 
- ) - return super().__getitem__(index) - class ZipDataset(Dataset): """ From 149216ecbb83924b3ef4b43f5639e63cb6f4610d Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Fri, 26 Mar 2021 09:36:09 +0000 Subject: [PATCH 103/457] 1857 - SpatialCrop supports with tensors (#1858) * fixes #1857, SpatialCrop is compatible with tensors Signed-off-by: Wenqi Li * update val comparisons Signed-off-by: Wenqi Li --- monai/transforms/croppad/array.py | 4 ++-- monai/transforms/spatial/array.py | 2 +- tests/test_center_spatial_crop.py | 9 ++++++++- tests/test_inverse.py | 5 ++++- tests/test_rand_elastic_2d.py | 2 +- tests/test_rand_elastic_3d.py | 2 +- tests/test_spatial_crop.py | 7 +++++++ 7 files changed, 24 insertions(+), 7 deletions(-) diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index 6174378e3b..159fa1a5f4 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -246,14 +246,14 @@ def __init__( self.roi_start = self.roi_start if isinstance(self.roi_start, np.ndarray) else np.array([self.roi_start]) self.roi_end = self.roi_end if isinstance(self.roi_end, np.ndarray) else np.array([self.roi_end]) - def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> np.ndarray: + def __call__(self, img: Union[np.ndarray, torch.Tensor]): """ Apply the transform to `img`, assuming `img` is channel-first and slicing doesn't apply to the channel dim. """ sd = min(self.roi_start.size, self.roi_end.size, len(img.shape[1:])) # spatial dims slices = [slice(None)] + [slice(s, e) for s, e in zip(self.roi_start[:sd], self.roi_end[:sd])] - return np.asarray(img[tuple(slices)]) + return img[tuple(slices)] class CenterSpatialCrop(Transform): diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 471b171312..1c096ba743 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -1563,7 +1563,7 @@ def __call__( mode=InterpolateMode.BICUBIC.value, align_corners=False, ) - grid = CenterSpatialCrop(roi_size=sp_size)(np.asarray(grid[0])) + grid = CenterSpatialCrop(roi_size=sp_size)(grid[0]) else: grid = create_grid(spatial_size=sp_size) return self.resampler(img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode) diff --git a/tests/test_center_spatial_crop.py b/tests/test_center_spatial_crop.py index c03ec24e18..3e828176a5 100644 --- a/tests/test_center_spatial_crop.py +++ b/tests/test_center_spatial_crop.py @@ -12,6 +12,7 @@ import unittest import numpy as np +import torch from parameterized import parameterized from monai.transforms import CenterSpatialCrop @@ -26,9 +27,15 @@ np.array([[[1, 2], [2, 3]]]), ] +TEST_CASE_3 = [ + {"roi_size": [2, 2, 2]}, + torch.randint(0, 2, size=[3, 3, 3, 3], device="cuda" if torch.cuda.is_available() else "cpu"), + (3, 2, 2, 2), +] + class TestCenterSpatialCrop(unittest.TestCase): - @parameterized.expand([TEST_CASE_0, TEST_CASE_1]) + @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_3]) def test_shape(self, input_param, input_data, expected_shape): result = CenterSpatialCrop(**input_param)(input_data) np.testing.assert_allclose(result.shape, expected_shape) diff --git a/tests/test_inverse.py b/tests/test_inverse.py index d54855d7c1..ccc4f366c2 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -491,7 +491,10 @@ def check_inverse(self, name, keys, orig_d, fwd_bck_d, unmodified_d, acceptable_ unmodified = unmodified_d[key] if isinstance(orig, np.ndarray): mean_diff = np.mean(np.abs(orig - fwd_bck)) - unmodded_diff = 
np.mean(np.abs(orig - ResizeWithPadOrCrop(orig.shape[1:])(unmodified))) + resized = ResizeWithPadOrCrop(orig.shape[1:])(unmodified) + if isinstance(resized, torch.Tensor): + resized = resized.detach().cpu().numpy() + unmodded_diff = np.mean(np.abs(orig - resized)) try: self.assertLessEqual(mean_diff, acceptable_diff) except AssertionError: diff --git a/tests/test_rand_elastic_2d.py b/tests/test_rand_elastic_2d.py index aa408f0fdc..fbfb7d5761 100644 --- a/tests/test_rand_elastic_2d.py +++ b/tests/test_rand_elastic_2d.py @@ -74,7 +74,7 @@ "scale_range": [0.01, 0.02], "prob": 0.9, "as_tensor_output": False, - "device": None, + "device": "cuda" if torch.cuda.is_available() else "cpu", "spatial_size": (2, 2), }, {"img": torch.arange(27).reshape((3, 3, 3))}, diff --git a/tests/test_rand_elastic_3d.py b/tests/test_rand_elastic_3d.py index 8cd74c6be7..c63282d571 100644 --- a/tests/test_rand_elastic_3d.py +++ b/tests/test_rand_elastic_3d.py @@ -59,7 +59,7 @@ "prob": 0.9, "rotate_range": [1, 1, 1], "as_tensor_output": False, - "device": None, + "device": "cuda" if torch.cuda.is_available() else "cpu", "spatial_size": (2, 2, 2), }, {"img": torch.arange(27).reshape((1, 3, 3, 3)), "mode": "bilinear"}, diff --git a/tests/test_spatial_crop.py b/tests/test_spatial_crop.py index f3c904889f..4c56929686 100644 --- a/tests/test_spatial_crop.py +++ b/tests/test_spatial_crop.py @@ -12,6 +12,7 @@ import unittest import numpy as np +import torch from parameterized import parameterized from monai.transforms import SpatialCrop @@ -49,6 +50,12 @@ def test_shape(self, input_param, input_shape, expected_shape): result = SpatialCrop(**input_param)(input_data) self.assertTupleEqual(result.shape, expected_shape) + @parameterized.expand(TEST_CASES) + def test_tensor_shape(self, input_param, input_shape, expected_shape): + input_data = torch.randint(0, 2, size=input_shape, device="cuda" if torch.cuda.is_available() else "cpu") + result = SpatialCrop(**input_param)(input_data) + self.assertTupleEqual(result.shape, expected_shape) + if __name__ == "__main__": unittest.main() From 83a6ad82dd52f29c06f884fc1e3ec101e4189b77 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Fri, 26 Mar 2021 12:28:59 +0000 Subject: [PATCH 104/457] infer spatial dims from input (#1860) --- monai/networks/blocks/warp.py | 35 ++++++++++++----------------------- tests/test_dvf2ddf.py | 8 ++++---- tests/test_globalnet.py | 2 +- tests/test_warp.py | 14 +++++--------- 4 files changed, 22 insertions(+), 37 deletions(-) diff --git a/monai/networks/blocks/warp.py b/monai/networks/blocks/warp.py index eb4c09fa72..35d2a88f12 100644 --- a/monai/networks/blocks/warp.py +++ b/monai/networks/blocks/warp.py @@ -9,18 +9,16 @@ class Warp(nn.Module): """ - Warp an image with given DDF. + Warp an image with given dense displacement field (DDF). """ def __init__( self, - spatial_dims: int, mode: int = 1, padding_mode: Optional[Union[GridSamplePadMode, str]] = GridSamplePadMode.ZEROS, ): """ Args: - spatial_dims: {2, 3}. number of spatial dimensions mode: interpolation mode to calculate output values, defaults to 1. 
Possible values are:: @@ -35,9 +33,6 @@ def __init__( See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample """ super(Warp, self).__init__() - if spatial_dims not in [2, 3]: - raise ValueError(f"got unsupported spatial_dims={spatial_dims}, only support 2-d and 3-d input") - self.spatial_dims = spatial_dims if mode < 0: raise ValueError(f"do not support negative mode, got mode={mode}") self.mode = mode @@ -53,7 +48,7 @@ def get_reference_grid(ddf: torch.Tensor) -> torch.Tensor: @staticmethod def normalize_grid(grid: torch.Tensor) -> torch.Tensor: - # (batch, ..., self.spatial_dims) + # (batch, ..., spatial_dims) for i, dim in enumerate(grid.shape[1:-1]): grid[..., i] = grid[..., i] * 2 / (dim - 1) - 1 return grid @@ -67,21 +62,16 @@ def forward(self, image: torch.Tensor, ddf: torch.Tensor) -> torch.Tensor: Returns: warped_image in the same shape as image (batch, num_channels, H, W[, D]) """ - if len(image.shape) != 2 + self.spatial_dims: - raise ValueError(f"expecting {self.spatial_dims + 2}-d input, " f"got input in shape {image.shape}") - if len(ddf.shape) != 2 + self.spatial_dims or ddf.shape[1] != self.spatial_dims: + spatial_dims = len(image.shape) - 2 + if spatial_dims not in (2, 3): + raise NotImplementedError(f"got unsupported spatial_dims={spatial_dims}, currently support 2 or 3.") + ddf_shape = (image.shape[0], spatial_dims) + tuple(image.shape[2:]) + if ddf.shape != ddf_shape: raise ValueError( - f"expecting {self.spatial_dims + 2}-d ddf with {self.spatial_dims} channels, " - f"got ddf in shape {ddf.shape}" + f"Given input {spatial_dims}-d image shape {image.shape}, " f"the input DDF shape must be {ddf_shape}." ) - if image.shape[0] != ddf.shape[0] or image.shape[2:] != ddf.shape[2:]: - raise ValueError( - "expecting image and ddf of same batch size and spatial size, " - f"got image of shape {image.shape}, ddf of shape {ddf.shape}" - ) - grid = self.get_reference_grid(ddf) + ddf - grid = grid.permute([0] + list(range(2, 2 + self.spatial_dims)) + [1]) # (batch, ..., self.spatial_dims) + grid = grid.permute([0] + list(range(2, 2 + spatial_dims)) + [1]) # (batch, ..., spatial_dims) if self.mode > 1: raise ValueError(f"{self.mode}-order interpolation not yet implemented.") @@ -103,7 +93,7 @@ def forward(self, image: torch.Tensor, ddf: torch.Tensor) -> torch.Tensor: # ) else: grid = self.normalize_grid(grid) - index_ordering: List[int] = list(range(self.spatial_dims - 1, -1, -1)) + index_ordering: List[int] = list(range(spatial_dims - 1, -1, -1)) grid = grid[..., index_ordering] # z, y, x -> x, y, z _interp_mode = "bilinear" if self.mode == 1 else "nearest" warped_image = F.grid_sample( @@ -125,7 +115,6 @@ class DVF2DDF(nn.Module): def __init__( self, - spatial_dims: int, num_steps: int = 7, mode: int = 1, padding_mode: Optional[Union[GridSamplePadMode, str]] = GridSamplePadMode.ZEROS, @@ -134,7 +123,7 @@ def __init__( if num_steps <= 0: raise ValueError(f"expecting positive num_steps, got {num_steps}") self.num_steps = num_steps - self.warp_layer = Warp(spatial_dims=spatial_dims, mode=mode, padding_mode=padding_mode) + self.warp_layer = Warp(mode=mode, padding_mode=padding_mode) def forward(self, dvf): """ @@ -142,7 +131,7 @@ def forward(self, dvf): dvf: dvf to be transformed, in shape (batch, ``spatial_dims``, H, W[,D]) Returns: - + a dense displacement field """ ddf: torch.Tensor = dvf / (2 ** self.num_steps) for _ in range(self.num_steps): diff --git a/tests/test_dvf2ddf.py b/tests/test_dvf2ddf.py index 0ee8ba6c30..bf04fed8b6 100644 --- a/tests/test_dvf2ddf.py 
+++ b/tests/test_dvf2ddf.py @@ -10,16 +10,16 @@ from monai.utils import set_determinism TEST_CASES = [ - [{"spatial_dims": 2, "num_steps": 1}, {"dvf": torch.zeros(1, 2, 2, 2)}, torch.zeros(1, 2, 2, 2)], + [{"num_steps": 1}, {"dvf": torch.zeros(1, 2, 2, 2)}, torch.zeros(1, 2, 2, 2)], [ - {"spatial_dims": 3, "num_steps": 1}, + {"num_steps": 1}, {"dvf": torch.ones(1, 3, 2, 2, 2)}, torch.tensor([[[1.0000, 0.7500], [0.7500, 0.6250]], [[0.7500, 0.6250], [0.6250, 0.5625]]]) .reshape(1, 1, 2, 2, 2) .expand(-1, 3, -1, -1, -1), ], [ - {"spatial_dims": 3, "num_steps": 2}, + {"num_steps": 2}, {"dvf": torch.ones(1, 3, 2, 2, 2)}, torch.tensor([[[0.9175, 0.6618], [0.6618, 0.5306]], [[0.6618, 0.5306], [0.5306, 0.4506]]]) .reshape(1, 1, 2, 2, 2) @@ -43,7 +43,7 @@ def test_value(self, input_param, input_data, expected_val): def test_gradient(self): network = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=1) - dvf2ddf = DVF2DDF(spatial_dims=2, num_steps=1) + dvf2ddf = DVF2DDF(num_steps=1) optimizer = SGD(network.parameters(), lr=0.01) x = torch.ones((1, 1, 5, 5)) x = network(x) diff --git a/tests/test_globalnet.py b/tests/test_globalnet.py index ed06f5fb0c..0aab57d272 100644 --- a/tests/test_globalnet.py +++ b/tests/test_globalnet.py @@ -65,7 +65,7 @@ class TestGlobalNet(unittest.TestCase): @parameterized.expand(TEST_CASES_GLOBAL_NET) def test_shape(self, input_param, input_shape, expected_shape): net = GlobalNet(**input_param).to(device) - warp_layer = Warp(spatial_dims=input_param.get("spatial_dims", 2)) + warp_layer = Warp() with eval_mode(net): img = torch.randn(input_shape) result = net(img.to(device)) diff --git a/tests/test_warp.py b/tests/test_warp.py index 69ae997e38..613b6fb4ab 100644 --- a/tests/test_warp.py +++ b/tests/test_warp.py @@ -8,12 +8,12 @@ LOW_POWER_TEST_CASES = [ [ - {"spatial_dims": 2, "mode": 0, "padding_mode": "zeros"}, + {"mode": 0, "padding_mode": "zeros"}, {"image": torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), "ddf": torch.zeros(1, 2, 2, 2)}, torch.arange(4).reshape((1, 1, 2, 2)), ], [ - {"spatial_dims": 2, "mode": 1, "padding_mode": "zeros"}, + {"mode": 1, "padding_mode": "zeros"}, {"image": torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 2, 2, 2)}, torch.tensor([[[[3, 0], [0, 0]]]]), ], @@ -21,7 +21,7 @@ HIGH_POWER_TEST_CASES = [ [ - {"spatial_dims": 3, "mode": 2, "padding_mode": "border"}, + {"mode": 2, "padding_mode": "border"}, { "image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 3, 2, 2, 2) * -1, @@ -29,7 +29,7 @@ torch.tensor([[[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]]), ], [ - {"spatial_dims": 3, "mode": 3, "padding_mode": "reflection"}, + {"mode": 3, "padding_mode": "reflection"}, {"image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 3, 2, 2, 2)}, torch.tensor([[[[[7, 6], [5, 4]], [[3, 2], [1, 0]]]]]), ], @@ -48,7 +48,7 @@ def test_resample(self, input_param, input_data, expected_val): np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4) def test_ill_shape(self): - warp_layer = Warp(spatial_dims=2) + warp_layer = Warp() with self.assertRaisesRegex(ValueError, ""): warp_layer( image=torch.arange(4).reshape((1, 1, 1, 2, 2)).to(dtype=torch.float), ddf=torch.zeros(1, 2, 2, 2) @@ -60,10 +60,6 @@ def test_ill_shape(self): with self.assertRaisesRegex(ValueError, ""): warp_layer(image=torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), ddf=torch.zeros(1, 2, 3, 3)) - def test_ill_opts(self): 
-        with self.assertRaisesRegex(ValueError, ""):
-            Warp(spatial_dims=4)
-

 if __name__ == "__main__":
     unittest.main()

From b209ab7a8e0ce4a00f23753e06211400f4635566 Mon Sep 17 00:00:00 2001
From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com>
Date: Fri, 26 Mar 2021 22:55:50 +0800
Subject: [PATCH 105/457] 1822 Implement Prob NMS (#1842)

* Optimize speed and support any dimensions

Signed-off-by: Yiheng Wang

* Fix black issue

Signed-off-by: Yiheng Wang

* Add unittest and docstrings

Signed-off-by: Yiheng Wang

* Modify box size and others

Signed-off-by: Yiheng Wang

* Add pathology nms

Signed-off-by: Yiheng Wang

* Update docs

Signed-off-by: Yiheng Wang

* Update pathology nms

Signed-off-by: Yiheng Wang

Co-authored-by: Behrooz <3968947+behxyz@users.noreply.github.com>
Co-authored-by: Wenqi Li
---
 docs/source/apps.rst             |   4 ++
 docs/source/utils.rst            |   6 ++
 monai/apps/pathology/__init__.py |   1 +
 monai/apps/pathology/utils.py    |  43 +++++++++++++
 monai/utils/__init__.py          |   1 +
 monai/utils/prob_nms.py          |  99 +++++++++++++++++++++++++++++
 tests/test_pathology_prob_nms.py |  57 +++++++++++++++++
 tests/test_prob_nms.py           | 103 +++++++++++++++++++++++++++++++
 8 files changed, 314 insertions(+)
 create mode 100644 monai/apps/pathology/utils.py
 create mode 100644 monai/utils/prob_nms.py
 create mode 100644 tests/test_pathology_prob_nms.py
 create mode 100644 tests/test_prob_nms.py

diff --git a/docs/source/apps.rst b/docs/source/apps.rst
index 4c45a5fb39..0c92d4c443 100644
--- a/docs/source/apps.rst
+++ b/docs/source/apps.rst
@@ -71,3 +71,7 @@ Applications
   :members:
 .. autoclass:: SmartCachePatchWSIDataset
   :members:
+
+.. automodule:: monai.apps.pathology.utils
+.. autoclass:: PathologyProbNMS
+  :members:

diff --git a/docs/source/utils.rst b/docs/source/utils.rst
index e0b993da60..071d9ecefd 100644
--- a/docs/source/utils.rst
+++ b/docs/source/utils.rst
@@ -27,6 +27,12 @@ Misc
 .. automodule:: monai.utils.misc
   :members:

+Prob NMS
+--------
+.. automodule:: monai.utils.prob_nms
+.. autoclass:: ProbNMS
+  :members:
+
 Profiling
 ---------
 .. automodule:: monai.utils.profiling

diff --git a/monai/apps/pathology/__init__.py b/monai/apps/pathology/__init__.py
index bbdb812c03..3af25365ba 100644
--- a/monai/apps/pathology/__init__.py
+++ b/monai/apps/pathology/__init__.py
@@ -10,3 +10,4 @@
 # limitations under the License.

 from .datasets import PatchWSIDataset, SmartCacheDataset
+from .utils import ProbNMS

diff --git a/monai/apps/pathology/utils.py b/monai/apps/pathology/utils.py
new file mode 100644
index 0000000000..b0803526fd
--- /dev/null
+++ b/monai/apps/pathology/utils.py
@@ -0,0 +1,43 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union
+
+import numpy as np
+import torch
+
+from monai.utils import ProbNMS
+
+
+class PathologyProbNMS(ProbNMS):
+    """
+    This class extends monai.utils.ProbNMS and adds the `resolution_level` option
+    for pathology.
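+
+    A minimal usage sketch (illustrative values; assumes a 2-d probabilities map
+    made at resolution level 1)::
+
+        nms = PathologyProbNMS(spatial_dims=2, prob_threshold=0.5, box_size=48)
+        detections = nms(probs_map, resolution_level=1)
+        # each detection is [probability, coordinates...] at the level-0 resolution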
+    """
+
+    def __call__(
+        self,
+        probs_map: Union[np.ndarray, torch.Tensor],
+        resolution_level: int = 0,
+    ):
+        """
+        Args:
+            probs_map: the input probabilities map; it must have shape (H[, W, ...]).
+            resolution_level: the level at which the probabilities map is made.
+        """
+        resolution = pow(2, resolution_level)
+        org_outputs = ProbNMS.__call__(self, probs_map)
+        outputs = []
+        for org_output in org_outputs:
+            prob = org_output[0]
+            coord = np.asarray(org_output[1:])
+            coord_wsi = ((coord + 0.5) * resolution).astype(int)
+            outputs.append([prob] + list(coord_wsi))
+        return outputs

diff --git a/monai/utils/__init__.py b/monai/utils/__init__.py
index d622ce96ae..f6a137f47d 100644
--- a/monai/utils/__init__.py
+++ b/monai/utils/__init__.py
@@ -69,5 +69,6 @@
     min_version,
     optional_import,
 )
+from .prob_nms import ProbNMS
 from .profiling import PerfContext, torch_profiler_full, torch_profiler_time_cpu_gpu, torch_profiler_time_end_to_end
 from .state_cacher import StateCacher

diff --git a/monai/utils/prob_nms.py b/monai/utils/prob_nms.py
new file mode 100644
index 0000000000..bdffdfe005
--- /dev/null
+++ b/monai/utils/prob_nms.py
@@ -0,0 +1,99 @@
+from typing import List, Sequence, Tuple, Union
+
+import numpy as np
+import torch
+
+from monai.networks.layers import GaussianFilter
+
+
+class ProbNMS:
+    """
+    Performs probability-based non-maximum suppression (NMS) on the probabilities map by
+    iteratively selecting the coordinate with the highest probability and then removing it
+    as well as its surrounding values. The removal range is determined by the parameter
+    `box_size`. If multiple coordinates share the same highest probability, only one of
+    them will be selected.
+
+    Args:
+        spatial_dims: number of spatial dimensions of the input probabilities map.
+            Defaults to 2.
+        sigma: the standard deviation for gaussian filter.
+            It could be a single value, or `spatial_dims` number of values. Defaults to 0.0.
+        prob_threshold: the probability threshold; the function stops searching once the
+            highest remaining probability is no larger than this threshold. The value should
+            be no less than 0.0. Defaults to 0.5.
+        box_size: determines the size of the removal area around each selected coordinate,
+            per dimension. Defaults to 48.
+
+    Returns:
+        a list of selected lists, where inner lists contain probability and coordinates.
+        For example, for 3D input, the inner lists are in the form of [probability, x, y, z].
+
+    Raises:
+        ValueError: When ``prob_threshold`` is less than 0.0.
+        ValueError: When ``box_size`` is a list or tuple, and its length is not equal to `spatial_dims`.
+        ValueError: When ``box_size`` contains a value less than 1.
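+
+    A minimal usage sketch (illustrative values, matching the 2-d test cases added below)::
+
+        nms = ProbNMS(spatial_dims=2, prob_threshold=0.5, box_size=48)
+        peaks = nms(probs_map)  # e.g. [[0.9, 66, 66], [0.7, 33, 33]]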
+ + """ + + def __init__( + self, + spatial_dims: int = 2, + sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 0.0, + prob_threshold: float = 0.5, + box_size: Union[int, List[int], Tuple[int]] = 48, + ) -> None: + self.sigma = sigma + self.spatial_dims = spatial_dims + if self.sigma != 0: + self.filter = GaussianFilter(spatial_dims=spatial_dims, sigma=sigma) + if prob_threshold < 0: + raise ValueError("prob_threshold should be no less than 0.0.") + self.prob_threshold = prob_threshold + if isinstance(box_size, int): + self.box_size = np.asarray([box_size] * spatial_dims) + else: + if len(box_size) != spatial_dims: + raise ValueError("the sequence length of box_size should be the same as spatial_dims.") + self.box_size = np.asarray(box_size) + if self.box_size.min() <= 0: + raise ValueError("box_size should be larger than 0.") + + self.box_lower_bd = self.box_size // 2 + self.box_upper_bd = self.box_size - self.box_lower_bd + + def __call__( + self, + probs_map: Union[np.ndarray, torch.Tensor], + ): + """ + probs_map: the input probabilities map, it must have shape (H[, W, ...]). + """ + if self.sigma != 0: + if not isinstance(probs_map, torch.Tensor): + probs_map = torch.as_tensor(probs_map, dtype=torch.float) + self.filter.to(probs_map) + probs_map = self.filter(probs_map) + else: + if not isinstance(probs_map, torch.Tensor): + probs_map = probs_map.copy() + + if isinstance(probs_map, torch.Tensor): + probs_map = probs_map.detach().cpu().numpy() + + probs_map_shape = probs_map.shape + + outputs = [] + while np.max(probs_map) > self.prob_threshold: + max_idx = np.unravel_index(probs_map.argmax(), probs_map_shape) + prob_max = probs_map[max_idx] + max_idx_arr = np.asarray(max_idx) + outputs.append([prob_max] + list(max_idx_arr)) + + idx_min_range = (max_idx_arr - self.box_lower_bd).clip(0, None) + idx_max_range = (max_idx_arr + self.box_upper_bd).clip(None, probs_map_shape) + # for each dimension, set values during index ranges to 0 + slices = tuple([slice(idx_min_range[i], idx_max_range[i]) for i in range(self.spatial_dims)]) + probs_map[slices] = 0 + + return outputs diff --git a/tests/test_pathology_prob_nms.py b/tests/test_pathology_prob_nms.py new file mode 100644 index 0000000000..223b136ea7 --- /dev/null +++ b/tests/test_pathology_prob_nms.py @@ -0,0 +1,57 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
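+
+# These tests plant two peaks (0.7 and 0.9) in random sub-threshold maps and expect the
+# detections in descending-probability order, with coordinates scaled back to level 0.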
+ +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.apps.pathology.utils import PathologyProbNMS + +probs_map_2d = np.random.rand(100, 100).clip(0, 0.5) +probs_map_2d[33, 33] = 0.7 +probs_map_2d[66, 66] = 0.9 +expected_2d = [[0.9, 133, 133], [0.7, 67, 67]] +TEST_CASES_2D = [ + {"spatial_dims": 2, "prob_threshold": 0.5, "box_size": [10, 10]}, + {"resolution_level": 1}, + probs_map_2d, + expected_2d, +] + +probs_map_3d = torch.rand([50, 50, 50]).uniform_(0, 0.5) +probs_map_3d[25, 25, 25] = 0.7 +probs_map_3d[45, 45, 45] = 0.9 +expected_3d = [[0.9, 91, 91, 91], [0.7, 51, 51, 51]] +TEST_CASES_3D = [ + {"spatial_dims": 3, "prob_threshold": 0.5, "box_size": (10, 10, 10)}, + {"resolution_level": 1}, + probs_map_3d, + expected_3d, +] + + +class TestPathologyProbNMS(unittest.TestCase): + @parameterized.expand( + [ + TEST_CASES_2D, + TEST_CASES_3D, + ] + ) + def test_output(self, class_args, call_args, probs_map, expected): + nms = PathologyProbNMS(**class_args) + output = nms(probs_map, **call_args) + np.testing.assert_allclose(output, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_prob_nms.py b/tests/test_prob_nms.py new file mode 100644 index 0000000000..fb88d9cfb4 --- /dev/null +++ b/tests/test_prob_nms.py @@ -0,0 +1,103 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
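+
+# These tests cover empty results (all values below the threshold), rectangular box sizes,
+# Gaussian pre-smoothing (`sigma` > 0), and numpy/torch (CPU or CUDA) inputs.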
+ +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.utils import ProbNMS + +probs_map_1 = np.random.rand(100, 100).clip(0, 0.5) +TEST_CASES_2D_1 = [{"spatial_dims": 2, "prob_threshold": 0.5, "box_size": 10}, probs_map_1, []] + +probs_map_2 = np.random.rand(100, 100).clip(0, 0.5) +probs_map_2[33, 33] = 0.7 +probs_map_2[66, 66] = 0.9 +expected_2 = [[0.9, 66, 66], [0.7, 33, 33]] +TEST_CASES_2D_2 = [ + {"spatial_dims": 2, "prob_threshold": 0.5, "box_size": [10, 10]}, + probs_map_2, + expected_2, +] + +probs_map_3 = np.random.rand(100, 100).clip(0, 0.5) +probs_map_3[56, 58] = 0.7 +probs_map_3[60, 66] = 0.8 +probs_map_3[66, 66] = 0.9 +expected_3 = [[0.9, 66, 66], [0.8, 60, 66]] +TEST_CASES_2D_3 = [ + {"spatial_dims": 2, "prob_threshold": 0.5, "box_size": (10, 20)}, + probs_map_3, + expected_3, +] + +probs_map_4 = np.random.rand(100, 100).clip(0, 0.5) +probs_map_4[33, 33] = 0.7 +probs_map_4[66, 66] = 0.9 +expected_4 = [[0.9, 66, 66]] +TEST_CASES_2D_4 = [ + {"spatial_dims": 2, "prob_threshold": 0.8, "box_size": 10}, + probs_map_4, + expected_4, +] + +probs_map_5 = np.random.rand(100, 100).clip(0, 0.5) +TEST_CASES_2D_5 = [{"spatial_dims": 2, "prob_threshold": 0.5, "sigma": 0.1}, probs_map_5, []] + +probs_map_6 = torch.as_tensor(np.random.rand(100, 100).clip(0, 0.5)) +TEST_CASES_2D_6 = [{"spatial_dims": 2, "prob_threshold": 0.5, "sigma": 0.1}, probs_map_6, []] + +probs_map_7 = torch.as_tensor(np.random.rand(100, 100).clip(0, 0.5)) +probs_map_7[33, 33] = 0.7 +probs_map_7[66, 66] = 0.9 +if torch.cuda.is_available(): + probs_map_7 = probs_map_7.cuda() +expected_7 = [[0.9, 66, 66], [0.7, 33, 33]] +TEST_CASES_2D_7 = [ + {"spatial_dims": 2, "prob_threshold": 0.5, "sigma": 0.1}, + probs_map_7, + expected_7, +] + +probs_map_3d = torch.rand([50, 50, 50]).uniform_(0, 0.5) +probs_map_3d[25, 25, 25] = 0.7 +probs_map_3d[45, 45, 45] = 0.9 +expected_3d = [[0.9, 45, 45, 45], [0.7, 25, 25, 25]] +TEST_CASES_3D = [ + {"spatial_dims": 3, "prob_threshold": 0.5, "box_size": (10, 10, 10)}, + probs_map_3d, + expected_3d, +] + + +class TestProbNMS(unittest.TestCase): + @parameterized.expand( + [ + TEST_CASES_2D_1, + TEST_CASES_2D_2, + TEST_CASES_2D_3, + TEST_CASES_2D_4, + TEST_CASES_2D_5, + TEST_CASES_2D_6, + TEST_CASES_2D_7, + TEST_CASES_3D, + ] + ) + def test_output(self, class_args, probs_map, expected): + nms = ProbNMS(**class_args) + output = nms(probs_map) + np.testing.assert_allclose(output, expected) + + +if __name__ == "__main__": + unittest.main() From 28e000d7d7bc0009769b88c7f231dadb5910b163 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sat, 27 Mar 2021 01:22:55 +0800 Subject: [PATCH 106/457] 1862 Fix checkpoint read permission (#1863) * [DLMED] fix read permission issue Signed-off-by: Nic Ma --- monai/handlers/checkpoint_saver.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/monai/handlers/checkpoint_saver.py b/monai/handlers/checkpoint_saver.py index 9c67992b36..fd80182ba2 100644 --- a/monai/handlers/checkpoint_saver.py +++ b/monai/handlers/checkpoint_saver.py @@ -116,7 +116,9 @@ class _DiskSaver(DiskSaver): """ def __init__(self, dirname: str, filename: Optional[str] = None): - super().__init__(dirname=dirname, require_empty=False) + # set `atomic=False` as `atomic=True` only gives read/write permission to the user who saved the file, + # without group/others read permission + super().__init__(dirname=dirname, require_empty=False, atomic=False) self.filename = filename def __call__(self, checkpoint: Dict, filename: str, metadata: 
Optional[Dict] = None) -> None: From 32e6b123cfd6cfe9ad218b93a8b07029852046a6 Mon Sep 17 00:00:00 2001 From: Richard Brown <33289025+rijobro@users.noreply.github.com> Date: Fri, 26 Mar 2021 19:53:26 +0000 Subject: [PATCH 107/457] [1859] fix lr finder (#1865) * fix lr finder --- tests/test_lr_finder.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/tests/test_lr_finder.py b/tests/test_lr_finder.py index 9ee9c8a4d0..5b730c2a77 100644 --- a/tests/test_lr_finder.py +++ b/tests/test_lr_finder.py @@ -13,6 +13,7 @@ import random import sys import unittest +from typing import TYPE_CHECKING import torch from torch.utils.data import DataLoader @@ -23,7 +24,14 @@ from monai.transforms import AddChanneld, Compose, LoadImaged, ScaleIntensityd, ToTensord from monai.utils import optional_import, set_determinism -PILImage, has_pil = optional_import("PIL.Image") +if TYPE_CHECKING: + import matplotlib.pyplot as plt + + has_matplotlib = True + has_pil = True +else: + plt, has_matplotlib = optional_import("matplotlib.pyplot") + _, has_pil = optional_import("PIL.Image") RAND_SEED = 42 random.seed(RAND_SEED) @@ -73,7 +81,14 @@ def test_lr_finder(self): lr_finder = LearningRateFinder(model, optimizer, loss_function, device=device) lr_finder.range_test(train_loader, val_loader=train_loader, end_lr=10, num_iter=5) print(lr_finder.get_steepest_gradient(0, 0)[0]) - lr_finder.plot(0, 0) # to inspect the loss-learning rate graph + + if has_matplotlib: + ax = plt.subplot() + plt.show(block=False) + lr_finder.plot(0, 0, ax=ax) # to inspect the loss-learning rate graph + plt.pause(3) + plt.close() + lr_finder.reset() # to reset the model and optimizer to their initial state From bb1562255996aae448eb7ad864fa4d8009aed512 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sat, 27 Mar 2021 08:44:53 +0800 Subject: [PATCH 108/457] 1861 Add MapLabelValue (#1864) * [DLMED] add MapLabelValue Signed-off-by: Nic Ma * [DLMED] add unit tests Signed-off-by: Nic Ma * [DLMED] add missing doc-string Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma --- docs/source/transforms.rst | 12 +++++ monai/transforms/__init__.py | 4 ++ monai/transforms/utility/array.py | 41 +++++++++++++++ monai/transforms/utility/dictionary.py | 38 ++++++++++++++ tests/test_map_label_value.py | 71 ++++++++++++++++++++++++++ tests/test_map_label_valued.py | 71 ++++++++++++++++++++++++++ 6 files changed, 237 insertions(+) create mode 100644 tests/test_map_label_value.py create mode 100644 tests/test_map_label_valued.py diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 768c0665a2..28bfdc5f24 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -551,6 +551,12 @@ Utility :members: :special-members: __call__ +`MapLabelValue` +""""""""""""""" +.. autoclass:: MapLabelValue + :members: + :special-members: __call__ + Dictionary Transforms --------------------- @@ -1052,6 +1058,12 @@ Utility (Dict) :members: :special-members: __call__ +`MapLabelValued` +"""""""""""""""" +.. autoclass:: MapLabelValued + :members: + :special-members: __call__ + Transform Adaptors ------------------ .. 
automodule:: monai.transforms.adaptors diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 22311cdca6..c7b60e15e3 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -268,6 +268,7 @@ Identity, LabelToMask, Lambda, + MapLabelValue, RemoveRepeatedChannel, RepeatChannel, SimulateDelay, @@ -325,6 +326,9 @@ Lambdad, LambdaD, LambdaDict, + MapLabelValued, + MapLabelValueD, + MapLabelValueDict, RandLambdad, RandLambdaD, RandLambdaDict, diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index f169002596..987542c979 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -57,6 +57,7 @@ "ConvertToMultiChannelBasedOnBratsClasses", "AddExtremePointsChannel", "TorchVision", + "MapLabelValue", ] @@ -758,3 +759,43 @@ def __call__(self, img: torch.Tensor): """ return self.trans(img) + + +class MapLabelValue: + """ + Utility to map label values to another set of values. + For example, map [3, 2, 1] to [0, 1, 2], [1, 2, 3] -> [0.5, 1.5, 2.5], ["label3", "label2", "label1"] -> [0, 1, 2], + [3.5, 2.5, 1.5] -> ["label0", "label1", "label2"], etc. + The label data must be numpy array or array-like data and the output data will be numpy array. + + """ + + def __init__(self, orig_labels: Sequence, target_labels: Sequence, dtype: DtypeLike = np.float32) -> None: + """ + Args: + orig_labels: original labels that map to others. + target_labels: expected label values, 1: 1 map to the `orig_labels`. + dtype: convert the output data to dtype, default to float32. + + """ + if len(orig_labels) != len(target_labels): + raise ValueError("orig_labels and target_labels must have the same length.") + self.orig_labels = orig_labels + self.target_labels = target_labels + self.dtype = dtype + + def __call__(self, img: np.ndarray): + img = np.asarray(img) + img_flat = img.flatten() + try: + out_flat = np.copy(img_flat).astype(self.dtype) + except ValueError: + # can't copy unchanged labels as the expected dtype is not supported, must map all the label values + out_flat = np.zeros(shape=img_flat.shape, dtype=self.dtype) + + for o, t in zip(self.orig_labels, self.target_labels): + if o == t: + continue + np.place(out_flat, img_flat == o, t) + + return out_flat.reshape(img.shape) diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index 324835a874..e7cf63e210 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -36,6 +36,7 @@ Identity, LabelToMask, Lambda, + MapLabelValue, RemoveRepeatedChannel, RepeatChannel, SimulateDelay, @@ -83,6 +84,7 @@ "ConvertToMultiChannelBasedOnBratsClassesd", "AddExtremePointsChanneld", "TorchVisiond", + "MapLabelValued", "IdentityD", "IdentityDict", "AsChannelFirstD", @@ -129,6 +131,8 @@ "AddExtremePointsChannelDict", "TorchVisionD", "TorchVisionDict", + "MapLabelValueD", + "MapLabelValueDict", ] @@ -960,6 +964,39 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torc return d +class MapLabelValued(MapTransform): + """ + Dictionary-based wrapper of :py:class:`monai.transforms.MapLabelValue`. + """ + + def __init__( + self, + keys: KeysCollection, + orig_labels: Sequence, + target_labels: Sequence, + dtype: DtypeLike = np.float32, + allow_missing_keys: bool = False, + ) -> None: + """ + Args: + keys: keys of the corresponding items to be transformed. + See also: :py:class:`monai.transforms.compose.MapTransform` + orig_labels: original labels that map to others. 
+ target_labels: expected label values, 1: 1 map to the `orig_labels`. + dtype: convert the output data to dtype, default to float32. + allow_missing_keys: don't raise exception if key is missing. + + """ + super().__init__(keys, allow_missing_keys) + self.mapper = MapLabelValue(orig_labels=orig_labels, target_labels=target_labels, dtype=dtype) + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = dict(data) + for key in self.key_iterator(d): + d[key] = self.mapper(d[key]) + return d + + IdentityD = IdentityDict = Identityd AsChannelFirstD = AsChannelFirstDict = AsChannelFirstd AsChannelLastD = AsChannelLastDict = AsChannelLastd @@ -987,3 +1024,4 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torc AddExtremePointsChannelD = AddExtremePointsChannelDict = AddExtremePointsChanneld TorchVisionD = TorchVisionDict = TorchVisiond RandLambdaD = RandLambdaDict = RandLambdad +MapLabelValueD = MapLabelValueDict = MapLabelValued diff --git a/tests/test_map_label_value.py b/tests/test_map_label_value.py new file mode 100644 index 0000000000..98412ab800 --- /dev/null +++ b/tests/test_map_label_value.py @@ -0,0 +1,71 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import MapLabelValue + +TEST_CASE_1 = [ + {"orig_labels": [3, 2, 1], "target_labels": [0, 1, 2]}, + np.array([[3, 1], [1, 2]]), + np.array([[0, 2], [2, 1]]), +] + +TEST_CASE_2 = [ + {"orig_labels": [3, 5, 8], "target_labels": [0, 1, 2]}, + np.array([[[3], [5], [5], [8]]]), + np.array([[[0], [1], [1], [2]]]), +] + +TEST_CASE_3 = [ + {"orig_labels": [1, 2, 3], "target_labels": [0, 1, 2]}, + np.array([3, 1, 1, 2]), + np.array([2, 0, 0, 1]), +] + +TEST_CASE_4 = [ + {"orig_labels": [1, 2, 3], "target_labels": [0.5, 1.5, 2.5]}, + np.array([3, 1, 1, 2]), + np.array([2.5, 0.5, 0.5, 1.5]), +] + +TEST_CASE_5 = [ + {"orig_labels": [1.5, 2.5, 3.5], "target_labels": [0, 1, 2], "dtype": np.int8}, + np.array([3.5, 1.5, 1.5, 2.5]), + np.array([2, 0, 0, 1]), +] + +TEST_CASE_6 = [ + {"orig_labels": ["label3", "label2", "label1"], "target_labels": [0, 1, 2]}, + np.array([["label3", "label1"], ["label1", "label2"]]), + np.array([[0, 2], [2, 1]]), +] + +TEST_CASE_7 = [ + {"orig_labels": [3.5, 2.5, 1.5], "target_labels": ["label0", "label1", "label2"], "dtype": "str"}, + np.array([[3.5, 1.5], [1.5, 2.5]]), + np.array([["label0", "label2"], ["label2", "label1"]]), +] + + +class TestMapLabelValue(unittest.TestCase): + @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7]) + def test_shape(self, input_param, input_data, expected_value): + result = MapLabelValue(**input_param)(input_data) + np.testing.assert_equal(result, expected_value) + self.assertTupleEqual(result.shape, expected_value.shape) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_map_label_valued.py b/tests/test_map_label_valued.py new file mode 100644 index 0000000000..426ac28836 --- /dev/null +++ b/tests/test_map_label_valued.py @@ -0,0 +1,71 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import MapLabelValued + +TEST_CASE_1 = [ + {"keys": "seg", "orig_labels": [3, 2, 1], "target_labels": [0, 1, 2]}, + {"seg": np.array([[3, 1], [1, 2]])}, + np.array([[0, 2], [2, 1]]), +] + +TEST_CASE_2 = [ + {"keys": "seg", "orig_labels": [3, 5, 8], "target_labels": [0, 1, 2]}, + {"seg": np.array([[[3], [5], [5], [8]]])}, + np.array([[[0], [1], [1], [2]]]), +] + +TEST_CASE_3 = [ + {"keys": "seg", "orig_labels": [1, 2, 3], "target_labels": [0, 1, 2]}, + {"seg": np.array([3, 1, 1, 2])}, + np.array([2, 0, 0, 1]), +] + +TEST_CASE_4 = [ + {"keys": "seg", "orig_labels": [1, 2, 3], "target_labels": [0.5, 1.5, 2.5]}, + {"seg": np.array([3, 1, 1, 2])}, + np.array([2.5, 0.5, 0.5, 1.5]), +] + +TEST_CASE_5 = [ + {"keys": "seg", "orig_labels": [1.5, 2.5, 3.5], "target_labels": [0, 1, 2], "dtype": np.int8}, + {"seg": np.array([3.5, 1.5, 1.5, 2.5])}, + np.array([2, 0, 0, 1]), +] + +TEST_CASE_6 = [ + {"keys": "seg", "orig_labels": ["label3", "label2", "label1"], "target_labels": [0, 1, 2]}, + {"seg": np.array([["label3", "label1"], ["label1", "label2"]])}, + np.array([[0, 2], [2, 1]]), +] + +TEST_CASE_7 = [ + {"keys": "seg", "orig_labels": [3.5, 2.5, 1.5], "target_labels": ["label0", "label1", "label2"], "dtype": "str"}, + {"seg": np.array([[3.5, 1.5], [1.5, 2.5]])}, + np.array([["label0", "label2"], ["label2", "label1"]]), +] + + +class TestMapLabelValued(unittest.TestCase): + @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7]) + def test_shape(self, input_param, input_data, expected_value): + result = MapLabelValued(**input_param)(input_data) + np.testing.assert_equal(result["seg"], expected_value) + self.assertTupleEqual(result["seg"].shape, expected_value.shape) + + +if __name__ == "__main__": + unittest.main() From c157fb8e878436119238ea088ebcf7875d2b7eb6 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Fri, 26 Mar 2021 22:30:00 -0400 Subject: [PATCH 109/457] Update SmartCachePatchWSIDataset and add unittests (#1853) * Add unittests for SmartCachePatchWSIDataset Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Turn off shuffle for SmartCachePatchWSIDataset Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/apps/pathology/datasets.py | 1 + tests/test_smartcache_patch_wsi_dataset.py | 168 +++++++++++++++++++++ 2 files changed, 169 insertions(+) create mode 100644 tests/test_smartcache_patch_wsi_dataset.py diff --git a/monai/apps/pathology/datasets.py b/monai/apps/pathology/datasets.py index a2f7b17ffe..59f7e3aceb 100644 --- a/monai/apps/pathology/datasets.py +++ b/monai/apps/pathology/datasets.py @@ -158,4 +158,5 @@ def __init__( num_init_workers=num_init_workers, num_replace_workers=num_replace_workers, progress=progress, + shuffle=False, ) diff --git a/tests/test_smartcache_patch_wsi_dataset.py b/tests/test_smartcache_patch_wsi_dataset.py new file mode 100644 index 0000000000..e3f4d03a2d --- /dev/null +++ b/tests/test_smartcache_patch_wsi_dataset.py @@ -0,0 +1,168 @@ +import os +import unittest +from unittest import skipUnless +from urllib import request + +import numpy as np +from numpy.testing import assert_array_equal +from parameterized import parameterized + +from monai.apps.pathology.datasets import SmartCachePatchWSIDataset +from monai.utils import optional_import + +_, has_cim = optional_import("cucim") + +FILE_URL = 
"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" + +TEST_CASE_0 = [ + FILE_URL, + { + "data": [ + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [0]}, + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [1]}, + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [2]}, + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [3]}, + ], + "region_size": (1, 1), + "grid_shape": (1, 1), + "patch_size": 1, + "transform": lambda x: x, + "image_reader_name": "cuCIM", + "replace_rate": 0.5, + "cache_num": 2, + "num_init_workers": 1, + "num_replace_workers": 1, + }, + [ + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[3]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[3]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[0]]])}, + ], +] + +TEST_CASE_1 = [ + FILE_URL, + { + "data": [ + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [[0, 0]]}, + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [[1, 1]]}, + {"image": "./CMU-1.tiff", "location": [0, 0], "label": [[2, 2]]}, + ], + "region_size": (1, 1), + "grid_shape": (1, 1), + "patch_size": 1, + "transform": lambda x: x, + "image_reader_name": "cuCIM", + "replace_rate": 0.5, + "cache_num": 2, + "num_init_workers": 1, + "num_replace_workers": 1, + }, + [ + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[0, 0]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1, 1]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1, 1]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[2, 2]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[2, 2]]])}, + {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[0, 0]]])}, + ], +] + +TEST_CASE_2 = [ + FILE_URL, + { + "data": [ + {"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [0, 0, 0, 0]}, + {"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [1, 1, 1, 1]}, + {"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [2, 2, 2, 2]}, + ], + "region_size": (8, 8), + "grid_shape": (2, 2), + "patch_size": 1, + "transform": lambda x: x, + "image_reader_name": "cuCIM", + "replace_rate": 0.5, + "cache_num": 2, + "num_init_workers": 1, + "num_replace_workers": 1, + }, + [ + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[1]]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), 
"label": np.array([[[1]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[1]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[1]]])}, + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[1]]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[[1]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[1]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[1]]])}, + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[2]]])}, + {"image": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[0]]])}, + {"image": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), "label": np.array([[[0]]])}, + ], +] + + +class TestSmartCachePatchWSIDataset(unittest.TestCase): + @parameterized.expand( + [ + TEST_CASE_0, + TEST_CASE_1, + TEST_CASE_2, + ] + ) + @skipUnless(has_cim, "Requires CuCIM") + def test_read_patches(self, file_url, input_parameters, expected): + self.camelyon_data_download(file_url) + dataset = SmartCachePatchWSIDataset(**input_parameters) + self.assertEqual(len(dataset), input_parameters["cache_num"]) + total_num_samples = len(input_parameters["data"]) + num_epochs = int( + np.ceil(total_num_samples / (input_parameters["cache_num"] * input_parameters["replace_rate"])) + ) + + dataset.start() + i = 0 + for _ in range(num_epochs): + for j in range(len(dataset)): + samples = dataset[j] + n_patches = len(samples) + self.assert_samples_expected(samples, expected[i : i + n_patches]) + i += n_patches + dataset.update_cache() + dataset.shutdown() + + def camelyon_data_download(self, file_url): + filename = os.path.basename(file_url) + if not os.path.exists(filename): + print(f"Test image [{filename}] does not exist. 
Downloading...") + request.urlretrieve(file_url, filename) + return filename + + def assert_samples_expected(self, samples, expected): + for i in range(len(samples)): + self.assertTupleEqual(samples[i]["label"].shape, expected[i]["label"].shape) + self.assertTupleEqual(samples[i]["image"].shape, expected[i]["image"].shape) + self.assertIsNone(assert_array_equal(samples[i]["label"], expected[i]["label"])) + self.assertIsNone(assert_array_equal(samples[i]["image"], expected[i]["image"])) + + +if __name__ == "__main__": + unittest.main() From 61a2a2135c7a5a06547f14dbebe88ce6a0bada2a Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Sat, 27 Mar 2021 11:13:24 +0000 Subject: [PATCH 110/457] adds-bicubic-option (#1850) Signed-off-by: Wenqi Li --- monai/utils/enums.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/monai/utils/enums.py b/monai/utils/enums.py index 1da7df86b6..9f753ca700 100644 --- a/monai/utils/enums.py +++ b/monai/utils/enums.py @@ -54,13 +54,19 @@ class NumpyPadMode(Enum): class GridSampleMode(Enum): """ See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample + + interpolation mode of `torch.nn.functional.grid_sample` + + Note: + (documentation from `torch.nn.functional.grid_sample`) + `mode='bicubic'` supports only 4-D input. + When `mode='bilinear'` and the input is 5-D, the interpolation mode used internally will actually be trilinear. + However, when the input is 4-D, the interpolation mode will legitimately be bilinear. """ NEAREST = "nearest" BILINEAR = "bilinear" - QUADRATIC = "quadratic" - CUBIC = "cubic" - FOURTH = "fourth" + BICUBIC = "bicubic" class InterpolateMode(Enum): From 76beeb224731e145fe0891aac0fe02e486203ec4 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Sat, 27 Mar 2021 12:40:08 +0000 Subject: [PATCH 111/457] 1848 - testing with pytorch 1.8.1 (#1856) * fixes #1848 Signed-off-by: Wenqi Li * temp tests Signed-off-by: Wenqi Li * Revert "temp tests" This reverts commit 563e7eaa79c70b1de6929a97c95d7ca3ce2f4472. 
Signed-off-by: Wenqi Li --- .github/workflows/integration.yml | 4 ++-- .github/workflows/pythonapp.yml | 10 +++++----- tests/test_crf_cuda.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index ac3efbb751..5f160e6e8e 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -7,7 +7,7 @@ on: jobs: integration-py3: container: - image: nvcr.io/nvidia/pytorch:20.12-py3 # CUDA 11.1, default for PT 1.8.0 + image: nvcr.io/nvidia/pytorch:20.12-py3 # CUDA 11.1 options: --gpus all runs-on: [self-hosted, linux, x64, common] steps: @@ -34,7 +34,7 @@ jobs: which python python -m pip install --upgrade pip wheel python -m pip uninstall -y torch torchvision - python -m pip install torch==1.8.0 torchvision==0.9.0 + python -m pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 -f https://download.pytorch.org/whl/torch_stable.html python -m pip install -r requirements-dev.txt - name: Run integration tests run: | diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 83d01ff5e0..e5803028a0 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -80,7 +80,7 @@ jobs: - if: runner.os == 'windows' name: Install torch cpu from pytorch.org (Windows only) run: | - python -m pip install torch==1.8.0+cpu torchvision==0.9.0+cpu -f https://download.pytorch.org/whl/torch_stable.html + python -m pip install torch==1.8.1+cpu torchvision==0.9.1+cpu -f https://download.pytorch.org/whl/torch_stable.html # min. requirements for windows instances python -c "f=open('requirements-dev.txt', 'r'); txt=f.readlines(); f.close(); print(txt); f=open('requirements-dev.txt', 'w'); f.writelines(txt[1:12]); f.close()" - if: runner.os == 'macos' @@ -89,7 +89,7 @@ jobs: python -c "f=open('requirements-dev.txt', 'r'); txt=f.readlines(); f.close(); print(txt); f=open('requirements-dev.txt', 'w'); f.writelines([t for t in txt if not t.startswith('cucim')]); f.close()" - name: Install the dependencies run: | - python -m pip install torch==1.8.0 torchvision==0.9.0 + python -m pip install torch==1.8.1 torchvision==0.9.1 cat "requirements-dev.txt" python -m pip install -r requirements-dev.txt python -m pip list @@ -137,11 +137,11 @@ jobs: - if: runner.os == 'windows' name: Install torch cpu from pytorch.org (Windows only) run: | - python -m pip install torch==1.8.0+cpu -f https://download.pytorch.org/whl/torch_stable.html + python -m pip install torch==1.8.1+cpu -f https://download.pytorch.org/whl/torch_stable.html - name: Install the dependencies run: | # min. 
requirements - python -m pip install torch==1.8.0 + python -m pip install torch==1.8.1 python -m pip install -r requirements-min.txt python -m pip list BUILD_MONAI=0 python setup.py develop # no compile of extensions @@ -181,7 +181,7 @@ jobs: pytorch: "-h" base: "nvcr.io/nvidia/pytorch:20.09-py3" - environment: PT18+CUDA102 - pytorch: "torch==1.8.0 torchvision==0.9.0" + pytorch: "torch==1.8.1 torchvision==0.9.1" base: "nvcr.io/nvidia/cuda:10.2-devel-ubuntu18.04" - environment: PT18+CUDA112 # we explicitly set pytorch to -h to avoid pip install error diff --git a/tests/test_crf_cuda.py b/tests/test_crf_cuda.py index 4decd433fa..55d57d67bf 100644 --- a/tests/test_crf_cuda.py +++ b/tests/test_crf_cuda.py @@ -444,7 +444,7 @@ def test(self, test_case_description, params, input, features, expected): output = crf(input_tensor, feature_tensor).cpu().numpy() # Ensure result are as expected - np.testing.assert_allclose(output, expected, atol=1e-4) + np.testing.assert_allclose(output, expected, atol=1e-4, rtol=1e-4) if __name__ == "__main__": From a1f938a53ce1cdacf70d8ec56d017dae53733e6a Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Mon, 29 Mar 2021 15:12:37 +0800 Subject: [PATCH 112/457] [DLMED] support single item (#1884) Signed-off-by: Nic Ma --- monai/transforms/utility/dictionary.py | 2 -- tests/test_concat_itemsd.py | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index e7cf63e210..63ed6ec305 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -675,8 +675,6 @@ def __init__(self, keys: KeysCollection, name: str, dim: int = 0, allow_missing_ """ super().__init__(keys, allow_missing_keys) - if len(self.keys) < 2: - raise ValueError("Concatenation requires at least 2 keys.") self.name = name self.dim = dim diff --git a/tests/test_concat_itemsd.py b/tests/test_concat_itemsd.py index 520833fc88..9c51e1efea 100644 --- a/tests/test_concat_itemsd.py +++ b/tests/test_concat_itemsd.py @@ -38,6 +38,20 @@ def test_numpy_values(self): np.testing.assert_allclose(result["img1"], np.array([[0, 1], [1, 2]])) np.testing.assert_allclose(result["cat_img"], np.array([[1, 2], [2, 3], [1, 2], [2, 3]])) + def test_single_numpy(self): + input_data = {"img": np.array([[0, 1], [1, 2]])} + result = ConcatItemsd(keys="img", name="cat_img")(input_data) + result["cat_img"] += 1 + np.testing.assert_allclose(result["img"], np.array([[0, 1], [1, 2]])) + np.testing.assert_allclose(result["cat_img"], np.array([[1, 2], [2, 3]])) + + def test_single_tensor(self): + input_data = {"img": torch.tensor([[0, 1], [1, 2]])} + result = ConcatItemsd(keys="img", name="cat_img")(input_data) + result["cat_img"] += 1 + torch.testing.assert_allclose(result["img"], torch.tensor([[0, 1], [1, 2]])) + torch.testing.assert_allclose(result["cat_img"], torch.tensor([[1, 2], [2, 3]])) + if __name__ == "__main__": unittest.main() From 9f6ceebca87c06ec9fed8447791c9a7f3378ef0a Mon Sep 17 00:00:00 2001 From: Petru-Daniel Tudosiu Date: Mon, 29 Mar 2021 13:32:08 +0100 Subject: [PATCH 113/457] Epoch/Iteration interval (#1870) Allowing the choice of epoch and iteration interval. 
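
A minimal sketch of the new intervals (assuming an existing ignite `trainer`, and
assuming the class patched above is MONAI's `TensorBoardStatsHandler`):

    from monai.handlers import TensorBoardStatsHandler

    stats_handler = TensorBoardStatsHandler(
        log_dir="./runs",
        epoch_interval=1,  # write epoch-level events every epoch
        iteration_interval=10,  # write iteration-level events every 10 iterations
    )
    stats_handler.attach(trainer)  # events now fire only at the chosen intervals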
Signed-off-by: Petru-Daniel Tudosiu --- monai/handlers/tensorboard_handlers.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/monai/handlers/tensorboard_handlers.py b/monai/handlers/tensorboard_handlers.py index 9ad1fe6353..57dad81fec 100644 --- a/monai/handlers/tensorboard_handlers.py +++ b/monai/handlers/tensorboard_handlers.py @@ -79,7 +79,9 @@ def __init__( summary_writer: Optional[SummaryWriter] = None, log_dir: str = "./runs", epoch_event_writer: Optional[Callable[[Engine, SummaryWriter], Any]] = None, + epoch_interval: int = 1, iteration_event_writer: Optional[Callable[[Engine, SummaryWriter], Any]] = None, + iteration_interval: int = 1, output_transform: Callable = lambda x: x, global_epoch_transform: Callable = lambda x: x, tag_name: str = DEFAULT_TAG, @@ -91,8 +93,10 @@ def __init__( log_dir: if using default SummaryWriter, write logs to this directory, default is `./runs`. epoch_event_writer: customized callable TensorBoard writer for epoch level. Must accept parameter "engine" and "summary_writer", use default event writer if None. + epoch_interval: the epoch interval at which the epoch_event_writer is called. Defaults to 1. iteration_event_writer: customized callable TensorBoard writer for iteration level. Must accept parameter "engine" and "summary_writer", use default event writer if None. + iteration_interval: the iteration interval at which the iteration_event_writer is called. Defaults to 1. output_transform: a callable that is used to transform the ``ignite.engine.output`` into a scalar to plot, or a dictionary of {key: scalar}. In the latter case, the output string will be formatted as key: value. @@ -104,7 +108,9 @@ def __init__( """ super().__init__(summary_writer=summary_writer, log_dir=log_dir) self.epoch_event_writer = epoch_event_writer + self.epoch_interval = epoch_interval self.iteration_event_writer = iteration_event_writer + self.iteration_interval = iteration_interval self.output_transform = output_transform self.global_epoch_transform = global_epoch_transform self.tag_name = tag_name @@ -118,9 +124,11 @@ def attach(self, engine: Engine) -> None: """ if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED): - engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed) + engine.add_event_handler( + Events.ITERATION_COMPLETED(every=self.iteration_interval), self.iteration_completed + ) if not engine.has_event_handler(self.epoch_completed, Events.EPOCH_COMPLETED): - engine.add_event_handler(Events.EPOCH_COMPLETED, self.epoch_completed) + engine.add_event_handler(Events.EPOCH_COMPLETED(every=self.epoch_interval), self.epoch_completed) def epoch_completed(self, engine: Engine) -> None: """ From 140b7cc4ef399c4020653097652e0ce7b3bcb50d Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Mon, 29 Mar 2021 23:43:30 +0800 Subject: [PATCH 114/457] 1833 Enhance the list collate function (#1887) * [DLMED] enhance list_collate Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] update based on comments Co-authored-by: Wenqi Li Signed-off-by: Nic Ma Co-authored-by: monai-bot Co-authored-by: Wenqi Li --- monai/data/utils.py | 14 +++++++++++++- tests/test_dataloader.py | 28 +++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/monai/data/utils.py b/monai/data/utils.py index 63e630fe17..47108c68ef 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -15,7 +15,7 @@ import os import pickle import warnings -from collections 
import defaultdict +from collections import abc, defaultdict from itertools import product, starmap from pathlib import PurePath from typing import Any, Dict, Generator, Iterable, List, Optional, Sequence, Tuple, Union @@ -254,10 +254,20 @@ def list_data_collate(batch: Sequence): elem = batch[0] data = [i for k in batch for i in k] if isinstance(elem, list) else batch try: + elem = batch[0] + key = None + if isinstance(elem, abc.Mapping): + ret = {} + for k in elem: + key = k + ret[k] = default_collate([d[k] for d in data]) + return ret return default_collate(data) except RuntimeError as re: re_str = str(re) if "equal size" in re_str: + if key is not None: + re_str += f"\nCollate error on the key '{key}' of dictionary data." re_str += ( "\n\nMONAI hint: if your transforms intentionally create images of different shapes, creating your " + "`DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem (check its " @@ -267,6 +277,8 @@ def list_data_collate(batch: Sequence): except TypeError as re: re_str = str(re) if "numpy" in re_str and "Tensor" in re_str: + if key is not None: + re_str += f"\nCollate error on the key '{key}' of dictionary data." re_str += ( "\n\nMONAI hint: if your transforms intentionally create mixtures of torch Tensor and numpy ndarray, " + "creating your `DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem " diff --git a/tests/test_dataloader.py b/tests/test_dataloader.py index 072a4a01c0..53e7c89f67 100644 --- a/tests/test_dataloader.py +++ b/tests/test_dataloader.py @@ -12,9 +12,27 @@ import sys import unittest -from monai.data import CacheDataset, DataLoader +import numpy as np +import torch +from parameterized import parameterized + +from monai.data import CacheDataset, DataLoader, Dataset from monai.transforms import Compose, DataStatsd, SimulateDelayd +TEST_CASE_1 = [ + [ + {"image": np.asarray([1, 2, 3])}, + {"image": np.asarray([4, 5])}, + ] +] + +TEST_CASE_2 = [ + [ + {"label": torch.as_tensor([[3], [2]])}, + {"label": np.asarray([[1], [2]])}, + ] +] + class TestDataLoader(unittest.TestCase): def test_values(self): @@ -37,6 +55,14 @@ def test_values(self): self.assertEqual(d["label"][0], "spleen_label_19.nii.gz") self.assertEqual(d["label"][1], "spleen_label_31.nii.gz") + @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) + def test_exception(self, datalist): + dataset = Dataset(data=datalist, transform=None) + dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0) + with self.assertRaisesRegex((TypeError, RuntimeError), "Collate error on the key"): + for _ in dataloader: + pass + if __name__ == "__main__": unittest.main() From f3ccbac5132674e14345e06f658a33e9c66e4813 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 30 Mar 2021 01:54:13 +0800 Subject: [PATCH 115/457] [DLMED] add data_root_dir arg (#1890) Signed-off-by: Nic Ma --- monai/data/nifti_saver.py | 15 ++++++++++++++- monai/data/png_saver.py | 14 +++++++++++++- monai/data/utils.py | 7 ++++++- monai/handlers/segmentation_saver.py | 12 ++++++++++++ monai/transforms/io/array.py | 13 +++++++++++++ monai/transforms/io/dictionary.py | 12 ++++++++++++ tests/test_file_basename.py | 9 +++++++++ tests/test_png_saver.py | 19 +++++++++++++++++++ 8 files changed, 98 insertions(+), 3 deletions(-) diff --git a/monai/data/nifti_saver.py b/monai/data/nifti_saver.py index 016b06fda5..15e61c79e1 100644 --- a/monai/data/nifti_saver.py +++ b/monai/data/nifti_saver.py @@ -43,6 +43,7 @@ def __init__( dtype: DtypeLike = np.float64, output_dtype: DtypeLike = np.float32, 
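# A minimal sketch (not part of the patch, see the `list_data_collate` hunk above) of the
# behaviour this commit adds: when items of a dict batch cannot be stacked, the raised
# error now names the offending key. The toy batch below is illustrative.
import numpy as np

from monai.data.utils import list_data_collate

batch = [{"image": np.zeros((1, 3))}, {"image": np.zeros((1, 2))}]
try:
    list_data_collate(batch)
except RuntimeError as e:
    assert "Collate error on the key 'image'" in str(e)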
squeeze_end_dims: bool = True, + data_root_dir: str = "", ) -> None: """ Args: @@ -67,6 +68,17 @@ def __init__( has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, image will always be saved as (H,W,D,C). + data_root_dir: if not empty, it specifies the beginning parts of the input file's + absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from + `data_root_dir` to preserve folder structure when saving in case there are files in different + folders with the same file names. for example: + input_file_name: /foo/bar/test1/image.nii, + postfix: seg + output_ext: nii.gz + output_dir: /output, + data_root_dir: /foo/bar, + output will be: /output/test1/image/image_seg.nii.gz + """ self.output_dir = output_dir self.output_postfix = output_postfix @@ -79,6 +91,7 @@ def __init__( self.output_dtype = output_dtype self._data_index = 0 self.squeeze_end_dims = squeeze_end_dims + self.data_root_dir = data_root_dir def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: """ @@ -112,7 +125,7 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] if isinstance(data, torch.Tensor): data = data.detach().cpu().numpy() - filename = create_file_basename(self.output_postfix, filename, self.output_dir) + filename = create_file_basename(self.output_postfix, filename, self.output_dir, self.data_root_dir) filename = f"{filename}{self.output_ext}" # change data shape to be (channel, h, w, d) while len(data.shape) < 4: diff --git a/monai/data/png_saver.py b/monai/data/png_saver.py index 4c4c847824..a6cc0e89a2 100644 --- a/monai/data/png_saver.py +++ b/monai/data/png_saver.py @@ -36,6 +36,7 @@ def __init__( resample: bool = True, mode: Union[InterpolateMode, str] = InterpolateMode.NEAREST, scale: Optional[int] = None, + data_root_dir: str = "", ) -> None: """ Args: @@ -48,6 +49,16 @@ def __init__( See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling [0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling. + data_root_dir: if not empty, it specifies the beginning parts of the input file's + absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from + `data_root_dir` to preserve folder structure when saving in case there are files in different + folders with the same file names. 
for example: + input_file_name: /foo/bar/test1/image.png, + postfix: seg + output_ext: png + output_dir: /output, + data_root_dir: /foo/bar, + output will be: /output/test1/image/image_seg.png """ self.output_dir = output_dir @@ -56,6 +67,7 @@ def __init__( self.resample = resample self.mode: InterpolateMode = InterpolateMode(mode) self.scale = scale + self.data_root_dir = data_root_dir self._data_index = 0 @@ -90,7 +102,7 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] if isinstance(data, torch.Tensor): data = data.detach().cpu().numpy() - filename = create_file_basename(self.output_postfix, filename, self.output_dir) + filename = create_file_basename(self.output_postfix, filename, self.output_dir, self.data_root_dir) filename = f"{filename}{self.output_ext}" if data.shape[0] == 1: diff --git a/monai/data/utils.py b/monai/data/utils.py index 47108c68ef..afc460244f 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -608,7 +608,12 @@ def create_file_basename( `folder_path/input_file_name (no ext.) /input_file_name (no ext.)[_postfix]` - otherwise the relative path with respect to `data_root_dir` will be inserted. + otherwise the relative path with respect to `data_root_dir` will be inserted, for example: + input_file_name: /foo/bar/test1/image.png, + postfix: seg + folder_path: /output, + data_root_dir: /foo/bar, + output will be: /output/test1/image/image_seg Args: postfix: output name's postfix diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py index 25238ea442..9ee7ca67f9 100644 --- a/monai/handlers/segmentation_saver.py +++ b/monai/handlers/segmentation_saver.py @@ -42,6 +42,7 @@ def __init__( dtype: DtypeLike = np.float64, output_dtype: DtypeLike = np.float32, squeeze_end_dims: bool = True, + data_root_dir: str = "", batch_transform: Callable = lambda x: x, output_transform: Callable = lambda x: x, name: Optional[str] = None, @@ -83,6 +84,16 @@ def __init__( then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, image will always be saved as (H,W,D,C). it's used for NIfTI format only. + data_root_dir: if not empty, it specifies the beginning parts of the input file's + absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from + `data_root_dir` to preserve folder structure when saving in case there are files in different + folders with the same file names. for example: + input_file_name: /foo/bar/test1/image.nii, + output_postfix: seg + output_ext: nii.gz + output_dir: /output, + data_root_dir: /foo/bar, + output will be: /output/test1/image/image_seg.nii.gz batch_transform: a callable that is used to transform the ignite.engine.batch into expected format to extract the meta_data dictionary. output_transform: a callable that is used to transform the @@ -103,6 +114,7 @@ def __init__( dtype=dtype, output_dtype=output_dtype, squeeze_end_dims=squeeze_end_dims, + data_root_dir=data_root_dir, save_batch=True, ) self.batch_transform = batch_transform diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 9c2727ffc3..61439c0355 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -212,6 +212,16 @@ class SaveImage(Transform): then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, image will always be saved as (H,W,D,C). it's used for NIfTI format only. 
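# A small sketch of how the new `data_root_dir` argument flows into `create_file_basename`
# to preserve folder structure; the relative paths below are illustrative, mirroring the
# docstring example above.
import os
import tempfile

from monai.data.utils import create_file_basename

with tempfile.TemporaryDirectory() as out_dir:
    name = create_file_basename(
        postfix="seg",
        input_file_name=os.path.join("foo", "bar", "test1", "image.nii"),
        folder_path=out_dir,
        data_root_dir=os.path.join("foo", "bar"),
    )
    # the "test1" subfolder is kept relative to data_root_dir
    assert name == os.path.join(out_dir, "test1", "image", "image_seg")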
+ data_root_dir: if not empty, it specifies the beginning parts of the input file's + absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from + `data_root_dir` to preserve folder structure when saving in case there are files in different + folders with the same file names. for example: + input_file_name: /foo/bar/test1/image.nii, + output_postfix: seg + output_ext: nii.gz + output_dir: /output, + data_root_dir: /foo/bar, + output will be: /output/test1/image/image_seg.nii.gz """ @@ -228,6 +238,7 @@ def __init__( output_dtype: DtypeLike = np.float32, save_batch: bool = False, squeeze_end_dims: bool = True, + data_root_dir: str = "", ) -> None: self.saver: Union[NiftiSaver, PNGSaver] if output_ext in (".nii.gz", ".nii"): @@ -241,6 +252,7 @@ def __init__( dtype=dtype, output_dtype=output_dtype, squeeze_end_dims=squeeze_end_dims, + data_root_dir=data_root_dir, ) elif output_ext == ".png": self.saver = PNGSaver( @@ -250,6 +262,7 @@ def __init__( resample=resample, mode=InterpolateMode(mode), scale=scale, + data_root_dir=data_root_dir, ) else: raise ValueError(f"unsupported output extension: {output_ext}.") diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index 79f8561d5e..6a82ff2267 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -173,6 +173,16 @@ class SaveImaged(MapTransform): then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, image will always be saved as (H,W,D,C). it's used for NIfTI format only. + data_root_dir: if not empty, it specifies the beginning parts of the input file's + absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from + `data_root_dir` to preserve folder structure when saving in case there are files in different + folders with the same file names. 
for example: + input_file_name: /foo/bar/test1/image.nii, + output_postfix: seg + output_ext: nii.gz + output_dir: /output, + data_root_dir: /foo/bar, + output will be: /output/test1/image/image_seg.nii.gz """ @@ -192,6 +202,7 @@ def __init__( save_batch: bool = False, allow_missing_keys: bool = False, squeeze_end_dims: bool = True, + data_root_dir: str = "", ) -> None: super().__init__(keys, allow_missing_keys) self.meta_key_postfix = meta_key_postfix @@ -207,6 +218,7 @@ def __init__( output_dtype=output_dtype, save_batch=save_batch, squeeze_end_dims=squeeze_end_dims, + data_root_dir=data_root_dir, ) def __call__(self, data): diff --git a/tests/test_file_basename.py b/tests/test_file_basename.py index 21039d3d15..1b67baea8c 100644 --- a/tests/test_file_basename.py +++ b/tests/test_file_basename.py @@ -36,6 +36,15 @@ def test_value(self): expected = os.path.join(output_tmp, "bar", "test", "test") self.assertEqual(result, expected) + result = create_file_basename( + postfix="", + input_file_name=os.path.join("foo", "bar", "data", "test.txt"), + folder_path=output_tmp, + data_root_dir=os.path.join("foo", "bar"), + ) + expected = os.path.join(output_tmp, "data", "test", "test") + self.assertEqual(result, expected) + result = create_file_basename("", os.path.join("foo", "bar", "test.txt"), output_tmp, "bar") expected = os.path.join(tempdir, "foo", "bar", "test", "test") self.assertEqual(result, expected) diff --git a/tests/test_png_saver.py b/tests/test_png_saver.py index 6aa50184df..dbc41dfd75 100644 --- a/tests/test_png_saver.py +++ b/tests/test_png_saver.py @@ -55,6 +55,25 @@ def test_saved_content_spatial_size(self): filepath = os.path.join("testfile" + str(i), "testfile" + str(i) + "_seg.png") self.assertTrue(os.path.exists(os.path.join(tempdir, filepath))) + def test_saved_specified_root(self): + with tempfile.TemporaryDirectory() as tempdir: + + saver = PNGSaver( + output_dir=tempdir, + output_postfix="seg", + output_ext=".png", + scale=255, + data_root_dir="test", + ) + + meta_data = { + "filename_or_obj": [os.path.join("test", "testfile" + str(i), "image" + ".jpg") for i in range(8)] + } + saver.save_batch(torch.randint(1, 200, (8, 1, 2, 2)), meta_data) + for i in range(8): + filepath = os.path.join("testfile" + str(i), "image", "image" + "_seg.png") + self.assertTrue(os.path.exists(os.path.join(tempdir, filepath))) + if __name__ == "__main__": unittest.main() From f5426c84147c7ef166aa047e9bc4f333d09f0d6c Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 30 Mar 2021 05:40:23 +0800 Subject: [PATCH 116/457] 1821 add slicing index and negative index for datasets (#1885) * [DLMED] add slicing support in datasets Signed-off-by: Nic Ma * [DLMED] add unit tests Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] fix typo Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma * [DLMED] update doc-strings and typehints Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma Co-authored-by: monai-bot --- monai/data/dataset.py | 51 +++++++++++++++++++++------ monai/data/grid_dataset.py | 2 +- monai/data/inverse_batch_transform.py | 2 +- monai/utils/prob_nms.py | 2 +- tests/test_cachedataset.py | 6 ++++ tests/test_dataset.py | 13 +++++++ tests/test_persistentdataset.py | 43 +++++++++++----------- tests/test_zipdataset.py | 12 +++++++ 8 files changed, 97 insertions(+), 34 deletions(-) diff --git a/monai/data/dataset.py b/monai/data/dataset.py index 06a29574af..12403bbff1 
100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -10,6 +10,7 @@ # limitations under the License. +import collections.abc import math import pickle import sys @@ -24,6 +25,7 @@ import numpy as np import torch from torch.utils.data import Dataset as _TorchDataset +from torch.utils.data import Subset from monai.data.utils import first, pickle_hashing from monai.transforms import Compose, Randomizable, Transform, apply_transform @@ -44,6 +46,9 @@ class Dataset(_TorchDataset): """ A generic dataset with a length property and an optional callable data transform when fetching a data sample. + If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, + for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset + For example, typical input data can be a list of dictionaries:: [{ { { @@ -66,12 +71,26 @@ def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None def __len__(self) -> int: return len(self.data) - def __getitem__(self, index: int): - data = self.data[index] - if self.transform is not None: - data = apply_transform(self.transform, data) + def _transform(self, index: int): + """ + Fetch single data item from `self.data`. + """ + data_i = self.data[index] + return apply_transform(self.transform, data_i) if self.transform is not None else data_i - return data + def __getitem__(self, index: Union[int, slice, Sequence[int]]): + """ + Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise. + """ + if isinstance(index, slice): + # dataset[:42] + start, stop, step = index.indices(len(self)) + indices = range(start, stop, step) + return Subset(dataset=self, indices=indices) + if isinstance(index, collections.abc.Sequence): + # dataset[[1, 3, 4]] + return Subset(dataset=self, indices=index) + return self._transform(index) class PersistentDataset(Dataset): @@ -79,6 +98,8 @@ class PersistentDataset(Dataset): Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data, it can operate transforms for specific fields. Results from the non-random transform components are computed when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses. + If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, + for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset For example, typical input data can be a list of dictionaries:: @@ -228,7 +249,7 @@ def _cachecheck(self, item_transformed): temp_hash_file.rename(hashfile) return _item_transformed - def __getitem__(self, index: int): + def _transform(self, index: int): pre_random_item = self._cachecheck(self.data[index]) return self._post_transform(pre_random_item) @@ -446,6 +467,8 @@ class CacheDataset(Dataset): To improve the caching efficiency, please always put as many as possible non-random transforms before the randomized ones when composing the chain of transforms. 
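# A brief usage sketch of the slicing behaviour described above; the toy data list is
# illustrative.
from monai.data import Dataset

ds = Dataset(data=[{"x": i} for i in range(5)], transform=None)
subset = ds[1:4]  # a torch.utils.data.Subset over indices 1, 2, 3
assert len(subset) == 3 and subset[0] == {"x": 1}
assert ds[-1] == {"x": 4}  # integer (including negative) indices return single items
assert len(ds[[0, 2, 3]]) == 3  # a sequence of indices also returns a Subset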
+ If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, + for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset For example, if the transform is a `Compose` of:: @@ -529,10 +552,10 @@ def _load_cache_item(self, idx: int): item = apply_transform(_transform, item) return item - def __getitem__(self, index): - if index >= self.cache_num: + def _transform(self, index: int): + if index % len(self) >= self.cache_num: # support negative index # no cache for this index, execute all the transforms directly - return super(CacheDataset, self).__getitem__(index) + return super()._transform(index) # load data from cache and execute from the first random transform start_run = False if self._cache is None: @@ -561,6 +584,8 @@ class SmartCacheDataset(Randomizable, CacheDataset): where r is the configured replace rate). For more details, please refer to: https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache + If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, + for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`. so the actual training images cached and replaced for every epoch are as below:: @@ -812,6 +837,8 @@ class ZipDataset(Dataset): finally return (img, imgmeta, seg, segmeta). And if the datasets don't have same length, use the minimum length of them as the length of ZipDataset. + If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, + for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset Examples:: @@ -836,7 +863,7 @@ def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> def __len__(self) -> int: return min((len(dataset) for dataset in self.data)) - def __getitem__(self, index: int): + def _transform(self, index: int): def to_list(x): return list(x) if isinstance(x, (tuple, list)) else [x] @@ -953,6 +980,8 @@ class NPZDictItemDataset(Dataset): Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts mapping names to an item extracted from the loaded arrays. 
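# Sketch of the new subclassing contract introduced above: dataset subclasses override
# ``_transform`` rather than ``__getitem__``, so the slicing/Subset logic of the base
# ``Dataset`` is inherited for free. ``SquareDataset`` is a hypothetical illustration.
from monai.data import Dataset

class SquareDataset(Dataset):
    def _transform(self, index: int):
        return self.data[index] ** 2

ds = SquareDataset(data=[0, 1, 2, 3], transform=None)
assert ds[3] == 9
assert list(ds[1:3]) == [1, 4]  # slicing support comes from the base class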
+ If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, + for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset Args: npzfile: Path to .npz file or stream containing .npz file data @@ -989,7 +1018,7 @@ def __init__( def __len__(self): return self.length - def __getitem__(self, index: int): + def _transform(self, index: int): data = {k: v[index] for k, v in self.arrays.items()} if self.transform is not None: diff --git a/monai/data/grid_dataset.py b/monai/data/grid_dataset.py index 3f373491ed..b789b9c032 100644 --- a/monai/data/grid_dataset.py +++ b/monai/data/grid_dataset.py @@ -227,7 +227,7 @@ def __init__( def __len__(self) -> int: return len(self.data) * self.samples_per_image - def __getitem__(self, index: int): + def _transform(self, index: int): image_id = int(index / self.samples_per_image) image = self.data[image_id] patches = self.patch_func(image) diff --git a/monai/data/inverse_batch_transform.py b/monai/data/inverse_batch_transform.py index fbc42c6ce1..fa88114c84 100644 --- a/monai/data/inverse_batch_transform.py +++ b/monai/data/inverse_batch_transform.py @@ -36,7 +36,7 @@ def __init__( self.invertible_transform = transform self.pad_collation_used = pad_collation_used - def __getitem__(self, index: int) -> Dict[Hashable, np.ndarray]: + def _transform(self, index: int) -> Dict[Hashable, np.ndarray]: data = dict(self.data[index]) # If pad collation was used, then we need to undo this first if self.pad_collation_used: diff --git a/monai/utils/prob_nms.py b/monai/utils/prob_nms.py index bdffdfe005..29ba93d287 100644 --- a/monai/utils/prob_nms.py +++ b/monai/utils/prob_nms.py @@ -93,7 +93,7 @@ def __call__( idx_min_range = (max_idx_arr - self.box_lower_bd).clip(0, None) idx_max_range = (max_idx_arr + self.box_upper_bd).clip(None, probs_map_shape) # for each dimension, set values during index ranges to 0 - slices = tuple([slice(idx_min_range[i], idx_max_range[i]) for i in range(self.spatial_dims)]) + slices = tuple(slice(idx_min_range[i], idx_max_range[i]) for i in range(self.spatial_dims)) probs_map[slices] = 0 return outputs diff --git a/tests/test_cachedataset.py b/tests/test_cachedataset.py index 2b8931704a..91e2558e89 100644 --- a/tests/test_cachedataset.py +++ b/tests/test_cachedataset.py @@ -51,10 +51,14 @@ def test_shape(self, transform, expected_shape): dataset = CacheDataset(data=test_data, transform=transform, cache_rate=0.5) data1 = dataset[0] data2 = dataset[1] + data3 = dataset[0:-1] + data4 = dataset[-1] + self.assertEqual(len(data3), 1) if transform is None: self.assertEqual(data1["image"], os.path.join(tempdir, "test_image1.nii.gz")) self.assertEqual(data2["label"], os.path.join(tempdir, "test_label2.nii.gz")) + self.assertEqual(data4["image"], os.path.join(tempdir, "test_image2.nii.gz")) else: self.assertTupleEqual(data1["image"].shape, expected_shape) self.assertTupleEqual(data1["label"].shape, expected_shape) @@ -62,6 +66,8 @@ def test_shape(self, transform, expected_shape): self.assertTupleEqual(data2["image"].shape, expected_shape) self.assertTupleEqual(data2["label"].shape, expected_shape) self.assertTupleEqual(data2["extra"].shape, expected_shape) + for d in data3: + self.assertTupleEqual(d["image"].shape, expected_shape) if __name__ == "__main__": diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 2e92b15977..491b777550 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -66,6 +66,8 @@ def test_shape(self, expected_shape): dataset = 
Dataset(data=test_data, transform=LoadImaged(keys=["image", "label", "extra"])) data1_simple = dataset[0] data2_simple = dataset[1] + data3_simple = dataset[-1] + data4_simple = dataset[[0, 1]] self.assertTupleEqual(data1_simple["image"].shape, expected_shape) self.assertTupleEqual(data1_simple["label"].shape, expected_shape) @@ -73,6 +75,17 @@ def test_shape(self, expected_shape): self.assertTupleEqual(data2_simple["image"].shape, expected_shape) self.assertTupleEqual(data2_simple["label"].shape, expected_shape) self.assertTupleEqual(data2_simple["extra"].shape, expected_shape) + self.assertTupleEqual(data3_simple["image"].shape, expected_shape) + self.assertTupleEqual(data3_simple["label"].shape, expected_shape) + self.assertTupleEqual(data3_simple["extra"].shape, expected_shape) + self.assertTupleEqual(data4_simple[0]["image"].shape, expected_shape) + self.assertTupleEqual(data4_simple[1]["label"].shape, expected_shape) + self.assertTupleEqual(data4_simple[-1]["extra"].shape, expected_shape) + + data4_list = dataset[0:1] + self.assertEqual(len(data4_list), 1) + for d in data4_list: + self.assertTupleEqual(d["image"].shape, expected_shape) if __name__ == "__main__": diff --git a/tests/test_persistentdataset.py b/tests/test_persistentdataset.py index deed810f1a..5d94064cf4 100644 --- a/tests/test_persistentdataset.py +++ b/tests/test_persistentdataset.py @@ -98,26 +98,29 @@ def test_shape(self, transform, expected_shape): dataset_postcached = PersistentDataset(data=test_data, transform=transform, cache_dir=cache_dir) data1_postcached = dataset_postcached[0] data2_postcached = dataset_postcached[1] - - if transform is None: - self.assertEqual(data1_precached["image"], os.path.join(tempdir, "test_image1.nii.gz")) - self.assertEqual(data2_precached["label"], os.path.join(tempdir, "test_label2.nii.gz")) - self.assertEqual(data1_postcached["image"], os.path.join(tempdir, "test_image1.nii.gz")) - self.assertEqual(data2_postcached["extra"], os.path.join(tempdir, "test_extra2.nii.gz")) - else: - self.assertTupleEqual(data1_precached["image"].shape, expected_shape) - self.assertTupleEqual(data1_precached["label"].shape, expected_shape) - self.assertTupleEqual(data1_precached["extra"].shape, expected_shape) - self.assertTupleEqual(data2_precached["image"].shape, expected_shape) - self.assertTupleEqual(data2_precached["label"].shape, expected_shape) - self.assertTupleEqual(data2_precached["extra"].shape, expected_shape) - - self.assertTupleEqual(data1_postcached["image"].shape, expected_shape) - self.assertTupleEqual(data1_postcached["label"].shape, expected_shape) - self.assertTupleEqual(data1_postcached["extra"].shape, expected_shape) - self.assertTupleEqual(data2_postcached["image"].shape, expected_shape) - self.assertTupleEqual(data2_postcached["label"].shape, expected_shape) - self.assertTupleEqual(data2_postcached["extra"].shape, expected_shape) + data3_postcached = dataset_postcached[0:2] + + if transform is None: + self.assertEqual(data1_precached["image"], os.path.join(tempdir, "test_image1.nii.gz")) + self.assertEqual(data2_precached["label"], os.path.join(tempdir, "test_label2.nii.gz")) + self.assertEqual(data1_postcached["image"], os.path.join(tempdir, "test_image1.nii.gz")) + self.assertEqual(data2_postcached["extra"], os.path.join(tempdir, "test_extra2.nii.gz")) + else: + self.assertTupleEqual(data1_precached["image"].shape, expected_shape) + self.assertTupleEqual(data1_precached["label"].shape, expected_shape) + self.assertTupleEqual(data1_precached["extra"].shape, expected_shape) + 
self.assertTupleEqual(data2_precached["image"].shape, expected_shape) + self.assertTupleEqual(data2_precached["label"].shape, expected_shape) + self.assertTupleEqual(data2_precached["extra"].shape, expected_shape) + + self.assertTupleEqual(data1_postcached["image"].shape, expected_shape) + self.assertTupleEqual(data1_postcached["label"].shape, expected_shape) + self.assertTupleEqual(data1_postcached["extra"].shape, expected_shape) + self.assertTupleEqual(data2_postcached["image"].shape, expected_shape) + self.assertTupleEqual(data2_postcached["label"].shape, expected_shape) + self.assertTupleEqual(data2_postcached["extra"].shape, expected_shape) + for d in data3_postcached: + self.assertTupleEqual(d["image"].shape, expected_shape) if __name__ == "__main__": diff --git a/tests/test_zipdataset.py b/tests/test_zipdataset.py index 1bdb6458d3..710ca71fc2 100644 --- a/tests/test_zipdataset.py +++ b/tests/test_zipdataset.py @@ -52,6 +52,18 @@ def test_value(self, datasets, transform, expected_output, expected_length): self.assertEqual(test_dataset[0], expected_output) self.assertEqual(len(test_dataset), expected_length) + def test_slicing(self): + test_dataset = ZipDataset(datasets=[Dataset_(5), Dataset_(5), Dataset_(5)], transform=None) + subset = test_dataset[0:2] + self.assertEqual(subset[-1], (1, 1, 1)) + self.assertEqual(len(subset), 2) + + def test_sequence(self): + test_dataset = ZipDataset(datasets=[Dataset_(5), Dataset_(5), Dataset_(5)], transform=None) + subset = test_dataset[[1, 3, 4]] + self.assertEqual(subset[-1], (4, 4, 4)) + self.assertEqual(len(subset), 3) + if __name__ == "__main__": unittest.main() From f8d975599af5a830b645c635de198cdc14a3221c Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Mon, 29 Mar 2021 20:50:07 -0400 Subject: [PATCH 117/457] Update Pathology Tests (#1892) * download_if_not_exist Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update cuimage reader tests Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update openslide reader tests Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update the file path to be relative to the test file Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update patch wsi dataset tests Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update smartcache patch wsi dataset tests Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Revert "download_if_not_exist" This reverts commit a4c65f33ed86776d8acb8cf507e29c688d42913e. 
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update with download_url Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * [MONAI] python code formatting --- tests/test_cuimage_reader.py | 43 +++++++++------------ tests/test_openslide_reader.py | 45 +++++++++------------- tests/test_patch_wsi_dataset.py | 37 +++++++----------- tests/test_smartcache_patch_wsi_dataset.py | 39 ++++++++----------- 4 files changed, 65 insertions(+), 99 deletions(-) diff --git a/tests/test_cuimage_reader.py b/tests/test_cuimage_reader.py index 221a458ca8..036d5ad1ae 100644 --- a/tests/test_cuimage_reader.py +++ b/tests/test_cuimage_reader.py @@ -1,12 +1,12 @@ import os import unittest from unittest import skipUnless -from urllib import request import numpy as np from numpy.testing import assert_array_equal from parameterized import parameterized +from monai.apps.utils import download_url from monai.data.image_reader import WSIReader from monai.utils import optional_import @@ -14,25 +14,27 @@ FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) + HEIGHT = 32914 WIDTH = 46000 -TEST_CASE_0 = [FILE_URL, (3, HEIGHT, WIDTH)] +TEST_CASE_0 = [FILE_PATH, (3, HEIGHT, WIDTH)] TEST_CASE_1 = [ - FILE_URL, + FILE_PATH, {"location": (HEIGHT // 2, WIDTH // 2), "size": (2, 1), "level": 0}, np.array([[[246], [246]], [[246], [246]], [[246], [246]]]), ] TEST_CASE_2 = [ - FILE_URL, + FILE_PATH, {"location": (0, 0), "size": (2, 1), "level": 2}, np.array([[[239], [239]], [[239], [239]], [[239], [239]]]), ] TEST_CASE_3 = [ - FILE_URL, + FILE_PATH, { "location": (0, 0), "size": (8, 8), @@ -49,7 +51,7 @@ ] TEST_CASE_4 = [ - FILE_URL, + FILE_PATH, { "location": (0, 0), "size": (8, 8), @@ -62,42 +64,33 @@ class TestCuCIMReader(unittest.TestCase): - @parameterized.expand([TEST_CASE_0]) @skipUnless(has_cim, "Requires CuCIM") - def test_read_whole_image(self, file_url, expected_shape): - filename = self.camelyon_data_download(file_url) + def setUp(self): + download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f") + + @parameterized.expand([TEST_CASE_0]) + def test_read_whole_image(self, file_path, expected_shape): reader = WSIReader("cuCIM") - img_obj = reader.read(filename) + img_obj = reader.read(file_path) img = reader.get_data(img_obj)[0] self.assertTupleEqual(img.shape, expected_shape) @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) - @skipUnless(has_cim, "Requires cuCIM") - def test_read_region(self, file_url, patch_info, expected_img): - filename = self.camelyon_data_download(file_url) + def test_read_region(self, file_path, patch_info, expected_img): reader = WSIReader("cuCIM") - img_obj = reader.read(filename) + img_obj = reader.read(file_path) img = reader.get_data(img_obj, **patch_info)[0] self.assertTupleEqual(img.shape, expected_img.shape) self.assertIsNone(assert_array_equal(img, expected_img)) @parameterized.expand([TEST_CASE_3, TEST_CASE_4]) - @skipUnless(has_cim, "Requires cuCIM") - def test_read_patches(self, file_url, patch_info, expected_img): - filename = self.camelyon_data_download(file_url) + def test_read_patches(self, file_path, patch_info, expected_img): reader = WSIReader("cuCIM") - img_obj = reader.read(filename) + img_obj = reader.read(file_path) img = reader.get_data(img_obj, **patch_info)[0] self.assertTupleEqual(img.shape, expected_img.shape) self.assertIsNone(assert_array_equal(img, expected_img)) - def 
camelyon_data_download(self, file_url): - filename = os.path.basename(file_url) - if not os.path.exists(filename): - print(f"Test image [{filename}] does not exist. Downloading...") - request.urlretrieve(file_url, filename) - return filename - if __name__ == "__main__": unittest.main() diff --git a/tests/test_openslide_reader.py b/tests/test_openslide_reader.py index 67a6683be3..ca50cec4de 100644 --- a/tests/test_openslide_reader.py +++ b/tests/test_openslide_reader.py @@ -1,12 +1,12 @@ import os import unittest from unittest import skipUnless -from urllib import request import numpy as np from numpy.testing import assert_array_equal from parameterized import parameterized +from monai.apps.utils import download_url from monai.data.image_reader import WSIReader from monai.utils import optional_import @@ -14,25 +14,27 @@ FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) + HEIGHT = 32914 WIDTH = 46000 -TEST_CASE_0 = [FILE_URL, (3, HEIGHT, WIDTH)] +TEST_CASE_0 = [FILE_PATH, (3, HEIGHT, WIDTH)] TEST_CASE_1 = [ - FILE_URL, + FILE_PATH, {"location": (HEIGHT // 2, WIDTH // 2), "size": (2, 1), "level": 0}, np.array([[[246], [246]], [[246], [246]], [[246], [246]]]), ] TEST_CASE_2 = [ - FILE_URL, + FILE_PATH, {"location": (0, 0), "size": (2, 1), "level": 2}, np.array([[[239], [239]], [[239], [239]], [[239], [239]]]), ] TEST_CASE_3 = [ - FILE_URL, + FILE_PATH, { "location": (0, 0), "size": (8, 8), @@ -49,7 +51,7 @@ ] TEST_CASE_4 = [ - FILE_URL, + FILE_PATH, { "location": (0, 0), "size": (8, 8), @@ -61,41 +63,30 @@ ] -def camelyon_data_download(file_url): - filename = os.path.basename(file_url) - fullname = os.path.join("tests", "testing_data", filename) - if not os.path.exists(fullname): - print(f"Test image [{fullname}] does not exist. 
Downloading...") - request.urlretrieve(file_url, fullname) - return fullname - - class TestOpenSlideReader(unittest.TestCase): - @parameterized.expand([TEST_CASE_0]) @skipUnless(has_osl, "Requires OpenSlide") - def test_read_whole_image(self, file_url, expected_shape): - filename = camelyon_data_download(file_url) + def setUp(self): + download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f") + + @parameterized.expand([TEST_CASE_0]) + def test_read_whole_image(self, file_path, expected_shape): reader = WSIReader("OpenSlide") - img_obj = reader.read(filename) + img_obj = reader.read(file_path) img = reader.get_data(img_obj)[0] self.assertTupleEqual(img.shape, expected_shape) @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) - @skipUnless(has_osl, "Requires OpenSlide") - def test_read_region(self, file_url, patch_info, expected_img): - filename = camelyon_data_download(file_url) + def test_read_region(self, file_path, patch_info, expected_img): reader = WSIReader("OpenSlide") - img_obj = reader.read(filename) + img_obj = reader.read(file_path) img = reader.get_data(img_obj, **patch_info)[0] self.assertTupleEqual(img.shape, expected_img.shape) self.assertIsNone(assert_array_equal(img, expected_img)) @parameterized.expand([TEST_CASE_3, TEST_CASE_4]) - @skipUnless(has_osl, "Requires OpenSlide") - def test_read_patches(self, file_url, patch_info, expected_img): - filename = camelyon_data_download(file_url) + def test_read_patches(self, file_path, patch_info, expected_img): reader = WSIReader("OpenSlide") - img_obj = reader.read(filename) + img_obj = reader.read(file_path) img = reader.get_data(img_obj, **patch_info)[0] self.assertTupleEqual(img.shape, expected_img.shape) self.assertIsNone(assert_array_equal(img, expected_img)) diff --git a/tests/test_patch_wsi_dataset.py b/tests/test_patch_wsi_dataset.py index 1d470374bb..d030671d06 100644 --- a/tests/test_patch_wsi_dataset.py +++ b/tests/test_patch_wsi_dataset.py @@ -1,25 +1,25 @@ import os import unittest from unittest import skipUnless -from urllib import request import numpy as np from numpy.testing import assert_array_equal from parameterized import parameterized from monai.apps.pathology.datasets import PatchWSIDataset +from monai.apps.utils import download_url from monai.utils import optional_import _, has_cim = optional_import("cucim") _, has_osl = optional_import("openslide") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) TEST_CASE_0 = [ - FILE_URL, { "data": [ - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [1]}, + {"image": FILE_PATH, "location": [0, 0], "label": [1]}, ], "region_size": (1, 1), "grid_shape": (1, 1), @@ -32,9 +32,8 @@ ] TEST_CASE_1 = [ - FILE_URL, { - "data": [{"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [0, 0, 0, 1]}], + "data": [{"image": FILE_PATH, "location": [10004, 20004], "label": [0, 0, 0, 1]}], "region_size": (8, 8), "grid_shape": (2, 2), "patch_size": 1, @@ -49,10 +48,9 @@ ] TEST_CASE_2 = [ - FILE_URL, { "data": [ - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [1]}, + {"image": FILE_PATH, "location": [0, 0], "label": [1]}, ], "region_size": 1, "grid_shape": 1, @@ -65,10 +63,9 @@ ] TEST_CASE_3 = [ - FILE_URL, { "data": [ - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [[[0, 1], [1, 0]]]}, + {"image": FILE_PATH, "location": [0, 0], "label": [[[0, 1], [1, 0]]]}, ], "region_size": 1, "grid_shape": 1, @@ -81,10 +78,9 
@@ ] TEST_CASE_OPENSLIDE_0 = [ - FILE_URL, { "data": [ - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [1]}, + {"image": FILE_PATH, "location": [0, 0], "label": [1]}, ], "region_size": (1, 1), "grid_shape": (1, 1), @@ -97,9 +93,8 @@ ] TEST_CASE_OPENSLIDE_1 = [ - FILE_URL, { - "data": [{"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [0, 0, 0, 1]}], + "data": [{"image": FILE_PATH, "location": [10004, 20004], "label": [0, 0, 0, 1]}], "region_size": (8, 8), "grid_shape": (2, 2), "patch_size": 1, @@ -115,6 +110,9 @@ class TestPatchWSIDataset(unittest.TestCase): + def setUp(self): + download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f") + @parameterized.expand( [ TEST_CASE_0, @@ -124,8 +122,7 @@ class TestPatchWSIDataset(unittest.TestCase): ] ) @skipUnless(has_cim, "Requires CuCIM") - def test_read_patches_cucim(self, file_url, input_parameters, expected): - self.camelyon_data_download(file_url) + def test_read_patches_cucim(self, input_parameters, expected): dataset = PatchWSIDataset(**input_parameters) samples = dataset[0] for i in range(len(samples)): @@ -141,8 +138,7 @@ def test_read_patches_cucim(self, file_url, input_parameters, expected): ] ) @skipUnless(has_osl, "Requires OpenSlide") - def test_read_patches_openslide(self, file_url, input_parameters, expected): - self.camelyon_data_download(file_url) + def test_read_patches_openslide(self, input_parameters, expected): dataset = PatchWSIDataset(**input_parameters) samples = dataset[0] for i in range(len(samples)): @@ -151,13 +147,6 @@ def test_read_patches_openslide(self, file_url, input_parameters, expected): self.assertIsNone(assert_array_equal(samples[i]["label"], expected[i]["label"])) self.assertIsNone(assert_array_equal(samples[i]["image"], expected[i]["image"])) - def camelyon_data_download(self, file_url): - filename = os.path.basename(file_url) - if not os.path.exists(filename): - print(f"Test image [{filename}] does not exist. 
Downloading...") - request.urlretrieve(file_url, filename) - return filename - if __name__ == "__main__": unittest.main() diff --git a/tests/test_smartcache_patch_wsi_dataset.py b/tests/test_smartcache_patch_wsi_dataset.py index e3f4d03a2d..a7c90b5205 100644 --- a/tests/test_smartcache_patch_wsi_dataset.py +++ b/tests/test_smartcache_patch_wsi_dataset.py @@ -1,27 +1,27 @@ import os import unittest from unittest import skipUnless -from urllib import request import numpy as np from numpy.testing import assert_array_equal from parameterized import parameterized from monai.apps.pathology.datasets import SmartCachePatchWSIDataset +from monai.apps.utils import download_url from monai.utils import optional_import _, has_cim = optional_import("cucim") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) TEST_CASE_0 = [ - FILE_URL, { "data": [ - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [0]}, - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [1]}, - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [2]}, - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [3]}, + {"image": FILE_PATH, "location": [0, 0], "label": [0]}, + {"image": FILE_PATH, "location": [0, 0], "label": [1]}, + {"image": FILE_PATH, "location": [0, 0], "label": [2]}, + {"image": FILE_PATH, "location": [0, 0], "label": [3]}, ], "region_size": (1, 1), "grid_shape": (1, 1), @@ -46,12 +46,11 @@ ] TEST_CASE_1 = [ - FILE_URL, { "data": [ - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [[0, 0]]}, - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [[1, 1]]}, - {"image": "./CMU-1.tiff", "location": [0, 0], "label": [[2, 2]]}, + {"image": FILE_PATH, "location": [0, 0], "label": [[0, 0]]}, + {"image": FILE_PATH, "location": [0, 0], "label": [[1, 1]]}, + {"image": FILE_PATH, "location": [0, 0], "label": [[2, 2]]}, ], "region_size": (1, 1), "grid_shape": (1, 1), @@ -74,12 +73,11 @@ ] TEST_CASE_2 = [ - FILE_URL, { "data": [ - {"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [0, 0, 0, 0]}, - {"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [1, 1, 1, 1]}, - {"image": "./CMU-1.tiff", "location": [10004, 20004], "label": [2, 2, 2, 2]}, + {"image": FILE_PATH, "location": [10004, 20004], "label": [0, 0, 0, 0]}, + {"image": FILE_PATH, "location": [10004, 20004], "label": [1, 1, 1, 1]}, + {"image": FILE_PATH, "location": [10004, 20004], "label": [2, 2, 2, 2]}, ], "region_size": (8, 8), "grid_shape": (2, 2), @@ -121,6 +119,9 @@ class TestSmartCachePatchWSIDataset(unittest.TestCase): + def setUp(self): + download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f") + @parameterized.expand( [ TEST_CASE_0, @@ -129,8 +130,7 @@ class TestSmartCachePatchWSIDataset(unittest.TestCase): ] ) @skipUnless(has_cim, "Requires CuCIM") - def test_read_patches(self, file_url, input_parameters, expected): - self.camelyon_data_download(file_url) + def test_read_patches(self, input_parameters, expected): dataset = SmartCachePatchWSIDataset(**input_parameters) self.assertEqual(len(dataset), input_parameters["cache_num"]) total_num_samples = len(input_parameters["data"]) @@ -149,13 +149,6 @@ def test_read_patches(self, file_url, input_parameters, expected): dataset.update_cache() dataset.shutdown() - def camelyon_data_download(self, file_url): - filename = os.path.basename(file_url) - if not os.path.exists(filename): - print(f"Test image [{filename}] does not 
exist. Downloading...") - request.urlretrieve(file_url, filename) - return filename - def assert_samples_expected(self, samples, expected): for i in range(len(samples)): self.assertTupleEqual(samples[i]["label"].shape, expected[i]["label"].shape) From f0c9c18c4ce5c44f572166a768465af48d1243e8 Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Tue, 30 Mar 2021 14:21:55 +0800 Subject: [PATCH 118/457] Implement random bias field transform (#1886) * Implement random bias field transform Signed-off-by: Yiheng Wang --- docs/source/transforms.rst | 12 +++ monai/transforms/__init__.py | 4 + monai/transforms/intensity/array.py | 94 +++++++++++++++++++++++- monai/transforms/intensity/dictionary.py | 55 ++++++++++++++ tests/test_random_bias_field.py | 66 +++++++++++++++++ tests/test_random_bias_fieldd.py | 65 ++++++++++++++++ 6 files changed, 295 insertions(+), 1 deletion(-) create mode 100644 tests/test_random_bias_field.py create mode 100644 tests/test_random_bias_fieldd.py diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 28bfdc5f24..a726b25435 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -155,6 +155,12 @@ Intensity :members: :special-members: __call__ +`RandBiasField` +""""""""""""""" +.. autoclass:: RandBiasField + :members: + :special-members: __call__ + `ScaleIntensity` """""""""""""""" .. autoclass:: ScaleIntensity @@ -668,6 +674,12 @@ Instensity (Dict) :members: :special-members: __call__ +`RandBiasFieldd` +"""""""""""""""" +.. autoclass:: RandBiasFieldd + :members: + :special-members: __call__ + `ScaleIntensityd` """"""""""""""""" .. autoclass:: ScaleIntensityd diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index c7b60e15e3..b8cc832db1 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -73,6 +73,7 @@ MaskIntensity, NormalizeIntensity, RandAdjustContrast, + RandBiasField, RandGaussianNoise, RandGaussianSharpen, RandGaussianSmooth, @@ -107,6 +108,9 @@ RandAdjustContrastd, RandAdjustContrastD, RandAdjustContrastDict, + RandBiasFieldd, + RandBiasFieldD, + RandBiasFieldDict, RandGaussianNoised, RandGaussianNoiseD, RandGaussianNoiseDict, diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index abd7de151f..d284f43d24 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -14,7 +14,7 @@ """ from collections.abc import Iterable -from typing import Any, Optional, Sequence, Tuple, Union +from typing import Any, List, Optional, Sequence, Tuple, Union from warnings import warn import numpy as np @@ -32,6 +32,7 @@ "RandShiftIntensity", "StdShiftIntensity", "RandStdShiftIntensity", + "RandBiasField", "ScaleIntensity", "RandScaleIntensity", "NormalizeIntensity", @@ -297,6 +298,97 @@ def __call__(self, img: np.ndarray) -> np.ndarray: return scaler(img) +class RandBiasField(RandomizableTransform): + """ + Random bias field augmentation for MR images. + The bias field is considered as a linear combination of smoothly varying basis (polynomial) + functions, as described in `Automated Model-Based Tissue Classification of MR Images of the Brain + `_. + This implementation adapted from `NiftyNet + `_. + Referred to `Longitudinal segmentation of age-related white matter hyperintensities + `_. + + Args: + degree: degree of freedom of the polynomials. The value should be no less than 1. + Defaults to 3. + coeff_range: range of the random coefficients. Defaults to (0.0, 0.1). 
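# A minimal usage sketch of the transform implemented below; the shape and arguments are
# illustrative.
import numpy as np

from monai.transforms import RandBiasField

bias = RandBiasField(degree=3, coeff_range=(0.0, 0.1), prob=1.0)
img = np.ones((1, 32, 32), dtype=np.float32)  # channel-first (C, H, W)
out = bias(img)  # img scaled voxel-wise by a smooth random polynomial field
assert out.shape == img.shape and out.dtype == np.float32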
+ dtype: output data type, default to float32. + prob: probability to do random bias field. + + """ + + def __init__( + self, + degree: int = 3, + coeff_range: Tuple[float, float] = (0.0, 0.1), + dtype: DtypeLike = np.float32, + prob: float = 1.0, + ) -> None: + RandomizableTransform.__init__(self, prob) + if degree < 1: + raise ValueError("degree should be no less than 1.") + self.degree = degree + self.coeff_range = coeff_range + self.dtype = dtype + + def _generate_random_field( + self, + spatial_shape: Tuple[int, ...], + rank: int, + degree: int, + coeff: Tuple[int, ...], + ): + """ + products of polynomials as bias field estimations + """ + coeff_mat = np.zeros((degree + 1,) * rank) + coords = [np.linspace(-1.0, 1.0, dim, dtype=np.float32) for dim in spatial_shape] + if rank == 2: + coeff_mat[np.tril_indices(degree + 1)] = coeff + field = np.polynomial.legendre.leggrid2d(coords[0], coords[1], coeff_mat) + elif rank == 3: + pts: List[List[int]] = [[0, 0, 0]] + for i in range(degree + 1): + for j in range(degree + 1 - i): + for k in range(degree + 1 - i - j): + pts.append([i, j, k]) + if len(pts) > 1: + pts = pts[1:] + np_pts = np.stack(pts) + coeff_mat[np_pts[:, 0], np_pts[:, 1], np_pts[:, 2]] = coeff + field = np.polynomial.legendre.leggrid3d(coords[0], coords[1], coords[2], coeff_mat) + else: + raise NotImplementedError("only supports 2D or 3D fields") + return field + + def randomize(self, data: np.ndarray) -> None: + super().randomize(None) + self.spatial_shape = data.shape[1:] + self.rank = len(self.spatial_shape) + n_coeff = int(np.prod([(self.degree + k) / k for k in range(1, self.rank + 1)])) + self._coeff = self.R.uniform(*self.coeff_range, n_coeff) + + def __call__(self, img: np.ndarray): + """ + Apply the transform to `img`. + """ + self.randomize(data=img) + if not self._do_transform: + return img + num_channels = img.shape[0] + _bias_fields = np.stack( + [ + self._generate_random_field( + spatial_shape=self.spatial_shape, rank=self.rank, degree=self.degree, coeff=self._coeff + ) + for _ in range(num_channels) + ], + axis=0, + ) + return (img * _bias_fields).astype(self.dtype) + + class NormalizeIntensity(Transform): """ Normalize input based on provided args, using calculated mean and std if not provided. diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 881a0d3dc9..269f240ae8 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -28,6 +28,7 @@ GaussianSmooth, MaskIntensity, NormalizeIntensity, + RandBiasField, ScaleIntensity, ScaleIntensityRange, ScaleIntensityRangePercentiles, @@ -44,6 +45,9 @@ "RandShiftIntensityd", "ScaleIntensityd", "RandScaleIntensityd", + "StdShiftIntensityd", + "RandStdShiftIntensityd", + "RandBiasFieldd", "NormalizeIntensityd", "ThresholdIntensityd", "ScaleIntensityRanged", @@ -64,8 +68,14 @@ "RandShiftIntensityDict", "ScaleIntensityD", "ScaleIntensityDict", + "StdShiftIntensityD", + "StdShiftIntensityDict", "RandScaleIntensityD", "RandScaleIntensityDict", + "RandStdShiftIntensityD", + "RandStdShiftIntensityDict", + "RandBiasFieldD", + "RandBiasFieldDict", "NormalizeIntensityD", "NormalizeIntensityDict", "ThresholdIntensityD", @@ -381,6 +391,50 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda return d + + class RandBiasFieldd(RandomizableTransform, MapTransform): + """ + Dictionary-based version of :py:class:`monai.transforms.RandBiasField`.
+ """ + + def __init__( + self, + keys: KeysCollection, + degree: int = 3, + coeff_range: Tuple[float, float] = (0.0, 0.1), + dtype: DtypeLike = np.float32, + prob: float = 1.0, + allow_missing_keys: bool = False, + ) -> None: + """ + Args: + keys: keys of the corresponding items to be transformed. + See also: :py:class:`monai.transforms.compose.MapTransform` + degree: degree of freedom of the polynomials. The value should be no less than 1. + Defaults to 3. + coeff_range: range of the random coefficients. Defaults to (0.0, 0.1). + dtype: output data type, default to float32. + prob: probability to do random bias field. + allow_missing_keys: don't raise exception if key is missing. + + """ + MapTransform.__init__(self, keys, allow_missing_keys) + RandomizableTransform.__init__(self, prob) + + self.rand_bias_field = RandBiasField(degree, coeff_range, dtype, prob) + + def randomize(self, data: Optional[Any] = None) -> None: + super().randomize(None) + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: + d = dict(data) + self.randomize() + if not self._do_transform: + return d + for key in self.key_iterator(d): + d[key] = self.rand_bias_field(d[key]) + return d + + class NormalizeIntensityd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.NormalizeIntensity`. @@ -900,6 +954,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda RandShiftIntensityD = RandShiftIntensityDict = RandShiftIntensityd StdShiftIntensityD = StdShiftIntensityDict = StdShiftIntensityd RandStdShiftIntensityD = RandStdShiftIntensityDict = RandStdShiftIntensityd +RandBiasFieldD = RandBiasFieldDict = RandBiasFieldd ScaleIntensityD = ScaleIntensityDict = ScaleIntensityd RandScaleIntensityD = RandScaleIntensityDict = RandScaleIntensityd NormalizeIntensityD = NormalizeIntensityDict = NormalizeIntensityd diff --git a/tests/test_random_bias_field.py b/tests/test_random_bias_field.py new file mode 100644 index 0000000000..16b4ab6917 --- /dev/null +++ b/tests/test_random_bias_field.py @@ -0,0 +1,66 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import RandBiasField + +TEST_CASES_2D = [{}, (3, 32, 32)] +TEST_CASES_3D = [{}, (3, 32, 32, 32)] +TEST_CASES_2D_ZERO_RANGE = [{"coeff_range": (0.0, 0.0)}, (3, 32, 32)] +TEST_CASES_2D_ONES = [{"coeff_range": (1.0, 1.0)}, np.asarray([[[2, -2], [2, 10]]])] + + +class TestRandBiasField(unittest.TestCase): + @parameterized.expand( + [ + TEST_CASES_2D, + TEST_CASES_3D, + ] + ) + def test_output_shape(self, class_args, img_shape): + for degree in [1, 2, 3]: + bias_field = RandBiasField(degree=degree, **class_args) + img = np.random.rand(*img_shape) + output = bias_field(img) + np.testing.assert_equal(output.shape, img_shape) + np.testing.assert_equal(output.dtype, bias_field.dtype) + + img_zero = np.zeros([*img_shape]) + output_zero = bias_field(img_zero) + np.testing.assert_equal(output_zero, img_zero) + + @parameterized.expand([TEST_CASES_2D_ZERO_RANGE]) + def test_zero_range(self, class_args, img_shape): + bias_field = RandBiasField(**class_args) + img = np.random.rand(*img_shape) + output = bias_field(img) + np.testing.assert_equal(output, np.zeros(img_shape)) + + @parameterized.expand([TEST_CASES_2D_ONES]) + def test_one_range_input(self, class_args, expected): + bias_field = RandBiasField(**class_args) + img = np.ones([1, 2, 2]) + output = bias_field(img) + np.testing.assert_equal(output, expected.astype(bias_field.dtype)) + + def test_zero_prob(self): + bias_field = RandBiasField(prob=0.0) + img = np.random.rand(3, 32, 32) + output = bias_field(img) + np.testing.assert_equal(output, img) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_random_bias_fieldd.py b/tests/test_random_bias_fieldd.py new file mode 100644 index 0000000000..136eb41f2e --- /dev/null +++ b/tests/test_random_bias_fieldd.py @@ -0,0 +1,65 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
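# The corresponding dictionary-based usage, as a small sketch (the key name "img" is
# arbitrary, chosen to match the tests below).
import numpy as np

from monai.transforms import RandBiasFieldd

data = {"img": np.ones((1, 16, 16), dtype=np.float32)}
out = RandBiasFieldd(keys=["img"], degree=3, prob=1.0)(data)
assert out["img"].shape == (1, 16, 16)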
+ +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import RandBiasFieldd + +TEST_CASES_2D = [{}, (3, 32, 32)] +TEST_CASES_3D = [{}, (3, 32, 32, 32)] +TEST_CASES_2D_ZERO_RANGE = [{"coeff_range": (0.0, 0.0)}, (3, 32, 32)] +TEST_CASES_2D_ONES = [{"coeff_range": (1.0, 1.0)}, np.asarray([[[2, -2], [2, 10]]])] + + +class TestRandBiasFieldd(unittest.TestCase): + @parameterized.expand( + [ + TEST_CASES_2D, + TEST_CASES_3D, + ] + ) + def test_output_shape(self, class_args, img_shape): + key = "img" + bias_field = RandBiasFieldd(keys=[key], **class_args) + img = np.random.rand(*img_shape) + output = bias_field({key: img}) + np.testing.assert_equal(output[key].shape, img_shape) + np.testing.assert_equal(output[key].dtype, bias_field.rand_bias_field.dtype) + + @parameterized.expand([TEST_CASES_2D_ZERO_RANGE]) + def test_zero_range(self, class_args, img_shape): + key = "img" + bias_field = RandBiasFieldd(keys=[key], **class_args) + img = np.random.rand(*img_shape) + output = bias_field({key: img}) + np.testing.assert_equal(output[key], np.zeros(img_shape)) + + @parameterized.expand([TEST_CASES_2D_ONES]) + def test_one_range_input(self, class_args, expected): + key = "img" + bias_field = RandBiasFieldd(keys=[key], **class_args) + img = np.ones([1, 2, 2]) + output = bias_field({key: img}) + np.testing.assert_equal(output[key], expected.astype(bias_field.rand_bias_field.dtype)) + + def test_zero_prob(self): + key = "img" + bias_field = RandBiasFieldd(keys=[key], prob=0.0) + img = np.random.rand(3, 32, 32) + output = bias_field({key: img}) + np.testing.assert_equal(output[key], img) + + +if __name__ == "__main__": + unittest.main() From a397e5d63a157f6764a67ae64a383eda6c520717 Mon Sep 17 00:00:00 2001 From: charliebudd Date: Tue, 30 Mar 2021 10:33:02 +0100 Subject: [PATCH 119/457] Fix for Bilateral Filter Backprop (#1888) * Fixing error in saving non-pytorch-variable parameters for backwards pass Signed-off-by: chaliebudd * Adding unit tests to check bilateral filter backwards runs Signed-off-by: chaliebudd * fixing typo introduced by merge Signed-off-by: chaliebudd * code reformatting Signed-off-by: chaliebudd --- monai/networks/layers/filtering.py | 8 ++++-- tests/test_bilateral_approx_cpu.py | 18 ++++++++++++ tests/test_bilateral_approx_cuda.py | 18 ++++++++++++ tests/test_bilateral_precise.py | 43 ++++++++++++++++++++++++++--- 4 files changed, 80 insertions(+), 7 deletions(-) diff --git a/monai/networks/layers/filtering.py b/monai/networks/layers/filtering.py index 7eca03a280..fc6c0a38b5 100644 --- a/monai/networks/layers/filtering.py +++ b/monai/networks/layers/filtering.py @@ -47,15 +47,17 @@ class BilateralFilter(torch.autograd.Function): @staticmethod def forward(ctx, input, spatial_sigma=5, color_sigma=0.5, fast_approx=True): - ctx.save_for_backward(spatial_sigma, color_sigma, fast_approx) + ctx.ss = spatial_sigma + ctx.cs = color_sigma + ctx.fa = fast_approx output_data = _C.bilateral_filter(input, spatial_sigma, color_sigma, fast_approx) return output_data @staticmethod def backward(ctx, grad_output): - spatial_sigma, color_sigma, fast_approx = ctx.saved_variables + spatial_sigma, color_sigma, fast_approx = ctx.ss, ctx.cs, ctx.fa grad_input = _C.bilateral_filter(grad_output, spatial_sigma, color_sigma, fast_approx) - return grad_input + return grad_input, None, None, None class PHLFilter(torch.autograd.Function): diff --git a/tests/test_bilateral_approx_cpu.py b/tests/test_bilateral_approx_cpu.py index 2b6088a56f..96d60fb22c 100644 --- 
a/tests/test_bilateral_approx_cpu.py
+++ b/tests/test_bilateral_approx_cpu.py
@@ -14,6 +14,7 @@
 import numpy as np
 import torch
 from parameterized import parameterized
+from torch.autograd import gradcheck
 
 from monai.networks.layers.filtering import BilateralFilter
 from tests.utils import skip_if_no_cpp_extension
@@ -376,6 +377,23 @@ def test_cpu_approx(self, test_case_description, sigmas, input, expected):
         # Ensure results are as expected
         np.testing.assert_allclose(output, expected, atol=1e-5)
 
+    @parameterized.expand(TEST_CASES)
+    def test_cpu_approx_backwards(self, test_case_description, sigmas, input, expected):
+
+        # Params to determine the implementation to test
+        device = torch.device("cpu")
+        fast_approx = True
+
+        # Prepare input tensor
+        input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=device)
+        input_tensor.requires_grad = True
+
+        # Prepare args
+        args = (input_tensor, *sigmas, fast_approx)
+
+        # Run grad check
+        gradcheck(BilateralFilter.apply, args, raise_exception=False)
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/tests/test_bilateral_approx_cuda.py b/tests/test_bilateral_approx_cuda.py
index fdaba26f72..c95fbed1f5 100644
--- a/tests/test_bilateral_approx_cuda.py
+++ b/tests/test_bilateral_approx_cuda.py
@@ -14,6 +14,7 @@
 import numpy as np
 import torch
 from parameterized import parameterized
+from torch.autograd import gradcheck
 
 from monai.networks.layers.filtering import BilateralFilter
 from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda
@@ -381,6 +382,23 @@ def test_cuda_approx(self, test_case_description, sigmas, input, expected):
         # Ensure results are as expected
         np.testing.assert_allclose(output, expected, atol=1e-2)
 
+    @parameterized.expand(TEST_CASES)
+    def test_cuda_approx_backwards(self, test_case_description, sigmas, input, expected):
+
+        # Params to determine the implementation to test
+        device = torch.device("cuda")
+        fast_approx = True
+
+        # Prepare input tensor
+        input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=device)
+        input_tensor.requires_grad = True
+
+        # Prepare args
+        args = (input_tensor, *sigmas, fast_approx)
+
+        # Run grad check
+        gradcheck(BilateralFilter.apply, args, raise_exception=False)
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/tests/test_bilateral_precise.py b/tests/test_bilateral_precise.py
index db2ee88239..df4d0c2500 100644
--- a/tests/test_bilateral_precise.py
+++ b/tests/test_bilateral_precise.py
@@ -14,6 +14,7 @@
 import numpy as np
 import torch
 from parameterized import parameterized
+from torch.autograd import gradcheck
 
 from monai.networks.layers.filtering import BilateralFilter
 from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda
@@ -361,9 +362,9 @@
 
 
 @skip_if_no_cpp_extension
-class BilateralFilterTestCaseCpuPrecised(unittest.TestCase):
+class BilateralFilterTestCaseCpuPrecise(unittest.TestCase):
     @parameterized.expand(TEST_CASES)
-    def test_cpu_precised(self, test_case_description, sigmas, input, expected):
+    def test_cpu_precise(self, test_case_description, sigmas, input, expected):
 
         # Params to determine the implementation to test
         device = torch.device("cpu")
@@ -376,12 +377,29 @@ def test_cpu_precised(self, test_case_description, sigmas, input, expected):
         # Ensure results are as expected
         np.testing.assert_allclose(output, expected, atol=1e-5)
 
+    @parameterized.expand(TEST_CASES)
+    def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expected):
+
+        # Params to determine the implementation to test
+        device =
torch.device("cpu") + fast_approx = False + + # Prepare input tensor + input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=device) + input_tensor.requires_grad = True + + # Prepare args + args = (input_tensor, *sigmas, fast_approx) + + # Run grad check + gradcheck(BilateralFilter.apply, args, raise_exception=False) + @skip_if_no_cuda @skip_if_no_cpp_extension -class BilateralFilterTestCaseCudaPrecised(unittest.TestCase): +class BilateralFilterTestCaseCudaPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) - def test_cuda_precised(self, test_case_description, sigmas, input, expected): + def test_cuda_precise(self, test_case_description, sigmas, input, expected): # Skip this test if not torch.cuda.is_available(): @@ -398,6 +416,23 @@ def test_cuda_precised(self, test_case_description, sigmas, input, expected): # Ensure result are as expected np.testing.assert_allclose(output, expected, atol=1e-5) + @parameterized.expand(TEST_CASES) + def test_cuda_precise_backwards(self, test_case_description, sigmas, input, expected): + + # Params to determine the implementation to test + device = torch.device("cuda") + fast_approx = False + + # Prepare input tensor + input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=device) + input_tensor.requires_grad = True + + # Prepare args + args = (input_tensor, *sigmas, fast_approx) + + # Run grad check + gradcheck(BilateralFilter.apply, args, raise_exception=False) + if __name__ == "__main__": unittest.main() From d7ffb825f33d73f2c4f4032a2fd8a9a0443ecc24 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Tue, 30 Mar 2021 11:38:52 +0100 Subject: [PATCH 120/457] enhances warping (#1894) * fixes warping Signed-off-by: Wenqi Li * fixes types Signed-off-by: Wenqi Li * update based on comments Signed-off-by: Wenqi Li * fixes warnings in setup.cfg, config, deprecated assertexecption Signed-off-by: Wenqi Li --- monai/config/deviceconfig.py | 3 +- monai/networks/blocks/warp.py | 85 +++++++++++++++++++---------------- setup.cfg | 6 +++ tests/test_detect_envelope.py | 2 +- tests/test_warp.py | 58 +++++++++++++++++++++--- 5 files changed, 107 insertions(+), 47 deletions(-) diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index be77a1d975..213be56b5c 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -131,7 +131,8 @@ def get_system_info() -> OrderedDict: elif output["System"] == "Darwin": _dict_append(output, "Mac version", lambda: platform.mac_ver()[0]) else: - linux_ver = re.search(r'PRETTY_NAME="(.*)"', open("/etc/os-release", "r").read()) + with open("/etc/os-release", "r") as rel_f: + linux_ver = re.search(r'PRETTY_NAME="(.*)"', rel_f.read()) if linux_ver: _dict_append(output, "Linux version", lambda: linux_ver.group(1)) diff --git a/monai/networks/blocks/warp.py b/monai/networks/blocks/warp.py index 35d2a88f12..1013540288 100644 --- a/monai/networks/blocks/warp.py +++ b/monai/networks/blocks/warp.py @@ -1,10 +1,15 @@ +import warnings from typing import List, Optional, Union import torch from torch import nn from torch.nn import functional as F -from monai.utils import GridSamplePadMode +from monai.config.deviceconfig import USE_COMPILED +from monai.networks.layers.spatial_transforms import grid_pull +from monai.utils import GridSampleMode, GridSamplePadMode + +__all__ = ["Warp", "DVF2DDF"] class Warp(nn.Module): @@ -14,7 +19,7 @@ class Warp(nn.Module): def __init__( self, - mode: int = 1, + mode=1, padding_mode: Optional[Union[GridSamplePadMode, str]] = 
GridSamplePadMode.ZEROS, ): """ @@ -33,10 +38,32 @@ def __init__( See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample """ super(Warp, self).__init__() - if mode < 0: - raise ValueError(f"do not support negative mode, got mode={mode}") - self.mode = mode - self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode) + # resolves _interp_mode for different methods + if USE_COMPILED: + self._interp_mode = mode + else: + warnings.warn("monai.networks.blocks.Warp: Using PyTorch native grid_sample.") + self._interp_mode = GridSampleMode.BILINEAR.value # works for both 4D and 5D tensors + if mode == 0: + self._interp_mode = GridSampleMode.NEAREST.value + elif mode == 1: + self._interp_mode = GridSampleMode.BILINEAR.value + elif mode == 3: + self._interp_mode = GridSampleMode.BICUBIC.value # torch.functional.grid_sample only supports 4D + else: + warnings.warn(f"Order-{mode} interpolation is not supported, using linear interpolation.") + + # resolves _padding_mode for different methods + padding_mode = GridSamplePadMode(padding_mode).value + if USE_COMPILED: + if padding_mode == GridSamplePadMode.ZEROS.value: + self._padding_mode = 7 + elif padding_mode == GridSamplePadMode.BORDER.value: + self._padding_mode = 0 + else: + self._padding_mode = 1 # reflection + else: + self._padding_mode = padding_mode # type: ignore @staticmethod def get_reference_grid(ddf: torch.Tensor) -> torch.Tensor: @@ -46,14 +73,7 @@ def get_reference_grid(ddf: torch.Tensor) -> torch.Tensor: grid = grid.to(ddf) return grid - @staticmethod - def normalize_grid(grid: torch.Tensor) -> torch.Tensor: - # (batch, ..., spatial_dims) - for i, dim in enumerate(grid.shape[1:-1]): - grid[..., i] = grid[..., i] * 2 / (dim - 1) - 1 - return grid - - def forward(self, image: torch.Tensor, ddf: torch.Tensor) -> torch.Tensor: + def forward(self, image: torch.Tensor, ddf: torch.Tensor): """ Args: image: Tensor in shape (batch, num_channels, H, W[, D]) @@ -73,34 +93,23 @@ def forward(self, image: torch.Tensor, ddf: torch.Tensor) -> torch.Tensor: grid = self.get_reference_grid(ddf) + ddf grid = grid.permute([0] + list(range(2, 2 + spatial_dims)) + [1]) # (batch, ..., spatial_dims) - if self.mode > 1: - raise ValueError(f"{self.mode}-order interpolation not yet implemented.") - # if not USE_COMPILED: - # raise ValueError(f"cannot perform {self.mode}-order interpolation without C compile.") - # _padding_mode = self.padding_mode.value - # if _padding_mode == "zeros": - # bound = 7 - # elif _padding_mode == "border": - # bound = 0 - # else: - # bound = 1 - # warped_image: torch.Tensor = grid_pull( - # image, - # grid, - # bound=bound, - # extrapolate=True, - # interpolation=self.mode, - # ) - else: - grid = self.normalize_grid(grid) + if not USE_COMPILED: # pytorch native grid_sample + for i, dim in enumerate(grid.shape[1:-1]): + grid[..., i] = grid[..., i] * 2 / (dim - 1) - 1 index_ordering: List[int] = list(range(spatial_dims - 1, -1, -1)) grid = grid[..., index_ordering] # z, y, x -> x, y, z - _interp_mode = "bilinear" if self.mode == 1 else "nearest" - warped_image = F.grid_sample( - image, grid, mode=_interp_mode, padding_mode=self.padding_mode.value, align_corners=True + return F.grid_sample( + image, grid, mode=self._interp_mode, padding_mode=f"{self._padding_mode}", align_corners=True ) - return warped_image + # using csrc resampling + return grid_pull( + image, + grid, + bound=self._padding_mode, + extrapolate=True, + interpolation=self._interp_mode, + ) class DVF2DDF(nn.Module): diff --git a/setup.cfg 
b/setup.cfg index 15e6a6d127..f06c56d001 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,6 +8,12 @@ long_description = file:README.md long_description_content_type = text/markdown; charset=UTF-8 platforms = OS Independent license = Apache License 2.0 +license_files = + LICENSE +project_urls = + Documentation=https://docs.monai.io/ + Bug Tracker=https://github.com/Project-MONAI/MONAI/issues + Source Code=https://github.com/Project-MONAI/MONAI [options] python_requires = >= 3.6 diff --git a/tests/test_detect_envelope.py b/tests/test_detect_envelope.py index 47b3a66305..ded0290de2 100644 --- a/tests/test_detect_envelope.py +++ b/tests/test_detect_envelope.py @@ -156,7 +156,7 @@ def test_no_fft_module_error(self): @SkipIfAtLeastPyTorchVersion((1, 7)) class TestDetectEnvelopeInvalidPyTorch(unittest.TestCase): def test_invalid_pytorch_error(self): - with self.assertRaisesRegexp(InvalidPyTorchVersionError, "version"): + with self.assertRaisesRegex(InvalidPyTorchVersionError, "version"): DetectEnvelope() diff --git a/tests/test_warp.py b/tests/test_warp.py index 613b6fb4ab..a2af441a5b 100644 --- a/tests/test_warp.py +++ b/tests/test_warp.py @@ -3,10 +3,12 @@ import numpy as np import torch from parameterized import parameterized +from torch.autograd import gradcheck +from monai.config.deviceconfig import USE_COMPILED from monai.networks.blocks.warp import Warp -LOW_POWER_TEST_CASES = [ +LOW_POWER_TEST_CASES = [ # run with BUILD_MONAI=1 to test csrc/resample, BUILD_MONAI=0 to test native grid_sample [ {"mode": 0, "padding_mode": "zeros"}, {"image": torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), "ddf": torch.zeros(1, 2, 2, 2)}, @@ -17,31 +19,63 @@ {"image": torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 2, 2, 2)}, torch.tensor([[[[3, 0], [0, 0]]]]), ], + [ + {"mode": 1, "padding_mode": "border"}, + { + "image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), + "ddf": torch.ones(1, 3, 2, 2, 2) * -1, + }, + torch.tensor([[[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]]), + ], + [ + {"mode": 1, "padding_mode": "reflection"}, + { + "image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), + "ddf": torch.ones(1, 3, 2, 2, 2) * -1, + }, + torch.tensor([[[[[7.0, 6.0], [5.0, 4.0]], [[3.0, 2.0], [1.0, 0.0]]]]]), + ], ] -HIGH_POWER_TEST_CASES = [ +CPP_TEST_CASES = [ # high order, BUILD_MONAI=1 to test csrc/resample [ {"mode": 2, "padding_mode": "border"}, { "image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 3, 2, 2, 2) * -1, }, - torch.tensor([[[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]]), + torch.tensor([[[[[0.0000, 0.1250], [0.2500, 0.3750]], [[0.5000, 0.6250], [0.7500, 0.8750]]]]]), + ], + [ + {"mode": 2, "padding_mode": "reflection"}, + { + "image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), + "ddf": torch.ones(1, 3, 2, 2, 2) * -1, + }, + torch.tensor([[[[[5.2500, 4.7500], [4.2500, 3.7500]], [[3.2500, 2.7500], [2.2500, 1.7500]]]]]), + ], + [ + {"mode": 2, "padding_mode": "zeros"}, + { + "image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), + "ddf": torch.ones(1, 3, 2, 2, 2) * -1, + }, + torch.tensor([[[[[0.0000, 0.0020], [0.0039, 0.0410]], [[0.0078, 0.0684], [0.0820, 0.6699]]]]]), ], [ {"mode": 3, "padding_mode": "reflection"}, {"image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 3, 2, 2, 2)}, - torch.tensor([[[[[7, 6], [5, 4]], [[3, 2], [1, 0]]]]]), + torch.tensor([[[[[4.6667, 4.3333], [4.0000, 3.6667]], [[3.3333, 
3.0000], [2.6667, 2.3333]]]]]), ], ] TEST_CASES = LOW_POWER_TEST_CASES -# if USE_COMPILED: -# TEST_CASES += HIGH_POWER_TEST_CASES +if USE_COMPILED: + TEST_CASES += CPP_TEST_CASES class TestWarp(unittest.TestCase): - @parameterized.expand(TEST_CASES) + @parameterized.expand(TEST_CASES, skip_on_empty=True) def test_resample(self, input_param, input_data, expected_val): warp_layer = Warp(**input_param) result = warp_layer(**input_data) @@ -60,6 +94,16 @@ def test_ill_shape(self): with self.assertRaisesRegex(ValueError, ""): warp_layer(image=torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), ddf=torch.zeros(1, 2, 3, 3)) + def test_grad(self): + for m in [0, 1, 2, 3]: + for p in ["zeros", "border"]: + warp_layer = Warp(mode=m, padding_mode=p) + input_image = torch.rand((2, 3, 20, 20), dtype=torch.float64) * 10.0 + ddf = torch.rand((2, 2, 20, 20), dtype=torch.float64) * 2.0 + input_image.requires_grad = True + ddf.requires_grad = False # Jacobian mismatch for output 0 with respect to input 1 + gradcheck(warp_layer, (input_image, ddf), atol=1e-2, eps=1e-2) + if __name__ == "__main__": unittest.main() From 43f482c4ace9cbbc1da50d994edf389ec17f1096 Mon Sep 17 00:00:00 2001 From: "deepsource-autofix[bot]" <62050782+deepsource-autofix[bot]@users.noreply.github.com> Date: Tue, 30 Mar 2021 11:34:26 +0000 Subject: [PATCH 121/457] Remove assert statement from non-test files (#1899) * Remove assert statement from non-test files * Remove redundant `None` default * Use `is` to compare type of objects * Refactor unnecessary `else` / `elif` when `if` block has a `raise` statement * Refactor unnecessary `else` / `elif` when `if` block has a `return` statement * runtests.sh --autofix Signed-off-by: Wenqi Li * autofix/7d8e8b86-113e-4a5a-8d0d-c621aab7b5f2 Signed-off-by: Wenqi Li * fixes assert types Signed-off-by: Wenqi Li Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> Co-authored-by: Wenqi Li --- monai/__init__.py | 2 +- monai/apps/deepgrow/transforms.py | 5 +++-- monai/data/image_reader.py | 2 +- monai/data/utils.py | 2 +- monai/losses/dice.py | 5 ++--- monai/metrics/confusion_matrix.py | 3 +-- monai/metrics/froc.py | 8 ++++---- monai/networks/layers/simplelayers.py | 3 +-- monai/networks/nets/regunet.py | 6 ++++-- monai/transforms/croppad/dictionary.py | 4 ++-- monai/transforms/utility/array.py | 5 ++--- monai/utils/aliases.py | 2 +- monai/utils/jupyter_utils.py | 5 ++--- monai/utils/misc.py | 8 ++++---- 14 files changed, 29 insertions(+), 31 deletions(-) diff --git a/monai/__init__.py b/monai/__init__.py index b9b010f0fb..3bb89cc348 100644 --- a/monai/__init__.py +++ b/monai/__init__.py @@ -19,7 +19,7 @@ version_dict = get_versions() __version__ = version_dict.get("version", "0+unknown") -__revision_id__ = version_dict.get("full-revisionid", None) +__revision_id__ = version_dict.get("full-revisionid") del get_versions, version_dict __copyright__ = "(c) 2020 - 2021 MONAI Consortium" diff --git a/monai/apps/deepgrow/transforms.py b/monai/apps/deepgrow/transforms.py index 644507092d..c58d4c1123 100644 --- a/monai/apps/deepgrow/transforms.py +++ b/monai/apps/deepgrow/transforms.py @@ -115,7 +115,8 @@ def _apply(self, label, sid): label = (label > 0.5).astype(np.float32) blobs_labels = measure.label(label.astype(int), background=0) if dims == 2 else label - assert np.max(blobs_labels) > 0, "Not a valid Label" + if np.max(blobs_labels) <= 0: + raise AssertionError("Not a valid Label") pos_guidance = [] for ridx in range(1, 2 if dims == 3 else 
self.connected_regions + 1): @@ -602,7 +603,7 @@ def __call__(self, data): for i in range(len(clicks)): clicks[i] = list(np.roll(clicks[i], 1)) fg_bg_clicks.append(clicks) - d[self.guidance] = self._apply(fg_bg_clicks[0], fg_bg_clicks[1], factor, d.get(self.slice, None)) + d[self.guidance] = self._apply(fg_bg_clicks[0], fg_bg_clicks[1], factor, d.get(self.slice)) return d diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index 67425c0f47..047693ba55 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -675,7 +675,7 @@ def read(self, data: Union[Sequence[str], str, np.ndarray], **kwargs): """ if (self.reader_lib == "openslide") and (not has_osl): raise ImportError("No module named 'openslide'") - elif (self.reader_lib == "cucim") and (not has_cim): + if (self.reader_lib == "cucim") and (not has_cim): raise ImportError("No module named 'cucim'") img_: List = [] diff --git a/monai/data/utils.py b/monai/data/utils.py index afc460244f..a3d8f3128e 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -339,7 +339,7 @@ def decollate(data: Any, idx: int): if isinstance(data, torch.Tensor): out = data[idx] return torch_to_single(out) - elif isinstance(data, list): + if isinstance(data, list): if len(data) == 0: return data if isinstance(data[0], torch.Tensor): diff --git a/monai/losses/dice.py b/monai/losses/dice.py index 47a1605fdc..65bf47f388 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -286,10 +286,9 @@ def __init__( def w_func(self, grnd): if self.w_type == Weight.SIMPLE: return torch.reciprocal(grnd) - elif self.w_type == Weight.SQUARE: + if self.w_type == Weight.SQUARE: return torch.reciprocal(grnd * grnd) - else: - return torch.ones_like(grnd) + return torch.ones_like(grnd) def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: """ diff --git a/monai/metrics/confusion_matrix.py b/monai/metrics/confusion_matrix.py index a0c840d45a..9c15b320eb 100644 --- a/monai/metrics/confusion_matrix.py +++ b/monai/metrics/confusion_matrix.py @@ -112,8 +112,7 @@ def __call__(self, y_pred: torch.Tensor, y: torch.Tensor): results.append(f) results.append(not_nans) return results - else: - return confusion_matrix + return confusion_matrix def get_confusion_matrix( diff --git a/monai/metrics/froc.py b/monai/metrics/froc.py index ec349967c6..faebbbf7a6 100644 --- a/monai/metrics/froc.py +++ b/monai/metrics/froc.py @@ -45,9 +45,8 @@ def compute_fp_tp_probs( num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation. """ - assert ( - probs.shape == y_coord.shape == x_coord.shape - ), "the shapes for coordinates and probabilities should be the same." + if not (probs.shape == y_coord.shape == x_coord.shape): + raise AssertionError("the shapes for coordinates and probabilities should be the same.") if isinstance(probs, torch.Tensor): probs = probs.detach().cpu().numpy() @@ -97,7 +96,8 @@ def compute_froc_curve_data( num_images: the number of images under evaluation. """ - assert type(fp_probs) == type(tp_probs), "fp and tp probs should have same type." 
+ if type(fp_probs) is not type(tp_probs): + raise AssertionError("fp and tp probs should have same type.") if isinstance(fp_probs, torch.Tensor): fp_probs = fp_probs.detach().cpu().numpy() if isinstance(tp_probs, torch.Tensor): diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index b2af4fcbcd..6737e54da7 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -266,8 +266,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x = torch.as_tensor(x, device=x.device if isinstance(x, torch.Tensor) else None) if torch.is_complex(x): raise ValueError("x must be real.") - else: - x = x.to(dtype=torch.float) + x = x.to(dtype=torch.float) if (self.axis < 0) or (self.axis > len(x.shape) - 1): raise ValueError("Invalid axis for shape of x.") diff --git a/monai/networks/nets/regunet.py b/monai/networks/nets/regunet.py index 4a33ca9dcb..57fa801c8b 100644 --- a/monai/networks/nets/regunet.py +++ b/monai/networks/nets/regunet.py @@ -68,7 +68,8 @@ def __init__( super(RegUNet, self).__init__() if not extract_levels: extract_levels = (depth,) - assert max(extract_levels) == depth + if max(extract_levels) != depth: + raise AssertionError # save parameters self.spatial_dims = spatial_dims @@ -84,7 +85,8 @@ def __init__( if isinstance(encode_kernel_sizes, int): encode_kernel_sizes = [encode_kernel_sizes] * (self.depth + 1) - assert len(encode_kernel_sizes) == self.depth + 1 + if len(encode_kernel_sizes) != self.depth + 1: + raise AssertionError self.encode_kernel_sizes: List[int] = encode_kernel_sizes self.num_channels = [self.num_channel_initial * (2 ** d) for d in range(self.depth + 1)] diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index c3523f3993..64e9f862f9 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -765,8 +765,8 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n d = dict(data) label = d[self.label_key] image = d[self.image_key] if self.image_key else None - fg_indices = d.get(self.fg_indices_key, None) if self.fg_indices_key is not None else None - bg_indices = d.get(self.bg_indices_key, None) if self.bg_indices_key is not None else None + fg_indices = d.get(self.fg_indices_key) if self.fg_indices_key is not None else None + bg_indices = d.get(self.bg_indices_key) if self.bg_indices_key is not None else None self.randomize(label, fg_indices, bg_indices, image) if not isinstance(self.spatial_size, tuple): diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 987542c979..be8f40c526 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -172,10 +172,9 @@ def __call__(self, img: np.ndarray, meta_dict: Optional[Dict] = None): if channel_dim is None: raise ValueError("meta_dict must contain `original_channel_dim` information.") - elif channel_dim == "no_channel": + if channel_dim == "no_channel": return AddChannel()(img) - else: - return AsChannelFirst(channel_dim=channel_dim)(img) + return AsChannelFirst(channel_dim=channel_dim)(img) class RepeatChannel(Transform): diff --git a/monai/utils/aliases.py b/monai/utils/aliases.py index e8192897b8..2b7b29eeb5 100644 --- a/monai/utils/aliases.py +++ b/monai/utils/aliases.py @@ -58,7 +58,7 @@ def resolve_name(name): """ # attempt to resolve an alias with alias_lock: - obj = GlobalAliases.get(name, None) + obj = GlobalAliases.get(name) if name in GlobalAliases and obj is None: 
raise AssertionError diff --git a/monai/utils/jupyter_utils.py b/monai/utils/jupyter_utils.py index df97f0fa4b..3357072cd0 100644 --- a/monai/utils/jupyter_utils.py +++ b/monai/utils/jupyter_utils.py @@ -145,7 +145,7 @@ def tensor_to_images(name: str, tensor: torch.Tensor): """ if tensor.ndim == 4 and tensor.shape[2] > 2 and tensor.shape[3] > 2: return tuple(tensor[0].cpu().data.numpy()) - elif tensor.ndim == 5 and tensor.shape[3] > 2 and tensor.shape[4] > 2: + if tensor.ndim == 5 and tensor.shape[3] > 2 and tensor.shape[4] > 2: dmid = tensor.shape[2] // 2 return tuple(tensor[0, :, dmid].cpu().data.numpy()) @@ -217,8 +217,7 @@ def _get_loss_from_output(output: Union[Dict[str, torch.Tensor], torch.Tensor]) """Returns a single value from the network output, which is a dict or tensor.""" if isinstance(output, dict): return output["loss"].item() - else: - return output.item() + return output.item() class StatusMembers(Enum): diff --git a/monai/utils/misc.py b/monai/utils/misc.py index f9346340cf..ee0963548c 100644 --- a/monai/utils/misc.py +++ b/monai/utils/misc.py @@ -339,13 +339,13 @@ def copy_to_device( if hasattr(obj, "to"): return obj.to(device, non_blocking=non_blocking) - elif isinstance(obj, tuple): + if isinstance(obj, tuple): return tuple(copy_to_device(o, device, non_blocking) for o in obj) - elif isinstance(obj, list): + if isinstance(obj, list): return [copy_to_device(o, device, non_blocking) for o in obj] - elif isinstance(obj, dict): + if isinstance(obj, dict): return {k: copy_to_device(o, device, non_blocking) for k, o in obj.items()} - elif verbose: + if verbose: fn_name = cast(types.FrameType, inspect.currentframe()).f_code.co_name warnings.warn(f"{fn_name} called with incompatible type: " + f"{type(obj)}. Data will be returned unchanged.") From 1612ec3b48b9be76e7a48faa68cda85715643cda Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Tue, 30 Mar 2021 21:45:40 -0400 Subject: [PATCH 122/457] Update WSIReader for CuCIM (#1876) Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/data/image_reader.py | 73 ++++++++++++++++++++++++------------ tests/test_cuimage_reader.py | 36 +++++++++++++++++- 2 files changed, 84 insertions(+), 25 deletions(-) diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index 047693ba55..d41b779a1e 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -20,7 +20,7 @@ from monai.config import DtypeLike, KeysCollection from monai.data.utils import correct_nifti_header_if_necessary from monai.transforms.utility.array import EnsureChannelFirst -from monai.utils import ensure_tuple, optional_import +from monai.utils import ensure_tuple, ensure_tuple_rep, optional_import from .utils import is_supported_format @@ -253,7 +253,7 @@ def _get_meta_dict(self, img) -> Dict: meta_dict["direction"] = itk.array_from_matrix(img.GetDirection()) return meta_dict - def _get_affine(self, img) -> np.ndarray: + def _get_affine(self, img): """ Get or construct the affine matrix of the image, it can be used to correct spacing, orientation or execute spatial transforms. @@ -274,7 +274,7 @@ def _get_affine(self, img) -> np.ndarray: affine[(slice(-1), -1)] = origin return affine - def _get_spatial_shape(self, img) -> np.ndarray: + def _get_spatial_shape(self, img): """ Get the spatial shape of image data, it doesn't contain the channel dim. 
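
The image_reader changes in this patch rework `WSIReader.get_data` (further below) so that `size` may be omitted and `patch_size` may be given as a tuple; for orientation, a sketch of the call pattern being exercised, with the slide path and coordinates as placeholders:

    from monai.data.image_reader import WSIReader

    reader = WSIReader("cuCIM")
    img_obj = reader.read("slide.tiff")  # placeholder path to a whole slide image
    # extract one 256x256 region at level 0; the output is channel-first, e.g. (3, 256, 256)
    patch, meta = reader.get_data(img_obj, location=(1000, 1000), size=(256, 256), level=0)
    print(patch.shape)
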
@@ -406,7 +406,7 @@ def _get_meta_dict(self, img) -> Dict: """ return dict(img.header) - def _get_affine(self, img) -> np.ndarray: + def _get_affine(self, img): """ Get the affine matrix of the image, it can be used to correct spacing, orientation or execute spatial transforms. @@ -417,7 +417,7 @@ def _get_affine(self, img) -> np.ndarray: """ return np.array(img.affine, copy=True) - def _get_spatial_shape(self, img) -> np.ndarray: + def _get_spatial_shape(self, img): """ Get the spatial shape of image data, it doesn't contain the channel dim. @@ -430,7 +430,7 @@ def _get_spatial_shape(self, img) -> np.ndarray: # the img data should have no channel dim or the last dim is channel return np.asarray(img.header["dim"][1 : spatial_rank + 1]) - def _get_array_data(self, img) -> np.ndarray: + def _get_array_data(self, img): """ Get the raw array data of the image, converted to Numpy array. @@ -623,7 +623,7 @@ def _get_meta_dict(self, img) -> Dict: "height": img.height, } - def _get_spatial_shape(self, img) -> np.ndarray: + def _get_spatial_shape(self, img): """ Get the spatial shape of image data, it doesn't contain the channel dim. Args: @@ -697,7 +697,7 @@ def get_data( level: int = 0, dtype: DtypeLike = np.uint8, grid_shape: Tuple[int, int] = (1, 1), - patch_size: Optional[int] = None, + patch_size: Optional[Union[int, Tuple[int, int]]] = None, ): """ Extract regions as numpy array from WSI image and return them. @@ -711,15 +711,15 @@ def get_data( level: the level number, or list of level numbers (default=0) dtype: the data type of output image grid_shape: (row, columns) tuple define a grid to extract patches on that - patch_size: (heigsht, width) the size of extracted patches at the given level + patch_size: (height, width) the size of extracted patches at the given level """ - if size is None: - if location == (0, 0): - # the maximum size is set to WxH - size = (img.shape[0] // (2 ** level), img.shape[1] // (2 ** level)) - print(f"Reading the whole image at level={level} with shape={size}") - else: - raise ValueError("Size need to be provided to extract the region!") + + if self.reader_lib == "openslide" and size is None: + # the maximum size is set to WxH + size = ( + img.shape[0] // (2 ** level) - location[0], + img.shape[1] // (2 ** level) - location[1], + ) region = self._extract_region(img, location=location, size=size, level=level, dtype=dtype) @@ -731,8 +731,12 @@ def get_data( if patch_size is None: patches = region else: + tuple_patch_size = ensure_tuple_rep(patch_size, 2) patches = self._extract_patches( - region, patch_size=(patch_size, patch_size), grid_shape=grid_shape, dtype=dtype + region, + patch_size=tuple_patch_size, # type: ignore + grid_shape=grid_shape, + dtype=dtype, ) return patches, metadata @@ -740,22 +744,43 @@ def get_data( def _extract_region( self, img_obj, - size: Tuple[int, int], + size: Optional[Tuple[int, int]], location: Tuple[int, int] = (0, 0), level: int = 0, dtype: DtypeLike = np.uint8, ): # reverse the order of dimensions for size and location to be compatible with image shape - size = size[::-1] location = location[::-1] - region = img_obj.read_region(location=location, size=size, level=level) - if self.reader_lib == "openslide": - region = region.convert("RGB") - # convert to numpy - region = np.asarray(region, dtype=dtype) + if size is None: + region = img_obj.read_region(location=location, level=level) + else: + size = size[::-1] + region = img_obj.read_region(location=location, size=size, level=level) + region = self.convert_to_rgb_array(region, 
dtype)
 
         return region
 
+    def convert_to_rgb_array(
+        self,
+        raw_region,
+        dtype: DtypeLike = np.uint8,
+    ):
+        """Convert to RGB mode and numpy array"""
+        if self.reader_lib == "openslide":
+            # convert to RGB
+            raw_region = raw_region.convert("RGB")
+            # convert to numpy
+            raw_region = np.asarray(raw_region, dtype=dtype)
+        else:
+            num_channels = len(raw_region.channel_names)
+            # convert to numpy
+            raw_region = np.asarray(raw_region, dtype=dtype)
+            # remove alpha channel if it exists (RGBA)
+            if num_channels > 3:
+                raw_region = raw_region[:, :, :3]
+
+        return raw_region
+
     def _extract_patches(
         self,
         region: np.ndarray,
diff --git a/tests/test_cuimage_reader.py b/tests/test_cuimage_reader.py
index 036d5ad1ae..1b0293f159 100644
--- a/tests/test_cuimage_reader.py
+++ b/tests/test_cuimage_reader.py
@@ -11,7 +11,7 @@
 from monai.utils import optional_import
 
 _, has_cim = optional_import("cucim")
-
+PILImage, has_pil = optional_import("PIL.Image")
 FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff"
 FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL))
 
@@ -62,6 +62,14 @@
     np.array([[[[239]], [[239]], [[239]]], [[[243]], [[243]], [[243]]]]),
 ]
 
+TEST_CASE_RGB_0 = [
+    np.ones((3, 2, 2), dtype=np.uint8),  # CHW
+]
+
+TEST_CASE_RGB_1 = [
+    np.ones((3, 100, 100), dtype=np.uint8),  # CHW
+]
+
 
 class TestCuCIMReader(unittest.TestCase):
     @skipUnless(has_cim, "Requires CuCIM")
@@ -91,6 +99,32 @@ def test_read_patches(self, file_path, patch_info, expected_img):
             self.assertTupleEqual(img.shape, expected_img.shape)
             self.assertIsNone(assert_array_equal(img, expected_img))
 
+    @parameterized.expand([TEST_CASE_RGB_0, TEST_CASE_RGB_1])
+    @skipUnless(has_pil, "Requires PIL")
+    def test_read_rgba(self, img_expected):
+        image = {}
+        reader = WSIReader("cuCIM")
+        for mode in ["RGB", "RGBA"]:
+            file_path = self.create_rgba_image(img_expected, "test_cu_tiff_image", mode=mode)
+            img_obj = reader.read(file_path)
+            image[mode], _ = reader.get_data(img_obj)
+
+        self.assertIsNone(assert_array_equal(image["RGB"], img_expected))
+        self.assertIsNone(assert_array_equal(image["RGBA"], img_expected))
+
+    def create_rgba_image(self, array: np.ndarray, filename_prefix: str, mode: str):
+        file_path = os.path.join(os.path.dirname(__file__), "testing_data", f"{filename_prefix}_{mode}.tiff")
+
+        if mode == "RGBA":
+            array = np.concatenate([array, 255 * np.ones_like(array[0])[np.newaxis]]).astype(np.uint8)
+
+        img_rgb = array.transpose(1, 2, 0)
+
+        image = PILImage.fromarray(img_rgb, mode=mode)
+        image.save(file_path)
+
+        return file_path
+
 
 if __name__ == "__main__":
     unittest.main()

From 8010e2a8efb5c691c6e020a9b5172e41ab5b2a48 Mon Sep 17 00:00:00 2001
From: Behrooz <3968947+behxyz@users.noreply.github.com>
Date: Wed, 31 Mar 2021 03:11:13 -0400
Subject: [PATCH 123/457] Pathology Masked Inference WSI Dataset (#1869)

* Implement MaskedInferenceWSIDataset for pathology inference

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Update pathology init

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Update docs

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Remove last element of cum_num_patches

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Add unittest with multiple cases for MaskedInferenceWSIDataset

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* sort imports in init

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Remove list dataset

Signed-off-by: Behrooz
<3968947+behxyz@users.noreply.github.com>

* Remove try/except and add type hint

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Convert the sample output to a list

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Remove some type hints

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Remove prints

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Update based on comments

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Update patch_size

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Update unittests

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>

* Add more type hints

Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
---
 docs/source/apps.rst                       |   2 +
 monai/apps/pathology/__init__.py           |   2 +-
 monai/apps/pathology/datasets.py           | 199 ++++++++++++++---
 monai/data/image_reader.py                 |   1 -
 tests/test_masked_inference_wsi_dataset.py | 238 +++++++++++++++++++++
 5 files changed, 415 insertions(+), 27 deletions(-)
 create mode 100644 tests/test_masked_inference_wsi_dataset.py

diff --git a/docs/source/apps.rst b/docs/source/apps.rst
index 0c92d4c443..d81607c6b4 100644
--- a/docs/source/apps.rst
+++ b/docs/source/apps.rst
@@ -71,6 +71,8 @@ Applications
   :members:
 .. autoclass:: SmartCachePatchWSIDataset
   :members:
+.. autoclass:: MaskedInferenceWSIDataset
+  :members:
 
 .. automodule:: monai.apps.pathology.utils
 .. autoclass:: PathologyProbNMS
diff --git a/monai/apps/pathology/__init__.py b/monai/apps/pathology/__init__.py
index 3af25365ba..591edf1dad 100644
--- a/monai/apps/pathology/__init__.py
+++ b/monai/apps/pathology/__init__.py
@@ -9,5 +9,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .datasets import PatchWSIDataset, SmartCacheDataset
+from .datasets import MaskedInferenceWSIDataset, PatchWSIDataset, SmartCacheDataset
 from .utils import ProbNMS
diff --git a/monai/apps/pathology/datasets.py b/monai/apps/pathology/datasets.py
index 59f7e3aceb..01902d1ee2 100644
--- a/monai/apps/pathology/datasets.py
+++ b/monai/apps/pathology/datasets.py
@@ -9,15 +9,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
 import sys
-from typing import Callable, List, Optional, Sequence, Tuple, Union
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
 
 import numpy as np
 
 from monai.data import Dataset, SmartCacheDataset
 from monai.data.image_reader import WSIReader
+from monai.utils import ensure_tuple_rep
 
-__all__ = ["PatchWSIDataset", "SmartCachePatchWSIDataset"]
+__all__ = ["PatchWSIDataset", "SmartCachePatchWSIDataset", "MaskedInferenceWSIDataset"]
 
 
 class PatchWSIDataset(Dataset):
@@ -26,13 +28,13 @@ class PatchWSIDataset(Dataset):
     It also reads labels for each patch and provides each patch with its associated class labels.
 
     Args:
-        data: the list of input samples including image, location, and label (see below for more details).
-        region_size: the region to be extracted from the whole slide image.
+        data: the list of input samples including image, location, and label (see the note below for more details).
+        region_size: the size of regions to be extracted from the whole slide image.
         grid_shape: the grid shape on which the patches should be extracted.
-        patch_size: the patches extracted from the region on the grid.
+        patch_size: the size of patches extracted from the region on the grid.
+        transform: transforms to be executed on input data.
         image_reader_name: the name of library to be used for loading whole slide imaging, either CuCIM or OpenSlide.
             Defaults to CuCIM.
-        transform: transforms to be executed on input data.
 
     Note:
         The input data has the following form as an example:
@@ -40,7 +42,7 @@ class PatchWSIDataset(Dataset):
 
         This means from "image1.tiff" extract a region centered at the given location `location`
         with the size of `region_size`, and then extract patches with the size of `patch_size`
-        from a square grid with the shape of `grid_shape`.
+        from a grid with the shape of `grid_shape`.
         Be aware that the `grid_shape` should construct a grid with the same number of elements as `labels`,
         so for this example the `grid_shape` should be (2, 2).
 
@@ -51,27 +53,17 @@ def __init__(
         self,
         data: List,
         region_size: Union[int, Tuple[int, int]],
         grid_shape: Union[int, Tuple[int, int]],
-        patch_size: int,
-        image_reader_name: str = "cuCIM",
+        patch_size: Union[int, Tuple[int, int]],
         transform: Optional[Callable] = None,
+        image_reader_name: str = "cuCIM",
     ):
         super().__init__(data, transform)
 
-        if isinstance(region_size, int):
-            self.region_size = (region_size, region_size)
-        else:
-            self.region_size = region_size
-
-        if isinstance(grid_shape, int):
-            self.grid_shape = (grid_shape, grid_shape)
-        else:
-            self.grid_shape = grid_shape
-
-        self.patch_size = patch_size
-        self.sub_region_size = (self.region_size[0] / self.grid_shape[0], self.region_size[1] / self.grid_shape[1])
+        self.region_size = ensure_tuple_rep(region_size, 2)
+        self.grid_shape = ensure_tuple_rep(grid_shape, 2)
+        self.patch_size = ensure_tuple_rep(patch_size, 2)
 
         self.image_path_list = list({x["image"] for x in self.data})
-        self.image_reader_name = image_reader_name
         self.image_reader = WSIReader(image_reader_name)
         self.wsi_object_dict = None
@@ -116,7 +108,7 @@ class SmartCachePatchWSIDataset(SmartCacheDataset):
         data: the list of input samples including image, location, and label (see `PatchWSIDataset` for more details)
         region_size: the region to be extracted from the whole slide image.
         grid_shape: the grid shape on which the patches should be extracted.
-        patch_size: the patches extracted from the region on the grid.
+        patch_size: the size of patches extracted from the region on the grid.
         image_reader_name: the name of library to be used for loading whole slide imaging, either CuCIM or OpenSlide.
             Defaults to CuCIM.
         transform: transforms to be executed on input data.
@@ -138,7 +130,7 @@ def __init__(
         data: List,
         region_size: Union[int, Tuple[int, int]],
         grid_shape: Union[int, Tuple[int, int]],
-        patch_size: int,
+        patch_size: Union[int, Tuple[int, int]],
         transform: Union[Sequence[Callable], Callable],
         image_reader_name: str = "cuCIM",
         replace_rate: float = 0.5,
@@ -148,7 +140,13 @@ def __init__(
         num_replace_workers: Optional[int] = None,
         progress: bool = True,
     ):
-        patch_wsi_dataset = PatchWSIDataset(data, region_size, grid_shape, patch_size, image_reader_name)
+        patch_wsi_dataset = PatchWSIDataset(
+            data=data,
+            region_size=region_size,
+            grid_shape=grid_shape,
+            patch_size=patch_size,
+            image_reader_name=image_reader_name,
+        )
         super().__init__(
             data=patch_wsi_dataset,  # type: ignore
             transform=transform,
@@ -160,3 +158,154 @@ def __init__(
             progress=progress,
             shuffle=False,
         )
+
+
+class MaskedInferenceWSIDataset(Dataset):
+    """
+    This dataset loads the provided foreground masks at an arbitrary resolution level,
+    and extracts patches based on that mask from the associated whole slide image.
+
+    Args:
+        data: a list of samples, each including the path to the whole slide image and the path to the mask.
+            Like this: `[{"image": "path/to/image1.tiff", "mask": "path/to/mask1.npy"}, ...]`.
+        patch_size: the size of patches to be extracted from the whole slide image for inference.
+        transform: transforms to be executed on extracted patches.
+        image_reader_name: the name of library to be used for loading whole slide imaging, either CuCIM or OpenSlide.
+            Defaults to CuCIM.
+
+    Note:
+        The resulting output (probability maps) after performing inference using this dataset is
+        supposed to be the same size as the foreground mask and not the original WSI image size.
+    """
+
+    def __init__(
+        self,
+        data: List[Dict["str", "str"]],
+        patch_size: Union[int, Tuple[int, int]],
+        transform: Optional[Callable] = None,
+        image_reader_name: str = "cuCIM",
+    ) -> None:
+        super().__init__(data, transform)
+
+        self.patch_size = ensure_tuple_rep(patch_size, 2)
+
+        # set up whole slide image reader
+        self.image_reader_name = image_reader_name
+        self.image_reader = WSIReader(image_reader_name)
+
+        # process data and create a list of dictionaries containing all required data and metadata
+        self.data = self._prepare_data(data)
+
+        # calculate cumulative number of patches for all the samples
+        self.num_patches_per_sample = [len(d["image_locations"]) for d in self.data]
+        self.num_patches = sum(self.num_patches_per_sample)
+        self.cum_num_patches = np.cumsum([0] + self.num_patches_per_sample[:-1])
+
+    def _prepare_data(self, input_data: List[Dict["str", "str"]]) -> List[Dict]:
+        prepared_data = []
+        for sample in input_data:
+            prepared_sample = self._prepare_a_sample(sample)
+            prepared_data.append(prepared_sample)
+        return prepared_data
+
+    def _prepare_a_sample(self, sample: Dict["str", "str"]) -> Dict:
+        """
+        Preprocess input data to load WSIReader object and the foreground mask,
+        and define the locations where patches need to be extracted.
+
+        Args:
+            sample: one sample, a dictionary containing path to the whole slide image and the foreground mask.
+                For example: `{"image": "path/to/image1.tiff", "mask": "path/to/mask1.npy"}`
+
+        Return:
+            A dictionary containing:
+                "name": the base name of the whole slide image,
+                "image": the WSIReader image object,
+                "mask_shape": the size of the foreground mask,
+                "mask_locations": the list of non-zero pixel locations (x, y) on the foreground mask,
+                "image_locations": the list of pixel locations (x, y) on the whole slide image where patches are extracted, and
+                "level": the resolution level of the mask with respect to the whole slide image.
+            }
+        """
+        image = self.image_reader.read(sample["image"])
+        mask = np.load(sample["mask"]).T
+        try:
+            level, ratio = self._calculate_mask_level(image, mask)
+        except ValueError as err:
+            err.args = (sample["mask"],) + err.args
+            raise
+
+        # get all indices for non-zero pixels of the foreground mask
+        mask_locations = np.vstack(mask.nonzero()).T
+
+        # convert mask locations to image locations to extract patches
+        image_locations = (mask_locations + 0.5) * ratio - np.array(self.patch_size) // 2
+
+        return {
+            "name": os.path.splitext(os.path.basename(sample["image"]))[0],
+            "image": image,
+            "mask_shape": mask.shape,
+            "mask_locations": mask_locations.astype(int).tolist(),
+            "image_locations": image_locations.astype(int).tolist(),
+            "level": level,
+        }
+
+    def _calculate_mask_level(self, image: np.ndarray, mask: np.ndarray) -> Tuple[int, float]:
+        """
+        Calculate the level of the mask and its ratio with respect to the whole slide image.
+
+        Args:
+            image: the original whole slide image
+            mask: a mask that can be down-sampled at an arbitrary level.
+                Note that the down-sampling ratio should be 2^N and equal in all dimensions.
+
+        Return:
+            tuple: (level, ratio) where ratio is 2^level
+
+        """
+        image_shape = image.shape
+        mask_shape = mask.shape
+        ratios = [image_shape[i] / mask_shape[i] for i in range(2)]
+        level = np.log2(ratios[0])
+
+        if ratios[0] != ratios[1]:
+            raise ValueError(
+                "Image/Mask ratio across dimensions does not match! "
+                f"ratio 0: {ratios[0]} ({image_shape[0]} / {mask_shape[0]}),"
+                f"ratio 1: {ratios[1]} ({image_shape[1]} / {mask_shape[1]}),"
+            )
+        elif not level.is_integer():
+            raise ValueError(f"Mask is not at a regular level (ratio not power of 2), image / mask ratio: {ratios[0]}")
+
+        return int(level), ratios[0]
+
+    def _load_a_patch(self, index):
+        """
+        Load a sample given the index.
+
+        Since the index is sequential and the patches are coming in a stream from different images,
+        this method first finds the whole slide image and the patch that should be extracted,
+        then it loads the patch and provides it with its image name and the corresponding mask location.
+ """ + sample_num = np.argmax(self.cum_num_patches > index) - 1 + sample = self.data[sample_num] + patch_num = index - self.cum_num_patches[sample_num] + location_on_image = sample["image_locations"][patch_num] + location_on_mask = sample["mask_locations"][patch_num] + + image, _ = self.image_reader.get_data( + img=sample["image"], + location=location_on_image, + size=self.patch_size, + ) + processed_sample = {"image": image, "name": sample["name"], "mask_location": location_on_mask} + return processed_sample + + def __len__(self): + return self.num_patches + + def __getitem__(self, index): + patch = [self._load_a_patch(index)] + if self.transform: + patch = self.transform(patch) + return patch diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index d41b779a1e..08aa6c6bbf 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -727,7 +727,6 @@ def get_data( metadata["spatial_shape"] = size metadata["original_channel_dim"] = -1 region = EnsureChannelFirst()(region, metadata) - if patch_size is None: patches = region else: diff --git a/tests/test_masked_inference_wsi_dataset.py b/tests/test_masked_inference_wsi_dataset.py new file mode 100644 index 0000000000..7c8a815c2e --- /dev/null +++ b/tests/test_masked_inference_wsi_dataset.py @@ -0,0 +1,238 @@ +import os +import unittest +from unittest import skipUnless + +import numpy as np +from numpy.testing import assert_array_equal +from parameterized import parameterized + +from monai.apps.pathology.datasets import MaskedInferenceWSIDataset +from monai.apps.utils import download_url +from monai.utils import optional_import +from tests.utils import skip_if_quick + +_, has_cim = optional_import("cucim") +_, has_osl = optional_import("openslide") + +FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) + +MASK1 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask1.npy") +MASK2 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask2.npy") +MASK4 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask4.npy") + +HEIGHT = 32914 +WIDTH = 46000 + + +def prepare_data(): + + mask = np.zeros((WIDTH // 2, HEIGHT // 2)) + mask[100, 100] = 1 + np.save(MASK1, mask) + mask[100, 100:102] = 1 + np.save(MASK2, mask) + mask[100:102, 100:102] = 1 + np.save(MASK4, mask) + + +TEST_CASE_0 = [ + { + "data": [ + {"image": FILE_PATH, "mask": MASK1}, + ], + "patch_size": 1, + "image_reader_name": "cuCIM", + }, + [ + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [100, 100], + }, + ], +] + +TEST_CASE_1 = [ + { + "data": [{"image": FILE_PATH, "mask": MASK2}], + "patch_size": 1, + "image_reader_name": "cuCIM", + }, + [ + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [100, 100], + }, + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [101, 100], + }, + ], +] + +TEST_CASE_2 = [ + { + "data": [{"image": FILE_PATH, "mask": MASK4}], + "patch_size": 1, + "image_reader_name": "cuCIM", + }, + [ + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [100, 100], + }, + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [100, 101], + }, + { + "image": np.array([[[243]], [[243]], [[243]]], 
dtype=np.uint8), + "name": "CMU-1", + "mask_location": [101, 100], + }, + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [101, 101], + }, + ], +] + +TEST_CASE_3 = [ + { + "data": [ + {"image": FILE_PATH, "mask": MASK1}, + ], + "patch_size": 2, + "image_reader_name": "cuCIM", + }, + [ + { + "image": np.array( + [ + [[243, 243], [243, 243]], + [[243, 243], [243, 243]], + [[243, 243], [243, 243]], + ], + dtype=np.uint8, + ), + "name": "CMU-1", + "mask_location": [100, 100], + }, + ], +] + +TEST_CASE_4 = [ + { + "data": [ + {"image": FILE_PATH, "mask": MASK1}, + {"image": FILE_PATH, "mask": MASK2}, + ], + "patch_size": 1, + "image_reader_name": "cuCIM", + }, + [ + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [100, 100], + }, + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [100, 100], + }, + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [101, 100], + }, + ], +] + + +TEST_CASE_OPENSLIDE_0 = [ + { + "data": [ + {"image": FILE_PATH, "mask": MASK1}, + ], + "patch_size": 1, + "image_reader_name": "OpenSlide", + }, + [ + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [100, 100], + }, + ], +] + +TEST_CASE_OPENSLIDE_1 = [ + { + "data": [{"image": FILE_PATH, "mask": MASK2}], + "patch_size": 1, + "image_reader_name": "OpenSlide", + }, + [ + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [100, 100], + }, + { + "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), + "name": "CMU-1", + "mask_location": [101, 100], + }, + ], +] + + +class TestMaskedInferenceWSIDataset(unittest.TestCase): + def setUp(self): + prepare_data() + download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f") + + @parameterized.expand( + [ + TEST_CASE_0, + TEST_CASE_1, + TEST_CASE_2, + TEST_CASE_3, + TEST_CASE_4, + ] + ) + @skipUnless(has_cim, "Requires CuCIM") + @skip_if_quick + def test_read_patches_cucim(self, input_parameters, expected): + dataset = MaskedInferenceWSIDataset(**input_parameters) + self.compare_samples_expected(dataset, expected) + + @parameterized.expand( + [ + TEST_CASE_OPENSLIDE_0, + TEST_CASE_OPENSLIDE_1, + ] + ) + @skipUnless(has_osl, "Requires OpenSlide") + @skip_if_quick + def test_read_patches_openslide(self, input_parameters, expected): + dataset = MaskedInferenceWSIDataset(**input_parameters) + self.compare_samples_expected(dataset, expected) + + def compare_samples_expected(self, dataset, expected): + for i in range(len(dataset)): + self.assertTupleEqual(dataset[i][0]["image"].shape, expected[i]["image"].shape) + self.assertIsNone(assert_array_equal(dataset[i][0]["image"], expected[i]["image"])) + self.assertEqual(dataset[i][0]["name"], expected[i]["name"]) + self.assertListEqual(dataset[i][0]["mask_location"], expected[i]["mask_location"]) + + +if __name__ == "__main__": + unittest.main() From ca8481f6d43c571abc63c6f08e85e8fe95d541f6 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Wed, 31 Mar 2021 11:07:03 +0100 Subject: [PATCH 124/457] remove unused in_channels in LNCC (#1903) * remove unused in_channels in LNCC Signed-off-by: Wenqi Li * update integration tests Signed-off-by: Wenqi Li * extending warp options Signed-off-by: Wenqi Li --- .gitignore | 2 +- monai/losses/image_dissimilarity.py | 5 -- monai/networks/blocks/warp.py | 81 
++++++++++--------- ...local_normalized_cross_correlation_loss.py | 32 +++----- tests/test_reg_loss_integration.py | 6 +- tests/test_warp.py | 23 ++++-- 6 files changed, 78 insertions(+), 71 deletions(-) diff --git a/.gitignore b/.gitignore index f60641d6f7..4889d2d917 100644 --- a/.gitignore +++ b/.gitignore @@ -125,7 +125,7 @@ temp/ # temporary testing data MedNIST tests/testing_data/MedNIST* tests/testing_data/*Hippocampus* -tests/testing_data/CMU-1.tiff +tests/testing_data/*.tiff # clang format tool .clang-format-bin/ diff --git a/monai/losses/image_dissimilarity.py b/monai/losses/image_dissimilarity.py index 431167447b..67b2d177f6 100644 --- a/monai/losses/image_dissimilarity.py +++ b/monai/losses/image_dissimilarity.py @@ -61,7 +61,6 @@ class LocalNormalizedCrossCorrelationLoss(_Loss): def __init__( self, - in_channels: int, ndim: int = 3, kernel_size: int = 3, kernel_type: str = "rectangular", @@ -71,7 +70,6 @@ def __init__( ) -> None: """ Args: - in_channels: number of input channels ndim: number of spatial ndimensions, {``1``, ``2``, ``3``}. Defaults to 3. kernel_size: kernel spatial size, must be odd. kernel_type: {``"rectangular"``, ``"triangular"``, ``"gaussian"``}. Defaults to ``"rectangular"``. @@ -85,7 +83,6 @@ def __init__( smooth_dr: a small constant added to the denominator to avoid nan. """ super(LocalNormalizedCrossCorrelationLoss, self).__init__(reduction=LossReduction(reduction).value) - self.in_channels = in_channels self.ndim = ndim if self.ndim not in [1, 2, 3]: @@ -119,8 +116,6 @@ def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor: Raises: ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"]. """ - if pred.shape[1] != self.in_channels: - raise ValueError(f"expecting pred with {self.in_channels} channels, got pred of shape {pred.shape}") if pred.ndim - 2 != self.ndim: raise ValueError(f"expecting pred with {self.ndim} spatial dimensions, got pred of shape {pred.shape}") if target.shape != pred.shape: diff --git a/monai/networks/blocks/warp.py b/monai/networks/blocks/warp.py index 1013540288..b9967f2b62 100644 --- a/monai/networks/blocks/warp.py +++ b/monai/networks/blocks/warp.py @@ -1,5 +1,5 @@ import warnings -from typing import List, Optional, Union +from typing import List import torch from torch import nn @@ -7,7 +7,9 @@ from monai.config.deviceconfig import USE_COMPILED from monai.networks.layers.spatial_transforms import grid_pull -from monai.utils import GridSampleMode, GridSamplePadMode +from monai.utils import GridSampleMode, GridSamplePadMode, optional_import + +_C, _ = optional_import("monai._C") __all__ = ["Warp", "DVF2DDF"] @@ -19,51 +21,58 @@ class Warp(nn.Module): def __init__( self, - mode=1, - padding_mode: Optional[Union[GridSamplePadMode, str]] = GridSamplePadMode.ZEROS, + mode=GridSampleMode.BILINEAR.value, + padding_mode=GridSamplePadMode.BORDER.value, ): """ - Args: - mode: interpolation mode to calculate output values, defaults to 1. - Possible values are:: - - - 0 or 'nearest' or InterpolationType.nearest - - 1 or 'linear' or InterpolationType.linear - - 2 or 'quadratic' or InterpolationType.quadratic - - 3 or 'cubic' or InterpolationType.cubic - - 4 or 'fourth' or InterpolationType.fourth - - etc. - padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} - Padding mode for outside grid values. Defaults to ``"border"``. 
- See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample + For pytorch native APIs, the possible values are: + + - mode: ``"nearest"``, ``"bilinear"``, ``"bicubic"``. + - padding_mode: ``"zeros"``, ``"border"``, ``"reflection"`` + + See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample + + For MONAI C++/CUDA extensions, the possible values are: + + - mode: ``"nearest"``, ``"bilinear"``, ``"bicubic"``, 0, 1, ... + - padding_mode: ``"zeros"``, ``"border"``, ``"reflection"``, 0, 1, ... + + See also: :py:class:`monai.networks.layers.grid_pull` """ super(Warp, self).__init__() # resolves _interp_mode for different methods + if USE_COMPILED: + if mode in (inter.value for inter in GridSampleMode): + mode = GridSampleMode(mode) + if mode == GridSampleMode.BILINEAR: + mode = 1 + elif mode == GridSampleMode.NEAREST: + mode = 0 + elif mode == GridSampleMode.BICUBIC: + mode = 3 + else: + mode = 1 # default to linear self._interp_mode = mode else: warnings.warn("monai.networks.blocks.Warp: Using PyTorch native grid_sample.") - self._interp_mode = GridSampleMode.BILINEAR.value # works for both 4D and 5D tensors - if mode == 0: - self._interp_mode = GridSampleMode.NEAREST.value - elif mode == 1: - self._interp_mode = GridSampleMode.BILINEAR.value - elif mode == 3: - self._interp_mode = GridSampleMode.BICUBIC.value # torch.functional.grid_sample only supports 4D - else: - warnings.warn(f"Order-{mode} interpolation is not supported, using linear interpolation.") + self._interp_mode = GridSampleMode(mode).value # resolves _padding_mode for different methods - padding_mode = GridSamplePadMode(padding_mode).value if USE_COMPILED: - if padding_mode == GridSamplePadMode.ZEROS.value: - self._padding_mode = 7 - elif padding_mode == GridSamplePadMode.BORDER.value: - self._padding_mode = 0 - else: - self._padding_mode = 1 # reflection + if padding_mode in (pad.value for pad in GridSamplePadMode): + padding_mode = GridSamplePadMode(padding_mode) + if padding_mode == GridSamplePadMode.ZEROS: + padding_mode = 7 + elif padding_mode == GridSamplePadMode.BORDER: + padding_mode = 0 + elif padding_mode == GridSamplePadMode.REFLECTION: + padding_mode = 1 + else: + padding_mode = 0 # default to nearest + self._padding_mode = padding_mode else: - self._padding_mode = padding_mode # type: ignore + self._padding_mode = GridSamplePadMode(padding_mode).value @staticmethod def get_reference_grid(ddf: torch.Tensor) -> torch.Tensor: @@ -125,8 +134,8 @@ class DVF2DDF(nn.Module): def __init__( self, num_steps: int = 7, - mode: int = 1, - padding_mode: Optional[Union[GridSamplePadMode, str]] = GridSamplePadMode.ZEROS, + mode=GridSampleMode.BILINEAR.value, + padding_mode=GridSamplePadMode.ZEROS.value, ): super(DVF2DDF, self).__init__() if num_steps <= 0: diff --git a/tests/test_local_normalized_cross_correlation_loss.py b/tests/test_local_normalized_cross_correlation_loss.py index bddaedb54a..bb0bd7b642 100644 --- a/tests/test_local_normalized_cross_correlation_loss.py +++ b/tests/test_local_normalized_cross_correlation_loss.py @@ -21,7 +21,7 @@ TEST_CASES = [ [ - {"in_channels": 1, "ndim": 1, "kernel_type": "rectangular", "reduction": "sum"}, + {"ndim": 1, "kernel_type": "rectangular", "reduction": "sum"}, { "pred": torch.arange(0, 3).reshape(1, 1, -1).to(dtype=torch.float, device=device), "target": torch.arange(0, 3).reshape(1, 1, -1).to(dtype=torch.float, device=device), @@ -29,7 +29,7 @@ -1.0 * 3, ], [ - {"in_channels": 1, "ndim": 1, "kernel_type": "rectangular"}, + {"ndim": 1, "kernel_type": 
"rectangular"}, { "pred": torch.arange(0, 3).reshape(1, 1, -1).to(dtype=torch.float, device=device), "target": torch.arange(0, 3).reshape(1, 1, -1).to(dtype=torch.float, device=device), @@ -37,7 +37,7 @@ -1.0, ], [ - {"in_channels": 1, "ndim": 2, "kernel_type": "rectangular"}, + {"ndim": 2, "kernel_type": "rectangular"}, { "pred": torch.arange(0, 3).reshape(1, 1, -1, 1).expand(1, 1, 3, 3).to(dtype=torch.float, device=device), "target": torch.arange(0, 3).reshape(1, 1, -1, 1).expand(1, 1, 3, 3).to(dtype=torch.float, device=device), @@ -45,7 +45,7 @@ -1.0, ], [ - {"in_channels": 1, "ndim": 3, "kernel_type": "rectangular"}, + {"ndim": 3, "kernel_type": "rectangular"}, { "pred": torch.arange(0, 3) .reshape(1, 1, -1, 1, 1) @@ -59,7 +59,7 @@ -1.0, ], [ - {"in_channels": 3, "ndim": 3, "kernel_type": "rectangular"}, + {"ndim": 3, "kernel_type": "rectangular"}, { "pred": torch.arange(0, 3) .reshape(1, 1, -1, 1, 1) @@ -74,7 +74,7 @@ -0.95801723, ], [ - {"in_channels": 3, "ndim": 3, "kernel_type": "triangular", "kernel_size": 5}, + {"ndim": 3, "kernel_type": "triangular", "kernel_size": 5}, { "pred": torch.arange(0, 5) .reshape(1, 1, -1, 1, 1) @@ -89,7 +89,7 @@ -0.918672, ], [ - {"in_channels": 3, "ndim": 3, "kernel_type": "gaussian"}, + {"ndim": 3, "kernel_type": "gaussian"}, { "pred": torch.arange(0, 3) .reshape(1, 1, -1, 1, 1) @@ -113,13 +113,7 @@ def test_shape(self, input_param, input_data, expected_val): np.testing.assert_allclose(result.detach().cpu().numpy(), expected_val, rtol=1e-5) def test_ill_shape(self): - loss = LocalNormalizedCrossCorrelationLoss(in_channels=3, ndim=3) - # in_channel unmatch - with self.assertRaisesRegex(ValueError, ""): - loss.forward( - torch.ones((1, 2, 3, 3, 3), dtype=torch.float, device=device), - torch.ones((1, 2, 3, 3, 3), dtype=torch.float, device=device), - ) + loss = LocalNormalizedCrossCorrelationLoss(ndim=3) # ndim unmatch with self.assertRaisesRegex(ValueError, ""): loss.forward( @@ -137,15 +131,15 @@ def test_ill_opts(self): pred = torch.ones((1, 3, 3, 3, 3), dtype=torch.float) target = torch.ones((1, 3, 3, 3, 3), dtype=torch.float) with self.assertRaisesRegex(ValueError, ""): - LocalNormalizedCrossCorrelationLoss(in_channels=3, kernel_type="unknown")(pred, target) + LocalNormalizedCrossCorrelationLoss(kernel_type="unknown")(pred, target) with self.assertRaisesRegex(ValueError, ""): - LocalNormalizedCrossCorrelationLoss(in_channels=3, kernel_type=None)(pred, target) + LocalNormalizedCrossCorrelationLoss(kernel_type=None)(pred, target) with self.assertRaisesRegex(ValueError, ""): - LocalNormalizedCrossCorrelationLoss(in_channels=3, kernel_size=4)(pred, target) + LocalNormalizedCrossCorrelationLoss(kernel_size=4)(pred, target) with self.assertRaisesRegex(ValueError, ""): - LocalNormalizedCrossCorrelationLoss(in_channels=3, reduction="unknown")(pred, target) + LocalNormalizedCrossCorrelationLoss(reduction="unknown")(pred, target) with self.assertRaisesRegex(ValueError, ""): - LocalNormalizedCrossCorrelationLoss(in_channels=3, reduction=None)(pred, target) + LocalNormalizedCrossCorrelationLoss(reduction=None)(pred, target) # def test_script(self): diff --git a/tests/test_reg_loss_integration.py b/tests/test_reg_loss_integration.py index b512add2e9..b864a64647 100644 --- a/tests/test_reg_loss_integration.py +++ b/tests/test_reg_loss_integration.py @@ -22,17 +22,17 @@ [BendingEnergyLoss, {}, ["pred"]], [ LocalNormalizedCrossCorrelationLoss, - {"in_channels": 1, "kernel_size": 7, "kernel_type": "rectangular"}, + {"kernel_size": 7, "kernel_type": "rectangular"}, 
["pred", "target"], ], [ LocalNormalizedCrossCorrelationLoss, - {"in_channels": 1, "kernel_size": 5, "kernel_type": "triangular"}, + {"kernel_size": 5, "kernel_type": "triangular"}, ["pred", "target"], ], [ LocalNormalizedCrossCorrelationLoss, - {"in_channels": 1, "kernel_size": 3, "kernel_type": "gaussian"}, + {"kernel_size": 3, "kernel_type": "gaussian"}, ["pred", "target"], ], [GlobalMutualInformationLoss, {"num_bins": 10}, ["pred", "target"]], diff --git a/tests/test_warp.py b/tests/test_warp.py index a2af441a5b..4ed1562b29 100644 --- a/tests/test_warp.py +++ b/tests/test_warp.py @@ -7,20 +7,21 @@ from monai.config.deviceconfig import USE_COMPILED from monai.networks.blocks.warp import Warp +from monai.utils import GridSampleMode, GridSamplePadMode LOW_POWER_TEST_CASES = [ # run with BUILD_MONAI=1 to test csrc/resample, BUILD_MONAI=0 to test native grid_sample [ - {"mode": 0, "padding_mode": "zeros"}, + {"mode": "nearest", "padding_mode": "zeros"}, {"image": torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), "ddf": torch.zeros(1, 2, 2, 2)}, torch.arange(4).reshape((1, 1, 2, 2)), ], [ - {"mode": 1, "padding_mode": "zeros"}, + {"mode": "bilinear", "padding_mode": "zeros"}, {"image": torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 2, 2, 2)}, torch.tensor([[[[3, 0], [0, 0]]]]), ], [ - {"mode": 1, "padding_mode": "border"}, + {"mode": "bilinear", "padding_mode": "border"}, { "image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 3, 2, 2, 2) * -1, @@ -28,7 +29,7 @@ torch.tensor([[[[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]]), ], [ - {"mode": 1, "padding_mode": "reflection"}, + {"mode": "bilinear", "padding_mode": "reflection"}, { "image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 3, 2, 2, 2) * -1, @@ -62,6 +63,14 @@ }, torch.tensor([[[[[0.0000, 0.0020], [0.0039, 0.0410]], [[0.0078, 0.0684], [0.0820, 0.6699]]]]]), ], + [ + {"mode": 2, "padding_mode": 7}, + { + "image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), + "ddf": torch.ones(1, 3, 2, 2, 2) * -1, + }, + torch.tensor([[[[[0.0000, 0.0020], [0.0039, 0.0410]], [[0.0078, 0.0684], [0.0820, 0.6699]]]]]), + ], [ {"mode": 3, "padding_mode": "reflection"}, {"image": torch.arange(8).reshape((1, 1, 2, 2, 2)).to(dtype=torch.float), "ddf": torch.ones(1, 3, 2, 2, 2)}, @@ -95,9 +104,9 @@ def test_ill_shape(self): warp_layer(image=torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), ddf=torch.zeros(1, 2, 3, 3)) def test_grad(self): - for m in [0, 1, 2, 3]: - for p in ["zeros", "border"]: - warp_layer = Warp(mode=m, padding_mode=p) + for b in GridSampleMode: + for p in GridSamplePadMode: + warp_layer = Warp(mode=b.value, padding_mode=p.value) input_image = torch.rand((2, 3, 20, 20), dtype=torch.float64) * 10.0 ddf = torch.rand((2, 2, 20, 20), dtype=torch.float64) * 2.0 input_image.requires_grad = True From b1d45ee4f11ffa7501ecd38afc568c512e8b9744 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Wed, 31 Mar 2021 20:39:53 +0800 Subject: [PATCH 125/457] 1905 Enhance the brats transform (#1906) * [DLMED] enhance brats transform Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot --- monai/transforms/utility/array.py | 4 ++++ tests/test_convert_to_multi_channel.py | 15 +++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index be8f40c526..4ad0676fba 100644 --- 
a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -659,6 +659,10 @@ class ConvertToMultiChannelBasedOnBratsClasses(Transform): """ def __call__(self, img: np.ndarray) -> np.ndarray: + # if img has channel dim, squeeze it + if img.ndim == 4 and img.shape[0] == 1: + img = np.squeeze(img, axis=0) + result = [] # merge labels 1 (tumor non-enh) and 4 (tumor enh) to TC result.append(np.logical_or(img == 1, img == 4)) diff --git a/tests/test_convert_to_multi_channel.py b/tests/test_convert_to_multi_channel.py index 03510ad38c..2f7a38e6e4 100644 --- a/tests/test_convert_to_multi_channel.py +++ b/tests/test_convert_to_multi_channel.py @@ -16,14 +16,25 @@ from monai.transforms import ConvertToMultiChannelBasedOnBratsClasses -TEST_CASE = [ +TEST_CASE_1 = [ np.array([[0, 1, 2], [1, 2, 4], [0, 1, 4]]), np.array([[[0, 1, 0], [1, 0, 1], [0, 1, 1]], [[0, 1, 1], [1, 1, 1], [0, 1, 1]], [[0, 0, 0], [0, 0, 1], [0, 0, 1]]]), ] +TEST_CASE_2 = [ + np.array([[[[0, 1], [1, 2]], [[2, 4], [4, 4]]]]), + np.array( + [ + [[[0, 1], [1, 0]], [[0, 1], [1, 1]]], + [[[0, 1], [1, 1]], [[1, 1], [1, 1]]], + [[[0, 0], [0, 0]], [[0, 1], [1, 1]]], + ] + ), +] + class TestConvertToMultiChannel(unittest.TestCase): - @parameterized.expand([TEST_CASE]) + @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_type_shape(self, data, expected_result): result = ConvertToMultiChannelBasedOnBratsClasses()(data) np.testing.assert_equal(result, expected_result) From 2a7e1ced2db7a7f873b992e41c1d0aca8c58e428 Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Wed, 31 Mar 2021 22:22:25 +0800 Subject: [PATCH 126/457] Fix stdshift type error (#1907) * Fix stdshift type error Signed-off-by: Yiheng Wang * Add dtype testcases Signed-off-by: Yiheng Wang --- monai/transforms/intensity/array.py | 24 ++++++++++++++++++------ monai/transforms/intensity/dictionary.py | 13 +++++++++---- tests/test_std_shift_intensity.py | 15 ++++++++++++--- tests/test_std_shift_intensityd.py | 16 +++++++++++++--- 4 files changed, 52 insertions(+), 16 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index d284f43d24..f89e381daa 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -150,12 +150,20 @@ class StdShiftIntensity(Transform): nonzero: whether only count non-zero values. channel_wise: if True, calculate on each channel separately. Please ensure that the first dimension represents the channel of the image if True. + dtype: output data type, defaults to float32. """ - def __init__(self, factor: float, nonzero: bool = False, channel_wise: bool = False) -> None: + def __init__( + self, + factor: float, + nonzero: bool = False, + channel_wise: bool = False, + dtype: DtypeLike = np.float32, + ) -> None: self.factor = factor self.nonzero = nonzero self.channel_wise = channel_wise + self.dtype = dtype def _stdshift(self, img: np.ndarray) -> np.ndarray: slices = (img != 0) if self.nonzero else np.ones(img.shape, dtype=bool) @@ -169,8 +177,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray: """ Apply the transform to `img`. """ - if img.dtype != float: - img = img.astype(float) + img = img.astype(self.dtype) if self.channel_wise: for i, d in enumerate(img): img[i] = self._stdshift(d) @@ -191,6 +198,7 @@ def __init__( prob: float = 0.1, nonzero: bool = False, channel_wise: bool = False, + dtype: DtypeLike = np.float32, ) -> None: """ Args: @@ -199,6 +207,7 @@ def __init__( prob: probability of std shift. 
nonzero: whether only count non-zero values. channel_wise: if True, calculate on each channel separately. + dtype: output data type, defaults to float32. """ RandomizableTransform.__init__(self, prob) @@ -210,6 +219,7 @@ def __init__( self.factors = (min(factors), max(factors)) self.nonzero = nonzero self.channel_wise = channel_wise + self.dtype = dtype def randomize(self, data: Optional[Any] = None) -> None: self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) @@ -222,7 +232,9 @@ def __call__(self, img: np.ndarray) -> np.ndarray: self.randomize() if not self._do_transform: return img - shifter = StdShiftIntensity(factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise) + shifter = StdShiftIntensity( + factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise, dtype=self.dtype + ) return shifter(img) @@ -313,7 +325,7 @@ class RandBiasField(RandomizableTransform): degree: degree of freedom of the polynomials. The value should be no less than 1. Defaults to 3. coeff_range: range of the random coefficients. Defaults to (0.0, 0.1). - dtype: output data type, defaut to float32. + dtype: output data type, defaults to float32. prob: probability to do random bias field. """ @@ -403,7 +415,7 @@ class NormalizeIntensity(Transform): nonzero: whether only normalize non-zero values. channel_wise: if using calculated mean and std, calculate on each channel separately or calculate on the entire image directly. - dtype: output data type, defaut to float32. + dtype: output data type, defaults to float32. """ def __init__( diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 269f240ae8..517c34cbf2 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -233,6 +233,7 @@ def __init__( factor: float, nonzero: bool = False, channel_wise: bool = False, + dtype: DtypeLike = np.float32, allow_missing_keys: bool = False, ) -> None: """ @@ -243,10 +244,11 @@ def __init__( nonzero: whether only count non-zero values. channel_wise: if True, calculate on each channel separately. Please ensure that the first dimension represents the channel of the image if True. + dtype: output data type, defaults to float32. allow_missing_keys: don't raise exception if key is missing. """ super().__init__(keys, allow_missing_keys) - self.shifter = StdShiftIntensity(factor, nonzero, channel_wise) + self.shifter = StdShiftIntensity(factor, nonzero, channel_wise, dtype) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) @@ -267,6 +269,7 @@ def __init__( prob: float = 0.1, nonzero: bool = False, channel_wise: bool = False, + dtype: DtypeLike = np.float32, allow_missing_keys: bool = False, ) -> None: """ @@ -278,6 +281,7 @@ def __init__( prob: probability of std shift. nonzero: whether only count non-zero values. channel_wise: if True, calculate on each channel separately. + dtype: output data type, defaults to float32. allow_missing_keys: don't raise exception if key is missing. 
""" MapTransform.__init__(self, keys, allow_missing_keys) @@ -291,6 +295,7 @@ def __init__( self.factors = (min(factors), max(factors)) self.nonzero = nonzero self.channel_wise = channel_wise + self.dtype = dtype def randomize(self, data: Optional[Any] = None) -> None: self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) @@ -301,7 +306,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda self.randomize() if not self._do_transform: return d - shifter = StdShiftIntensity(self.factor, self.nonzero, self.channel_wise) + shifter = StdShiftIntensity(self.factor, self.nonzero, self.channel_wise, self.dtype) for key in self.key_iterator(d): d[key] = shifter(d[key]) return d @@ -412,7 +417,7 @@ def __init__( degree: degree of freedom of the polynomials. The value should be no less than 1. Defaults to 3. coeff_range: range of the random coefficients. Defaults to (0.0, 0.1). - dtype: output data type, defaut to float32. + dtype: output data type, defaults to float32. prob: probability to do random bias field. allow_missing_keys: don't raise exception if key is missing. @@ -449,7 +454,7 @@ class NormalizeIntensityd(MapTransform): nonzero: whether only normalize non-zero values. channel_wise: if using calculated mean and std, calculate on each channel separately or calculate on the entire image directly. - dtype: output data type, defaut to float32. + dtype: output data type, defaults to float32. allow_missing_keys: don't raise exception if key is missing. """ diff --git a/tests/test_std_shift_intensity.py b/tests/test_std_shift_intensity.py index a0a3b3ff0f..f317330435 100644 --- a/tests/test_std_shift_intensity.py +++ b/tests/test_std_shift_intensity.py @@ -34,7 +34,7 @@ def test_zerostd(self): factor = np.random.rand() std_shifter = StdShiftIntensity(factor=factor, nonzero=nonzero, channel_wise=channel_wise) result = std_shifter(image) - np.testing.assert_equal(result, image) + np.testing.assert_allclose(result, image, rtol=1e-5) def test_nonzero(self): image = np.asarray([[4.0, 0.0, 2.0], [0, 2, 4]]) # std = 1 @@ -42,7 +42,7 @@ def test_nonzero(self): std_shifter = StdShiftIntensity(factor=factor, nonzero=True) result = std_shifter(image) expected = np.asarray([[4 + factor, 0, 2 + factor], [0, 2 + factor, 4 + factor]]) - np.testing.assert_equal(result, expected) + np.testing.assert_allclose(result, expected, rtol=1e-5) def test_channel_wise(self): image = np.stack((np.asarray([1.0, 2.0]), np.asarray([1.0, 1.0]))) # std: 0.5, 0 @@ -50,7 +50,16 @@ def test_channel_wise(self): std_shifter = StdShiftIntensity(factor=factor, channel_wise=True) result = std_shifter(image) expected = np.stack((np.asarray([1 + 0.5 * factor, 2 + 0.5 * factor]), np.asarray([1, 1]))) - np.testing.assert_equal(result, expected) + np.testing.assert_allclose(result, expected, rtol=1e-5) + + def test_dtype(self): + trans_dtype = np.float32 + for dtype in [int, np.float32, np.float64]: + image = np.random.rand(2, 2, 2).astype(dtype) + factor = np.random.rand() + std_shifter = StdShiftIntensity(factor=factor, dtype=trans_dtype) + result = std_shifter(image) + np.testing.assert_equal(result.dtype, trans_dtype) if __name__ == "__main__": diff --git a/tests/test_std_shift_intensityd.py b/tests/test_std_shift_intensityd.py index f5c2dd650c..4eb256f1e5 100644 --- a/tests/test_std_shift_intensityd.py +++ b/tests/test_std_shift_intensityd.py @@ -36,7 +36,7 @@ def test_zerostd(self): factor = np.random.rand() std_shifter = StdShiftIntensityd(keys=[key], factor=factor, nonzero=nonzero, 
channel_wise=channel_wise) result = std_shifter({key: image}) - np.testing.assert_equal(result[key], image) + np.testing.assert_allclose(result[key], image, rtol=1e-5) def test_nonzero(self): key = "img" @@ -45,7 +45,7 @@ def test_nonzero(self): std_shifter = StdShiftIntensityd(keys=[key], factor=factor, nonzero=True) result = std_shifter({key: image}) expected = np.asarray([[4 + factor, 0, 2 + factor], [0, 2 + factor, 4 + factor]]) - np.testing.assert_equal(result[key], expected) + np.testing.assert_allclose(result[key], expected, rtol=1e-5) def test_channel_wise(self): key = "img" @@ -54,7 +54,17 @@ def test_channel_wise(self): std_shifter = StdShiftIntensityd(keys=[key], factor=factor, channel_wise=True) result = std_shifter({key: image}) expected = np.stack((np.asarray([1 + 0.5 * factor, 2 + 0.5 * factor]), np.asarray([1, 1]))) - np.testing.assert_equal(result[key], expected) + np.testing.assert_allclose(result[key], expected, rtol=1e-5) + + def test_dtype(self): + key = "img" + trans_dtype = np.float32 + for dtype in [int, np.float32, np.float64]: + image = np.random.rand(2, 2, 2).astype(dtype) + factor = np.random.rand() + std_shifter = StdShiftIntensityd(keys=[key], factor=factor, dtype=trans_dtype) + result = std_shifter({key: image}) + np.testing.assert_equal(result[key].dtype, trans_dtype) if __name__ == "__main__": From 972fa5fd67491bd569d980c0abd9b3694b161bd1 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Wed, 31 Mar 2021 12:35:49 -0400 Subject: [PATCH 127/457] Pathology Probability Map Generator (#1893) * Implement ProbMapGenerator handler Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update doc and init Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Sort init imports Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add unittest for ProbMapGenerator Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Ignore if ignite is not available Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update Engine import Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Exclude from min-test Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Address all the comments Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix file path and dtype Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- docs/source/apps.rst | 4 + monai/apps/pathology/__init__.py | 1 + monai/apps/pathology/handlers.py | 103 +++++++++++++++++++++++ tests/min_tests.py | 1 + tests/test_handler_prob_map_generator.py | 95 +++++++++++++++++++++ 5 files changed, 204 insertions(+) create mode 100644 monai/apps/pathology/handlers.py create mode 100644 tests/test_handler_prob_map_generator.py diff --git a/docs/source/apps.rst b/docs/source/apps.rst index d81607c6b4..fa92a2bc2d 100644 --- a/docs/source/apps.rst +++ b/docs/source/apps.rst @@ -77,3 +77,7 @@ Applications .. automodule:: monai.apps.pathology.utils .. autoclass:: PathologyProbNMS :members: + +.. automodule:: monai.apps.pathology.handlers +.. autoclass:: ProbMapProducer + :members: \ No newline at end of file diff --git a/monai/apps/pathology/__init__.py b/monai/apps/pathology/__init__.py index 591edf1dad..3474a7c10a 100644 --- a/monai/apps/pathology/__init__.py +++ b/monai/apps/pathology/__init__.py @@ -10,4 +10,5 @@ # limitations under the License. 
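A note on the data contract of the new handler before its implementation below: `ProbMapProducer.attach` reads per-image metadata from the evaluator's `dataset.data`, and each iteration reads `engine.state.batch` / `engine.state.output`. A minimal sketch of conforming items (all values illustrative):

# one entry of `dataset.data`, read once by ProbMapProducer.attach():
sample = {
    "name": "slide_0",                       # also used as the output file name
    "mask_shape": (128, 128),                # shape of the probability map to allocate
    "mask_locations": [[10, 10], [10, 11]],  # one entry per patch to be inferred
    "level": 0,                              # WSI pyramid level of the mask
}
# per iteration (batch size 1 here), read from engine.state.batch and engine.state.output:
batch = {"name": ["slide_0"], "mask_location": [[10], [10]]}
output = {"pred": [0.73]}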
 from .datasets import MaskedInferenceWSIDataset, PatchWSIDataset, SmartCacheDataset
+from .handlers import ProbMapProducer
 from .utils import ProbNMS
diff --git a/monai/apps/pathology/handlers.py b/monai/apps/pathology/handlers.py
new file mode 100644
index 0000000000..046e403e0f
--- /dev/null
+++ b/monai/apps/pathology/handlers.py
@@ -0,0 +1,103 @@
+import logging
+import os
+from typing import TYPE_CHECKING, Dict, Optional
+
+import numpy as np
+
+from monai.config import DtypeLike
+from monai.utils import exact_version, optional_import
+
+Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
+if TYPE_CHECKING:
+    from ignite.engine import Engine
+else:
+    Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
+
+
+class ProbMapProducer:
+    """
+    Event handler triggered on completing every iteration to save the probability map
+    """
+
+    def __init__(
+        self,
+        output_dir: str = "./",
+        output_postfix: str = "",
+        dtype: DtypeLike = np.float64,
+        name: Optional[str] = None,
+    ) -> None:
+        """
+        Args:
+            output_dir: output directory to save probability maps.
+            output_postfix: a string appended to all output file names.
+            dtype: the data type in which the probability map is stored. Defaults to np.float64.
+            name: identifier of logging.logger to use, defaulting to `engine.logger`.
+
+        """
+        self.logger = logging.getLogger(name)
+        self._name = name
+        self.output_dir = output_dir
+        self.output_postfix = output_postfix
+        self.dtype = dtype
+        self.prob_map: Dict[str, np.ndarray] = {}
+        self.level: Dict[str, int] = {}
+        self.counter: Dict[str, int] = {}
+        self.num_done_images: int = 0
+        self.num_images: int = 0
+
+    def attach(self, engine: Engine) -> None:
+        """
+        Args:
+            engine: Ignite Engine, it can be a trainer, validator or evaluator.
+        """
+
+        self.num_images = len(engine.data_loader.dataset.data)
+
+        for sample in engine.data_loader.dataset.data:
+            name = sample["name"]
+            self.prob_map[name] = np.zeros(sample["mask_shape"], dtype=self.dtype)
+            self.counter[name] = len(sample["mask_locations"])
+            self.level[name] = sample["level"]
+
+        if self._name is None:
+            self.logger = engine.logger
+        if not engine.has_event_handler(self, Events.ITERATION_COMPLETED):
+            engine.add_event_handler(Events.ITERATION_COMPLETED, self)
+        if not engine.has_event_handler(self.finalize, Events.COMPLETED):
+            engine.add_event_handler(Events.COMPLETED, self.finalize)
+
+    def __call__(self, engine: Engine) -> None:
+        """
+        This method assumes that `engine.state.batch` carries the `name` and `mask_location`
+        metadata of each sample, and that `engine.state.output` carries the `pred` values.
+
+        Args:
+            engine: Ignite Engine, it can be a trainer, validator or evaluator.
+        """
+        names = engine.state.batch["name"]
+        locs = engine.state.batch["mask_location"]
+        pred = engine.state.output["pred"]
+        for i, name in enumerate(names):
+            self.prob_map[name][locs[0][i], locs[1][i]] = pred[i]
+            self.counter[name] -= 1
+            if self.counter[name] == 0:
+                self.save_prob_map(name)
+
+    def save_prob_map(self, name: str) -> None:
+        """
+        This method saves the probability map for an image when its inference is finished,
+        and deletes that probability map from memory.
+
+        Args:
+            name: the name of image to be saved.
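+
+        For example (illustrative values): with ``output_dir="./out"`` and
+        ``output_postfix="_prob"``, the map of an image named ``"slide_0"``
+        is written to ``./out/slide_0_prob.npy``.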
+ """ + file_path = os.path.join(self.output_dir, name) + np.save(file_path + self.output_postfix + ".npy", self.prob_map[name]) + + self.num_done_images += 1 + self.logger.info(f"Inference of '{name}' is done [{self.num_done_images}/{self.num_images}]!") + del self.prob_map[name] + del self.counter[name] + del self.level[name] + + def finalize(self, engine: Engine): + self.logger.info(f"Probability map is created for {self.num_done_images}/{self.num_images} images!") diff --git a/tests/min_tests.py b/tests/min_tests.py index 83c1ceea9f..e896e81c70 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -43,6 +43,7 @@ def run_testsuit(): "test_handler_confusion_matrix_dist", "test_handler_hausdorff_distance", "test_handler_mean_dice", + "test_handler_prob_map_generator", "test_handler_rocauc", "test_handler_rocauc_dist", "test_handler_segmentation_saver", diff --git a/tests/test_handler_prob_map_generator.py b/tests/test_handler_prob_map_generator.py new file mode 100644 index 0000000000..4882060be9 --- /dev/null +++ b/tests/test_handler_prob_map_generator.py @@ -0,0 +1,95 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest + +import numpy as np +import torch +from ignite.engine import Engine +from parameterized import parameterized +from torch.utils.data import DataLoader + +from monai.apps.pathology.handlers import ProbMapProducer +from monai.data.dataset import Dataset +from monai.engines import Evaluator +from monai.handlers import ValidationHandler + +TEST_CASE_0 = ["image_inference_output_1", 2] +TEST_CASE_1 = ["image_inference_output_2", 9] +TEST_CASE_2 = ["image_inference_output_3", 1000] + + +class TestDataset(Dataset): + def __init__(self, name, size): + self.data = [ + { + "name": name, + "mask_shape": (size, size), + "mask_locations": [[i, i] for i in range(size)], + "level": 0, + } + ] + self.len = size + + def __len__(self): + return self.len + + def __getitem__(self, index): + return { + "name": self.data[0]["name"], + "mask_location": self.data[0]["mask_locations"][index], + "pred": index + 1, + } + + +class TestEvaluator(Evaluator): + def _iteration(self, engine, batchdata): + return batchdata + + +class TestHandlerProbMapGenerator(unittest.TestCase): + @parameterized.expand( + [ + TEST_CASE_0, + TEST_CASE_1, + TEST_CASE_2, + ] + ) + def test_prob_map_generator(self, name, size): + # set up dataset + dataset = TestDataset(name, size) + data_loader = DataLoader(dataset, batch_size=1) + + # set up engine + def inference(enging, batch): + pass + + engine = Engine(inference) + + # add ProbMapGenerator() to evaluator + output_dir = os.path.join(os.path.dirname(__file__), "testing_data") + prob_map_gen = ProbMapProducer(output_dir=output_dir) + + evaluator = TestEvaluator(torch.device("cpu:0"), data_loader, size, val_handlers=[prob_map_gen]) + + # set up validation handler + validation = ValidationHandler(evaluator, interval=1) + validation.attach(engine) + + engine.run(data_loader) + + prob_map = 
np.load(os.path.join(output_dir, name + ".npy"))
+        self.assertListEqual(np.diag(prob_map).astype(int).tolist(), list(range(1, size + 1)))
+
+
+if __name__ == "__main__":
+    unittest.main()

From 1314701c15623422574b0153d746666dc6004454 Mon Sep 17 00:00:00 2001
From: Behrooz <3968947+behxyz@users.noreply.github.com>
Date: Wed, 31 Mar 2021 15:03:30 -0400
Subject: [PATCH 128/457] Tumor FROC Evaluation (#1878)

* Implement MaskedInferenceWSIDataset for pathology inference
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Update pathology init
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Update docs
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Remove last element of cum_num_patches
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Add unittest with multiple cases for MaskedInferenceWSIDataset
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* sort imports in init
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Remove list dataset
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Remove try/except and add type hint
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Convert the sample output to a list
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Remove some type hints
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Implement FROC calculation for pathology
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Update ProbNMS docstring
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Update docs and change namings
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Fix a bug and minor changes
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Minor changes
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Fix docstring formatting
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Add a type hint
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Implement unittests for EvaluateTumorFROC
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Ignore type for np.amax
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Remove space
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Ignore type for range instead of np.amax
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Skip test if PIL is not available
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Update docstring
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Skip ground truth generating if PIL is not available
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Update unittest
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Remove print
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Rename TumorFROC and add a few type hints
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Rename evaluators to metrics
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Remove non-relevant files
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Rename to LesionFROC and minor changes
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
* Update test
Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com>
---
 .gitignore | 3 +
 docs/source/apps.rst | 14 +-
 monai/apps/pathology/__init__.py | 3 +-
 monai/apps/pathology/metrics.py | 180 +++++++++++++++
 monai/apps/pathology/utils.py | 45 ++++-
 monai/utils/prob_nms.py | 5 +-
 tests/test_lesion_froc.py | 320 +++++++++++++++++++++++++++
 7 files changed, 561 insertions(+), 9 deletions(-)
 create mode 100644 monai/apps/pathology/metrics.py
 create mode 100644 tests/test_lesion_froc.py

diff --git a/.gitignore b/.gitignore
index 4889d2d917..7444d7f2f9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,6 +48,9 @@ coverage.xml
 .hypothesis/
 .pytest_cache/
 
+# temporary unittest artifacts
+tests/testing_data/temp_*
+
 # Translations
 *.mo
 *.pot
diff --git a/docs/source/apps.rst b/docs/source/apps.rst
index fa92a2bc2d..29d835514f 100644
--- a/docs/source/apps.rst
+++ b/docs/source/apps.rst
@@ -74,10 +74,16 @@ Applications
 .. autoclass:: MaskedInferenceWSIDataset
     :members:
 
+.. automodule:: monai.apps.pathology.handlers
+.. autoclass:: ProbMapProducer
+    :members:
+
+.. automodule:: monai.apps.pathology.metrics
+.. autoclass:: LesionFROC
+    :members:
+
 .. automodule:: monai.apps.pathology.utils
+.. autofunction:: compute_multi_instance_mask
+.. autofunction:: compute_isolated_tumor_cells
 .. autoclass:: PathologyProbNMS
     :members:
-
-.. automodule:: monai.apps.pathology.handlers
-.. autoclass:: ProbMapProducer
-    :members: \ No newline at end of file
diff --git a/monai/apps/pathology/__init__.py b/monai/apps/pathology/__init__.py
index 3474a7c10a..203e1a80d7 100644
--- a/monai/apps/pathology/__init__.py
+++ b/monai/apps/pathology/__init__.py
@@ -11,4 +11,5 @@
 
 from .datasets import MaskedInferenceWSIDataset, PatchWSIDataset, SmartCacheDataset
 from .handlers import ProbMapProducer
-from .utils import ProbNMS
+from .metrics import LesionFROC
+from .utils import PathologyProbNMS, compute_isolated_tumor_cells, compute_multi_instance_mask
diff --git a/monai/apps/pathology/metrics.py b/monai/apps/pathology/metrics.py
new file mode 100644
index 0000000000..63b9d073a7
--- /dev/null
+++ b/monai/apps/pathology/metrics.py
@@ -0,0 +1,180 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from typing import Dict, List, Tuple, Union
+
+import numpy as np
+
+from monai.apps.pathology.utils import PathologyProbNMS, compute_isolated_tumor_cells, compute_multi_instance_mask
+from monai.data.image_reader import WSIReader
+from monai.metrics import compute_fp_tp_probs, compute_froc_curve_data, compute_froc_score
+
+
+class LesionFROC:
+    """
+    Evaluate with Free Response Operating Characteristic (FROC) score.
+
+    Args:
+        data: either the list of dictionaries containing probability maps (inference result) and
+            tumor mask (ground truth), as below, or the path to a json file containing such list.
+            `{
+            "prob_map": "path/to/prob_map_1.npy",
+            "tumor_mask": "path/to/ground_truth_1.tiff",
+            "level": 6,
+            "pixel_spacing": 0.243
+            }`
+        grow_distance: Euclidean distance (in micrometer) by which to grow the labels of the ground truth's tumors.
+            Defaults to 75, which is the equivalent size of 5 tumor cells.
+        itc_diameter: the maximum diameter of a region (in micrometer) to be considered as an isolated tumor cell.
+            Defaults to 200.
+        eval_thresholds: the false positive rates for calculating the average sensitivity.
+            Defaults to (0.25, 0.5, 1, 2, 4, 8) which is the same as the CAMELYON 16 Challenge.
+        nms_sigma: the standard deviation for gaussian filter of non-maximal suppression. Defaults to 0.0.
+        nms_prob_threshold: the probability threshold of non-maximal suppression. Defaults to 0.5.
+        nms_box_size: the box size (in pixel) to be removed around the pixel for non-maximal suppression.
+        image_reader_name: the name of the library to be used for loading whole slide images, either CuCIM or OpenSlide.
+            Defaults to CuCIM.
+
+    Note:
+        For more info on `nms_*` parameters, look at `monai.utils.prob_nms.ProbNMS`.
+
+    """
+
+    def __init__(
+        self,
+        data: Union[List[Dict], str],
+        grow_distance: int = 75,
+        itc_diameter: int = 200,
+        eval_thresholds: Tuple = (0.25, 0.5, 1, 2, 4, 8),
+        nms_sigma: float = 0.0,
+        nms_prob_threshold: float = 0.5,
+        nms_box_size: int = 48,
+        image_reader_name: str = "cuCIM",
+    ) -> None:
+
+        if isinstance(data, str):
+            self.data = self._load_data(data)
+        else:
+            self.data = data
+        self.grow_distance = grow_distance
+        self.itc_diameter = itc_diameter
+        self.eval_thresholds = eval_thresholds
+        self.image_reader = WSIReader(image_reader_name)
+        self.nms = PathologyProbNMS(
+            sigma=nms_sigma,
+            prob_threshold=nms_prob_threshold,
+            box_size=nms_box_size,
+        )
+
+    def _load_data(self, file_path: str) -> List[Dict]:
+        with open(file_path, "r") as f:
+            data: List[Dict] = json.load(f)
+        return data
+
+    def prepare_inference_result(self, sample: Dict):
+        """
+        Prepare the probability map for detection evaluation.
+
+        """
+        # load the probability map (the result of model inference)
+        prob_map = np.load(sample["prob_map"])
+
+        # apply non-maximal suppression
+        nms_outputs = self.nms(probs_map=prob_map, resolution_level=sample["level"])
+
+        # separate nms outputs
+        if nms_outputs:
+            probs, x_coord, y_coord = zip(*nms_outputs)
+        else:
+            probs, x_coord, y_coord = [], [], []
+
+        return np.array(probs), np.array(x_coord), np.array(y_coord)
+
+    def prepare_ground_truth(self, sample):
+        """
+        Prepare the ground truth for evaluation based on the binary tumor mask.
+
+        """
+        # load binary tumor masks
+        img_obj = self.image_reader.read(sample["tumor_mask"])
+        tumor_mask = self.image_reader.get_data(img_obj, level=sample["level"])[0][0]
+
+        # calculate pixel spacing at the mask level
+        mask_pixel_spacing = sample["pixel_spacing"] * pow(2, sample["level"])
+
+        # compute multi-instance mask from a binary mask
+        grow_pixel_threshold = self.grow_distance / (mask_pixel_spacing * 2)
+        tumor_mask = compute_multi_instance_mask(mask=tumor_mask, threshold=grow_pixel_threshold)
+
+        # identify isolated tumor cells
+        itc_threshold = (self.itc_diameter + self.grow_distance) / mask_pixel_spacing
+        itc_labels = compute_isolated_tumor_cells(tumor_mask=tumor_mask, threshold=itc_threshold)
+
+        return tumor_mask, itc_labels
+
+    def compute_fp_tp(self):
+        """
+        Compute false positive and true positive probabilities for tumor detection,
+        by comparing the model outputs with the prepared ground truths for all samples.
+
+        """
+        total_fp_probs, total_tp_probs = [], []
+        total_num_targets = 0
+        num_images = len(self.data)
+
+        for sample in self.data:
+            probs, y_coord, x_coord = self.prepare_inference_result(sample)
+            ground_truth, itc_labels = self.prepare_ground_truth(sample)
+            # compute FP and TP probabilities for a
pair of an image and a ground truth mask
+            fp_probs, tp_probs, num_targets = compute_fp_tp_probs(
+                probs=probs,
+                y_coord=y_coord,
+                x_coord=x_coord,
+                evaluation_mask=ground_truth,
+                labels_to_exclude=itc_labels,
+                resolution_level=sample["level"],
+            )
+            total_fp_probs.extend(fp_probs)
+            total_tp_probs.extend(tp_probs)
+            total_num_targets += num_targets
+
+        return (
+            np.array(total_fp_probs),
+            np.array(total_tp_probs),
+            total_num_targets,
+            num_images,
+        )
+
+    def evaluate(self):
+        """
+        Evaluate the detection performance of a model based on the model probability map output,
+        the ground truth tumor mask, and their associated metadata (e.g., pixel_spacing, level)
+        """
+        # compute false positive (FP) and true positive (TP) probabilities for all images
+        fp_probs, tp_probs, num_targets, num_images = self.compute_fp_tp()
+
+        # compute FROC curve given the evaluation of all images
+        fps_per_image, total_sensitivity = compute_froc_curve_data(
+            fp_probs=fp_probs,
+            tp_probs=tp_probs,
+            num_targets=num_targets,
+            num_images=num_images,
+        )
+
+        # compute FROC score given the specified evaluation thresholds
+        froc_score = compute_froc_score(
+            fps_per_image=fps_per_image,
+            total_sensitivity=total_sensitivity,
+            eval_thresholds=self.eval_thresholds,
+        )
+
+        return froc_score
diff --git a/monai/apps/pathology/utils.py b/monai/apps/pathology/utils.py
index b0803526fd..ae77bfafd1 100644
--- a/monai/apps/pathology/utils.py
+++ b/monai/apps/pathology/utils.py
@@ -9,12 +9,53 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Union
+from typing import List, Union
 
 import numpy as np
 import torch
 
-from monai.utils import ProbNMS
+from monai.utils import ProbNMS, optional_import
+
+measure, _ = optional_import("skimage.measure")
+ndimage, _ = optional_import("scipy.ndimage")
+
+
+def compute_multi_instance_mask(mask: np.ndarray, threshold: float):
+    """
+    This method computes the segmentation mask according to the binary tumor mask.
+
+    Args:
+        mask: the binary mask array
+        threshold: the threshold to fill holes
+    """
+
+    neg = 255 - mask * 255
+    distance = ndimage.morphology.distance_transform_edt(neg)
+    binary = distance < threshold
+
+    filled_image = ndimage.morphology.binary_fill_holes(binary)
+    multi_instance_mask = measure.label(filled_image, connectivity=2)
+
+    return multi_instance_mask
+
+
+def compute_isolated_tumor_cells(tumor_mask: np.ndarray, threshold: float) -> List[int]:
+    """
+    This method identifies Isolated Tumor Cells (ITC) and returns their labels.
+
+    Args:
+        tumor_mask: the tumor mask.
+        threshold: the threshold (at the mask level) to define an isolated tumor cell (ITC).
+            A region with the longest diameter less than this threshold is considered as an ITC.
+    """
+    max_label = np.amax(tumor_mask)
+    properties = measure.regionprops(tumor_mask, coordinates="rc")
+    itc_list = []
+    for i in range(max_label):  # type: ignore
+        if properties[i].major_axis_length < threshold:
+            itc_list.append(i + 1)
+
+    return itc_list
 
 
 class PathologyProbNMS(ProbNMS):
diff --git a/monai/utils/prob_nms.py b/monai/utils/prob_nms.py
index 29ba93d287..c789dab0bb 100644
--- a/monai/utils/prob_nms.py
+++ b/monai/utils/prob_nms.py
@@ -22,8 +22,9 @@ class ProbNMS:
         prob_threshold: the probability threshold, the function will stop searching if
             the highest probability is no larger than the threshold. The value should be
             no less than 0.0. Defaults to 0.5.
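A minimal sketch of the greedy suppression behaviour this docstring describes (the 2-D toy values and `box_size=5` are illustrative assumptions):

import numpy as np

from monai.utils import ProbNMS

prob_map = np.zeros((16, 16))
prob_map[4, 4] = 0.9    # strongest peak: reported first
prob_map[4, 6] = 0.8    # suppressed: inside the box zeroed around (4, 4)
prob_map[12, 12] = 0.7  # far enough away: reported second
nms = ProbNMS(sigma=0.0, prob_threshold=0.5, box_size=5)
print(nms(prob_map))    # expected roughly [[0.9, 4, 4], [0.7, 12, 12]]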
-        box_size: determines the sizes of the removing area of the selected coordinates for
-            each dimensions. Defaults to 48.
+        box_size: the box size (in pixel) to be removed around the pixel with the maximum probability.
+            It can be an integer that defines the size of a square or cube,
+            or a list containing different values for each dimension. Defaults to 48.
 
     Return:
         a list of selected lists, where inner lists contain probability and coordinates.
diff --git a/tests/test_lesion_froc.py b/tests/test_lesion_froc.py
new file mode 100644
index 0000000000..6702997c64
--- /dev/null
+++ b/tests/test_lesion_froc.py
@@ -0,0 +1,320 @@
+import os
+import unittest
+from unittest import skipUnless
+
+import numpy as np
+from parameterized import parameterized
+
+from monai.apps.pathology.metrics import LesionFROC
+from monai.utils import optional_import
+
+_, has_cucim = optional_import("cucim")
+_, has_skimage = optional_import("skimage.measure")
+_, has_sp = optional_import("scipy.ndimage")
+PILImage, has_pil = optional_import("PIL.Image")
+
+
+def save_as_tif(filename, array):
+    array = array[::-1, ...]  # Upside-down
+    img = PILImage.fromarray(array)
+    if not filename.endswith(".tif"):
+        filename += ".tif"
+    img.save(os.path.join("tests", "testing_data", filename))
+
+
+def around(val, interval=3):
+    return slice(val - interval, val + interval)
+
+
+# mask and prediction image size
+HEIGHT = 101
+WIDTH = 800
+
+
+def prepare_test_data():
+    # -------------------------------------
+    # Ground Truth - Binary Masks
+    # -------------------------------------
+    # ground truth with no tumor
+    ground_truth = np.zeros((HEIGHT, WIDTH), dtype=np.uint8)
+    save_as_tif("temp_ground_truth_0", ground_truth)
+
+    # ground truth with one tumor
+    ground_truth[around(HEIGHT // 2), around(1 * WIDTH // 7)] = 1
+    save_as_tif("temp_ground_truth_1", ground_truth)
+
+    # ground truth with two tumors
+    ground_truth[around(HEIGHT // 2), around(2 * WIDTH // 7)] = 1
+    save_as_tif("temp_ground_truth_2", ground_truth)
+
+    # ground truth with three tumors
+    ground_truth[around(HEIGHT // 2), around(3 * WIDTH // 7)] = 1
+    save_as_tif("temp_ground_truth_3", ground_truth)
+
+    # ground truth with four tumors
+    ground_truth[around(HEIGHT // 2), around(4 * WIDTH // 7)] = 1
+    save_as_tif("temp_ground_truth_4", ground_truth)
+
+    # -------------------------------------
+    # predictions - Probability Maps
+    # -------------------------------------
+
+    # prediction with no tumor
+    prob_map = np.zeros((HEIGHT, WIDTH))
+    np.save("./tests/testing_data/temp_prob_map_0_0.npy", prob_map)
+
+    # prediction with one incorrect tumor
+    prob_map[HEIGHT // 2, 5 * WIDTH // 7] = 0.6
+    np.save("./tests/testing_data/temp_prob_map_0_1.npy", prob_map)
+
+    # prediction with the correct first tumor and an incorrect tumor
+    prob_map[HEIGHT // 2, 1 * WIDTH // 7] = 0.8
+    np.save("./tests/testing_data/temp_prob_map_1_1.npy", prob_map)
+
+    # prediction with the correct first two tumors and an incorrect tumor
+    prob_map[HEIGHT // 2, 2 * WIDTH // 7] = 0.8
+    np.save("./tests/testing_data/temp_prob_map_2_1.npy", prob_map)
+
+    # prediction with two incorrect tumors
+    prob_map = np.zeros((HEIGHT, WIDTH))
+    prob_map[HEIGHT // 2, 5 * WIDTH // 7] = 0.6
+    prob_map[HEIGHT // 2, 6 * WIDTH // 7] = 0.4
+    np.save("./tests/testing_data/temp_prob_map_0_2.npy", prob_map)
+
+    # prediction with the correct first tumor and two incorrect tumors
+    prob_map[HEIGHT // 2, 1 * WIDTH // 7] = 0.8
+    np.save("./tests/testing_data/temp_prob_map_1_2.npy", prob_map)
+
+
+TEST_CASE_0 = [
+    {
+        "data": [
+            {
+                
"prob_map": "./tests/testing_data/temp_prob_map_0_0.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_0.tif", + "level": 0, + "pixel_spacing": 1, + } + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + np.nan, +] + + +TEST_CASE_1 = [ + { + "data": [ + { + "prob_map": "./tests/testing_data/temp_prob_map_0_0.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_1.tif", + "level": 0, + "pixel_spacing": 1, + } + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + 0.0, +] + +TEST_CASE_2 = [ + { + "data": [ + { + "prob_map": "./tests/testing_data/temp_prob_map_1_1.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_1.tif", + "level": 0, + "pixel_spacing": 1, + } + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + 1.0, +] + +TEST_CASE_3 = [ + { + "data": [ + { + "prob_map": "./tests/testing_data/temp_prob_map_2_1.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_1.tif", + "level": 0, + "pixel_spacing": 1, + } + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + 1.0, +] + + +TEST_CASE_4 = [ + { + "data": [ + { + "prob_map": "./tests/testing_data/temp_prob_map_2_1.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_2.tif", + "level": 0, + "pixel_spacing": 1, + } + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + 1.0, +] + +TEST_CASE_5 = [ + { + "data": [ + { + "prob_map": "./tests/testing_data/temp_prob_map_1_2.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_2.tif", + "level": 0, + "pixel_spacing": 1, + } + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + 0.5, +] + + +TEST_CASE_5 = [ + { + "data": [ + { + "prob_map": "./tests/testing_data/temp_prob_map_1_1.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_1.tif", + "level": 0, + "pixel_spacing": 1, + }, + { + "prob_map": "./tests/testing_data/temp_prob_map_1_2.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_2.tif", + "level": 0, + "pixel_spacing": 1, + }, + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + 2.0 / 3.0, +] + +TEST_CASE_6 = [ + { + "data": [ + { + "prob_map": "./tests/testing_data/temp_prob_map_1_1.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_3.tif", + "level": 0, + "pixel_spacing": 1, + }, + { + "prob_map": "./tests/testing_data/temp_prob_map_1_2.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_2.tif", + "level": 0, + "pixel_spacing": 1, + }, + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + 0.4, +] + +TEST_CASE_7 = [ + { + "data": [ + { + "prob_map": "./tests/testing_data/temp_prob_map_0_1.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_1.tif", + "level": 0, + "pixel_spacing": 1, + }, + { + "prob_map": "./tests/testing_data/temp_prob_map_1_1.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_3.tif", + "level": 0, + "pixel_spacing": 1, + }, + { + "prob_map": "./tests/testing_data/temp_prob_map_1_2.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_2.tif", + "level": 0, + "pixel_spacing": 1, + }, + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + 1.0 / 3.0, +] + +TEST_CASE_8 = [ + { + "data": [ + { + "prob_map": "./tests/testing_data/temp_prob_map_0_2.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_4.tif", + "level": 0, + "pixel_spacing": 1, + }, + { + "prob_map": "./tests/testing_data/temp_prob_map_1_1.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_3.tif", + "level": 0, + "pixel_spacing": 1, + }, + { + "prob_map": "./tests/testing_data/temp_prob_map_1_2.npy", + "tumor_mask": "./tests/testing_data/temp_ground_truth_2.tif", 
+ "level": 0, + "pixel_spacing": 1, + }, + ], + "grow_distance": 2, + "itc_diameter": 0, + }, + 2.0 / 9.0, +] + + +class TestEvaluateTumorFROC(unittest.TestCase): + @skipUnless(has_cucim, "Requires cucim") + @skipUnless(has_skimage, "Requires skimage") + @skipUnless(has_sp, "Requires scipy") + @skipUnless(has_pil, "Requires PIL") + def setUp(self): + prepare_test_data() + + @parameterized.expand( + [ + TEST_CASE_0, + TEST_CASE_1, + TEST_CASE_2, + TEST_CASE_3, + TEST_CASE_4, + TEST_CASE_5, + TEST_CASE_6, + TEST_CASE_7, + TEST_CASE_8, + ] + ) + def test_read_patches_cucim(self, input_parameters, expected): + froc = LesionFROC(**input_parameters) + froc_score = froc.evaluate() + if np.isnan(expected): + self.assertTrue(np.isnan(froc_score)) + else: + self.assertAlmostEqual(froc_score, expected) + + +if __name__ == "__main__": + unittest.main() From 613f25e2441357e7dfb45ecea06a9bedf0f358f1 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Wed, 31 Mar 2021 16:25:43 -0400 Subject: [PATCH 129/457] Update CuCIM (#1909) Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- docs/requirements.txt | 2 +- requirements-dev.txt | 2 +- setup.cfg | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index f05bc5b9ca..c31f06f2ca 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,7 +4,7 @@ pytorch-ignite==0.4.4 numpy>=1.17 itk>=5.0 nibabel -cucim==0.18.1 +cucim==0.18.2 openslide-python==1.1.2 parameterized scikit-image>=0.14.2 diff --git a/requirements-dev.txt b/requirements-dev.txt index dc4181b310..dfa1eb1853 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -30,5 +30,5 @@ Sphinx==3.3.0 recommonmark==0.6.0 sphinx-autodoc-typehints==1.11.1 sphinx-rtd-theme==0.5.0 -cucim==0.18.1 +cucim==0.18.2 openslide-python==1.1.2 diff --git a/setup.cfg b/setup.cfg index f06c56d001..a41081cd11 100644 --- a/setup.cfg +++ b/setup.cfg @@ -38,7 +38,7 @@ all = torchvision itk>=5.0 tqdm>=4.47.0 - cucim==0.18.1 + cucim==0.18.2 openslide-python==1.1.2 nibabel = nibabel @@ -63,7 +63,7 @@ lmdb = psutil = psutil cucim = - cucim==0.18.1 + cucim==0.18.2 openslide = openslide-python==1.1.2 From 5fec7916c7097f2b1032c32abad853cd54653fb7 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Wed, 31 Mar 2021 17:58:05 -0400 Subject: [PATCH 130/457] Update pathology unittests (#1910) Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- tests/test_cuimage_reader.py | 4 +-- tests/test_masked_inference_wsi_dataset.py | 38 ++++++++++++---------- tests/test_openslide_reader.py | 2 +- tests/test_patch_wsi_dataset.py | 2 +- tests/test_smartcache_patch_wsi_dataset.py | 2 +- 5 files changed, 25 insertions(+), 23 deletions(-) diff --git a/tests/test_cuimage_reader.py b/tests/test_cuimage_reader.py index 1b0293f159..c096bad0c2 100644 --- a/tests/test_cuimage_reader.py +++ b/tests/test_cuimage_reader.py @@ -14,7 +14,7 @@ PILImage, has_pil = optional_import("PIL.Image") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", "temp_" + os.path.basename(FILE_URL)) HEIGHT = 32914 WIDTH = 46000 @@ -105,7 +105,7 @@ def test_read_rgba(self, img_expected): image = {} reader = WSIReader("cuCIM") for mode in ["RGB", "RGBA"]: - file_path = 
self.create_rgba_image(img_expected, "test_cu_tiff_image", mode=mode) + file_path = self.create_rgba_image(img_expected, "temp_cu_tiff_image", mode=mode) img_obj = reader.read(file_path) image[mode], _ = reader.get_data(img_obj) diff --git a/tests/test_masked_inference_wsi_dataset.py b/tests/test_masked_inference_wsi_dataset.py index 7c8a815c2e..88af8c05c0 100644 --- a/tests/test_masked_inference_wsi_dataset.py +++ b/tests/test_masked_inference_wsi_dataset.py @@ -15,11 +15,13 @@ _, has_osl = optional_import("openslide") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) +base_name, extension = os.path.splitext(os.path.basename(FILE_URL)) +FILE_NAME = "temp_" + base_name +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", FILE_NAME + extension) -MASK1 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask1.npy") -MASK2 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask2.npy") -MASK4 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask4.npy") +MASK1 = os.path.join(os.path.dirname(__file__), "testing_data", "temp_tissue_mask1.npy") +MASK2 = os.path.join(os.path.dirname(__file__), "testing_data", "temp_tissue_mask2.npy") +MASK4 = os.path.join(os.path.dirname(__file__), "testing_data", "temp_tissue_mask4.npy") HEIGHT = 32914 WIDTH = 46000 @@ -47,7 +49,7 @@ def prepare_data(): [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [100, 100], }, ], @@ -62,12 +64,12 @@ def prepare_data(): [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [101, 100], }, ], @@ -82,22 +84,22 @@ def prepare_data(): [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [100, 101], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [101, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [101, 101], }, ], @@ -121,7 +123,7 @@ def prepare_data(): ], dtype=np.uint8, ), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [100, 100], }, ], @@ -139,17 +141,17 @@ def prepare_data(): [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [101, 100], }, ], @@ -167,7 +169,7 @@ def prepare_data(): [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [100, 100], }, ], @@ -182,12 +184,12 @@ def prepare_data(): [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [100, 100], }, { "image": 
np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), - "name": "CMU-1", + "name": FILE_NAME, "mask_location": [101, 100], }, ], diff --git a/tests/test_openslide_reader.py b/tests/test_openslide_reader.py index ca50cec4de..e005dbd1c4 100644 --- a/tests/test_openslide_reader.py +++ b/tests/test_openslide_reader.py @@ -14,7 +14,7 @@ FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", "temp_" + os.path.basename(FILE_URL)) HEIGHT = 32914 WIDTH = 46000 diff --git a/tests/test_patch_wsi_dataset.py b/tests/test_patch_wsi_dataset.py index d030671d06..c4a94a60c4 100644 --- a/tests/test_patch_wsi_dataset.py +++ b/tests/test_patch_wsi_dataset.py @@ -14,7 +14,7 @@ _, has_osl = optional_import("openslide") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", "temp_" + os.path.basename(FILE_URL)) TEST_CASE_0 = [ { diff --git a/tests/test_smartcache_patch_wsi_dataset.py b/tests/test_smartcache_patch_wsi_dataset.py index a7c90b5205..d7c2ce5bd1 100644 --- a/tests/test_smartcache_patch_wsi_dataset.py +++ b/tests/test_smartcache_patch_wsi_dataset.py @@ -13,7 +13,7 @@ _, has_cim = optional_import("cucim") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" -FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) +FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", "temp_" + os.path.basename(FILE_URL)) TEST_CASE_0 = [ { From cd3c21a091f3c59f67790432c4fe6521a7891aa7 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Thu, 1 Apr 2021 03:14:57 -0400 Subject: [PATCH 131/457] Update prob map handler (#1911) * Rename the prob map producer unittest to match the module Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Change probs_map to prob_map Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Prepend image_inference_outputs with temp to be ignored Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/utils/prob_nms.py | 32 +++++++++---------- tests/min_tests.py | 2 +- ...r.py => test_handler_prob_map_producer.py} | 6 ++-- 3 files changed, 20 insertions(+), 20 deletions(-) rename tests/{test_handler_prob_map_generator.py => test_handler_prob_map_producer.py} (94%) diff --git a/monai/utils/prob_nms.py b/monai/utils/prob_nms.py index c789dab0bb..c25223d524 100644 --- a/monai/utils/prob_nms.py +++ b/monai/utils/prob_nms.py @@ -65,36 +65,36 @@ def __init__( def __call__( self, - probs_map: Union[np.ndarray, torch.Tensor], + prob_map: Union[np.ndarray, torch.Tensor], ): """ - probs_map: the input probabilities map, it must have shape (H[, W, ...]). + prob_map: the input probabilities map, it must have shape (H[, W, ...]). 
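For reference, a minimal sketch of how the renamed prob_map is consumed, assuming the class exported by monai/utils/prob_nms.py is ProbNMS and that spatial_dims, sigma, prob_threshold and box_size are constructor arguments (the attribute names in this diff and the box_size=nms_box_size call in the pathology metrics suggest this):

    import numpy as np
    from monai.utils.prob_nms import ProbNMS

    nms = ProbNMS(spatial_dims=2, sigma=0.0, prob_threshold=0.5, box_size=3)
    prob_map = np.zeros((8, 8), dtype=np.float32)
    prob_map[2, 2] = 0.9  # strongest candidate, reported first
    prob_map[2, 3] = 0.8  # suppressed: falls in the 3x3 box zeroed around (2, 2)
    prob_map[6, 6] = 0.7  # far enough away to survive as a second detection
    outputs = nms(prob_map)  # [[0.9, 2, 2], [0.7, 6, 6]]: probability, then index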
""" if self.sigma != 0: - if not isinstance(probs_map, torch.Tensor): - probs_map = torch.as_tensor(probs_map, dtype=torch.float) - self.filter.to(probs_map) - probs_map = self.filter(probs_map) + if not isinstance(prob_map, torch.Tensor): + prob_map = torch.as_tensor(prob_map, dtype=torch.float) + self.filter.to(prob_map) + prob_map = self.filter(prob_map) else: - if not isinstance(probs_map, torch.Tensor): - probs_map = probs_map.copy() + if not isinstance(prob_map, torch.Tensor): + prob_map = prob_map.copy() - if isinstance(probs_map, torch.Tensor): - probs_map = probs_map.detach().cpu().numpy() + if isinstance(prob_map, torch.Tensor): + prob_map = prob_map.detach().cpu().numpy() - probs_map_shape = probs_map.shape + prob_map_shape = prob_map.shape outputs = [] - while np.max(probs_map) > self.prob_threshold: - max_idx = np.unravel_index(probs_map.argmax(), probs_map_shape) - prob_max = probs_map[max_idx] + while np.max(prob_map) > self.prob_threshold: + max_idx = np.unravel_index(prob_map.argmax(), prob_map_shape) + prob_max = prob_map[max_idx] max_idx_arr = np.asarray(max_idx) outputs.append([prob_max] + list(max_idx_arr)) idx_min_range = (max_idx_arr - self.box_lower_bd).clip(0, None) - idx_max_range = (max_idx_arr + self.box_upper_bd).clip(None, probs_map_shape) + idx_max_range = (max_idx_arr + self.box_upper_bd).clip(None, prob_map_shape) # for each dimension, set values during index ranges to 0 slices = tuple(slice(idx_min_range[i], idx_max_range[i]) for i in range(self.spatial_dims)) - probs_map[slices] = 0 + prob_map[slices] = 0 return outputs diff --git a/tests/min_tests.py b/tests/min_tests.py index e896e81c70..06231af0a1 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -43,7 +43,7 @@ def run_testsuit(): "test_handler_confusion_matrix_dist", "test_handler_hausdorff_distance", "test_handler_mean_dice", - "test_handler_prob_map_generator", + "test_handler_prob_map_producer", "test_handler_rocauc", "test_handler_rocauc_dist", "test_handler_segmentation_saver", diff --git a/tests/test_handler_prob_map_generator.py b/tests/test_handler_prob_map_producer.py similarity index 94% rename from tests/test_handler_prob_map_generator.py rename to tests/test_handler_prob_map_producer.py index 4882060be9..8bf42131b4 100644 --- a/tests/test_handler_prob_map_generator.py +++ b/tests/test_handler_prob_map_producer.py @@ -23,9 +23,9 @@ from monai.engines import Evaluator from monai.handlers import ValidationHandler -TEST_CASE_0 = ["image_inference_output_1", 2] -TEST_CASE_1 = ["image_inference_output_2", 9] -TEST_CASE_2 = ["image_inference_output_3", 1000] +TEST_CASE_0 = ["temp_image_inference_output_1", 2] +TEST_CASE_1 = ["temp_image_inference_output_2", 9] +TEST_CASE_2 = ["temp_image_inference_output_3", 1000] class TestDataset(Dataset): From ba127a2077b1a5d8aed01bc98a6f9550cc191c2b Mon Sep 17 00:00:00 2001 From: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Date: Thu, 1 Apr 2021 20:55:24 +0800 Subject: [PATCH 132/457] Implement dice_focal loss (#1914) Signed-off-by: Yiheng Wang --- docs/source/losses.rst | 5 ++ monai/losses/__init__.py | 3 + monai/losses/dice.py | 150 ++++++++++++++++++++++++++++++---- monai/losses/focal_loss.py | 14 +++- monai/networks/nets/senet.py | 4 +- tests/test_dice_ce_loss.py | 14 ++++ tests/test_dice_focal_loss.py | 80 ++++++++++++++++++ tests/test_focal_loss.py | 10 +++ 8 files changed, 263 insertions(+), 17 deletions(-) create mode 100644 tests/test_dice_focal_loss.py diff --git a/docs/source/losses.rst b/docs/source/losses.rst index 
5e19219fee..eea6656a24 100644 --- a/docs/source/losses.rst +++ b/docs/source/losses.rst @@ -48,6 +48,11 @@ Segmentation Losses .. autoclass:: DiceCELoss :members: +`DiceFocalLoss` +~~~~~~~~~~~~~~~ +.. autoclass:: DiceFocalLoss + :members: + `FocalLoss` ~~~~~~~~~~~ .. autoclass:: FocalLoss diff --git a/monai/losses/__init__.py b/monai/losses/__init__.py index b9146a6962..78a0fbc191 100644 --- a/monai/losses/__init__.py +++ b/monai/losses/__init__.py @@ -13,11 +13,14 @@ from .dice import ( Dice, DiceCELoss, + DiceFocalLoss, DiceLoss, GeneralizedDiceLoss, GeneralizedWassersteinDiceLoss, MaskedDiceLoss, dice, + dice_ce, + dice_focal, generalized_dice, generalized_wasserstein_dice, ) diff --git a/monai/losses/dice.py b/monai/losses/dice.py index 65bf47f388..47af8ea171 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -10,7 +10,7 @@ # limitations under the License. import warnings -from typing import Callable, List, Optional, Union +from typing import Callable, List, Optional, Sequence, Union import numpy as np import torch @@ -18,6 +18,7 @@ import torch.nn.functional as F from torch.nn.modules.loss import _Loss +from monai.losses.focal_loss import FocalLoss from monai.networks import one_hot from monai.utils import LossReduction, Weight @@ -600,15 +601,12 @@ def _compute_alpha_generalized_true_positives(self, flat_target: torch.Tensor) - class DiceCELoss(_Loss): """ - Compute both Dice loss and Cross Entropy Loss, and return the sum of these two losses. - Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]). - Axis N of `input` is expected to have logit predictions for each class rather than being image channels, - while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are - values added for dice loss part to the intersection and union components of the inter-over-union calculation - to smooth results respectively, these values should be small. The `include_background` class attribute can be - set to False for an instance of the loss to exclude the first category (channel index 0) which is by convention - assumed to be background. If the non-background segmentations are small compared to the total image size they can get - overwhelmed by the signal from the background so excluding it in such cases helps convergence. + Compute both Dice loss and Cross Entropy Loss, and return the weighted sum of these two losses. + The details of Dice loss is shown in ``monai.losses.DiceLoss``. + The details of Cross Entropy Loss is shown in ``torch.nn.CrossEntropyLoss``. In this implementation, + two deprecated parameters ``size_average`` and ``reduce``, and the parameter ``ignore_index`` are + not supported. + """ def __init__( @@ -625,11 +623,13 @@ def __init__( smooth_dr: float = 1e-5, batch: bool = False, ce_weight: Optional[torch.Tensor] = None, + lambda_dice: float = 1.0, + lambda_ce: float = 1.0, ) -> None: """ Args: - ``ce_weight`` is only used for cross entropy loss, ``reduction`` is used for both losses and other - parameters are only used for dice loss. + ``ce_weight`` and ``lambda_ce`` are only used for cross entropy loss. + ``reduction`` is used for both losses and other parameters are only used for dice loss. include_background: if False channel index 0 (background category) is excluded from the calculation. to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False. @@ -655,6 +655,10 @@ def __init__( before any `reduction`. 
ce_weight: a rescaling weight given to each class for cross entropy loss. See ``torch.nn.CrossEntropyLoss()`` for more information. + lambda_dice: the trade-off weight value for dice loss. The value should be no less than 0.0. + Defaults to 1.0. + lambda_ce: the trade-off weight value for cross entropy loss. The value should be no less than 0.0. + Defaults to 1.0. """ super().__init__() @@ -675,6 +679,12 @@ def __init__( weight=ce_weight, reduction=reduction, ) + if lambda_dice < 0.0: + raise ValueError("lambda_dice should be no less than 0.0.") + if lambda_ce < 0.0: + raise ValueError("lambda_ce should be no less than 0.0.") + self.lambda_dice = lambda_dice + self.lambda_ce = lambda_ce def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: """ @@ -684,7 +694,7 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: Raises: ValueError: When number of dimensions for input and target are different. - ValueError: When number of channels for target is nither 1 or the same as input. + ValueError: When number of channels for target is neither 1 nor the same as input. """ if len(input.shape) != len(target.shape): @@ -700,11 +710,123 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: target = torch.squeeze(target, dim=1) target = target.long() ce_loss = self.cross_entropy(input, target) - total_loss: torch.Tensor = dice_loss + ce_loss + total_loss: torch.Tensor = self.lambda_dice * dice_loss + self.lambda_ce * ce_loss + return total_loss + + +class DiceFocalLoss(_Loss): + """ + Compute both Dice loss and Focal Loss, and return the weighted sum of these two losses. + The details of Dice loss is shown in ``monai.losses.DiceLoss``. + The details of Focal Loss is shown in ``monai.losses.FocalLoss``. + + """ + + def __init__( + self, + include_background: bool = True, + to_onehot_y: bool = False, + sigmoid: bool = False, + softmax: bool = False, + other_act: Optional[Callable] = None, + squared_pred: bool = False, + jaccard: bool = False, + reduction: str = "mean", + smooth_nr: float = 1e-5, + smooth_dr: float = 1e-5, + batch: bool = False, + gamma: float = 2.0, + focal_weight: Optional[Union[Sequence[float], float, int, torch.Tensor]] = None, + lambda_dice: float = 1.0, + lambda_focal: float = 1.0, + ) -> None: + """ + Args: + ``gamma``, ``focal_weight`` and ``lambda_focal`` are only used for focal loss. + ``include_background``, ``to_onehot_y``and ``reduction`` are used for both losses + and other parameters are only used for dice loss. + include_background: if False channel index 0 (background category) is excluded from the calculation. + to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False. + sigmoid: if True, apply a sigmoid function to the prediction. + softmax: if True, apply a softmax function to the prediction. + other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute + other activation layers, Defaults to ``None``. for example: + `other_act = torch.tanh`. + squared_pred: use squared versions of targets and predictions in the denominator or not. + jaccard: compute Jaccard Index (soft IoU) instead of dice or not. + reduction: {``"none"``, ``"mean"``, ``"sum"``} + Specifies the reduction to apply to the output. Defaults to ``"mean"``. + + - ``"none"``: no reduction will be applied. + - ``"mean"``: the sum of the output will be divided by the number of elements in the output. + - ``"sum"``: the output will be summed. 
+ + smooth_nr: a small constant added to the numerator to avoid zero. + smooth_dr: a small constant added to the denominator to avoid nan. + batch: whether to sum the intersection and union areas over the batch dimension before the dividing. + Defaults to False, a Dice loss value is computed independently from each item in the batch + before any `reduction`. + gamma: value of the exponent gamma in the definition of the Focal loss. + focal_weight: weights to apply to the voxels of each class. If None no weights are applied. + The input can be a single value (same weight for all classes), a sequence of values (the length + of the sequence should be the same as the number of classes). + lambda_dice: the trade-off weight value for dice loss. The value should be no less than 0.0. + Defaults to 1.0. + lambda_focal: the trade-off weight value for focal loss. The value should be no less than 0.0. + Defaults to 1.0. + + """ + super().__init__() + self.dice = DiceLoss( + include_background=include_background, + to_onehot_y=to_onehot_y, + sigmoid=sigmoid, + softmax=softmax, + other_act=other_act, + squared_pred=squared_pred, + jaccard=jaccard, + reduction=reduction, + smooth_nr=smooth_nr, + smooth_dr=smooth_dr, + batch=batch, + ) + self.focal = FocalLoss( + include_background=include_background, + to_onehot_y=to_onehot_y, + gamma=gamma, + weight=focal_weight, + reduction=reduction, + ) + if lambda_dice < 0.0: + raise ValueError("lambda_dice should be no less than 0.0.") + if lambda_focal < 0.0: + raise ValueError("lambda_focal should be no less than 0.0.") + self.lambda_dice = lambda_dice + self.lambda_focal = lambda_focal + + def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + """ + Args: + input: the shape should be BNH[WD]. The input should be the original logits + due to the restriction of ``monai.losses.FocalLoss``. + target: the shape should be BNH[WD] or B1H[WD]. + + Raises: + ValueError: When number of dimensions for input and target are different. + ValueError: When number of channels for target is neither 1 nor the same as input. + + """ + if len(input.shape) != len(target.shape): + raise ValueError("the number of dimensions for input and target should be the same.") + + dice_loss = self.dice(input, target) + focal_loss = self.focal(input, target) + total_loss: torch.Tensor = self.lambda_dice * dice_loss + self.lambda_focal * focal_loss return total_loss dice = Dice = DiceLoss dice_ce = DiceCELoss +dice_focal = DiceFocalLoss generalized_dice = GeneralizedDiceLoss generalized_wasserstein_dice = GeneralizedWassersteinDiceLoss diff --git a/monai/losses/focal_loss.py b/monai/losses/focal_loss.py index 664e7673a4..5e0ccd3179 100644 --- a/monai/losses/focal_loss.py +++ b/monai/losses/focal_loss.py @@ -45,7 +45,9 @@ def __init__( weight: weights to apply to the voxels of each class. If None no weights are applied. This corresponds to the weights `\alpha` in [1]. The input can be a single value (same weight for all classes), a sequence of values (the length - of the sequence should be the same as the number of classes). + of the sequence should be the same as the number of classes, if not ``include_background``, the + number should not include class 0). + The value/values should be no less than 0. Defaults to None. reduction: {``"none"``, ``"mean"``, ``"sum"``} Specifies the reduction to apply to the output. Defaults to ``"mean"``. 
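A minimal sketch of the weighted-sum contract documented above, mirroring the identity that the new test_dice_focal_loss.py below asserts (default reduction and activation settings on both sides):

    import torch
    from monai.losses import DiceFocalLoss, DiceLoss, FocalLoss

    pred = torch.randn(2, 3, 16, 16)  # raw logits: batch=2, classes=3
    label = torch.randint(low=0, high=2, size=(2, 3, 16, 16))  # one-hot-style target

    loss = DiceFocalLoss(gamma=2.0, lambda_dice=1.0, lambda_focal=0.5)
    expected = DiceLoss()(pred, label) + 0.5 * FocalLoss(gamma=2.0)(pred, label)
    assert torch.allclose(loss(pred, label), expected)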
@@ -83,6 +85,9 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: AssertionError: When input and target (after one hot transform if setted) have different shapes. ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"]. + ValueError: When ``self.weight`` is a sequence and the length is not equal to the + number of classes. + ValueError: When ``self.weight`` is/contains a value that is less than 0. """ n_pred_ch = input.shape[1] @@ -122,6 +127,13 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: class_weight = torch.as_tensor([self.weight] * i.size(1)) else: class_weight = torch.as_tensor(self.weight) + if class_weight.size(0) != i.size(1): + raise ValueError( + "the length of the weight sequence should be the same as the number of classes. " + + "If `include_background=False`, the number should not include class 0." + ) + if class_weight.min() < 0: + raise ValueError("the value/values of weights should be no less than 0.") class_weight = class_weight.to(i) # Convert the weight to a map in which each voxel # has the weight associated with the ground-truth label diff --git a/monai/networks/nets/senet.py b/monai/networks/nets/senet.py index f5738edeeb..1e04e02973 100644 --- a/monai/networks/nets/senet.py +++ b/monai/networks/nets/senet.py @@ -263,8 +263,8 @@ def _load_state_dict(model, arch, progress): model_url = model_urls[arch] else: raise ValueError( - "only 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', \ - and se_resnext101_32x4d are supported to load pretrained weights." + "only 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', " + + "and se_resnext101_32x4d are supported to load pretrained weights." ) pattern_conv = re.compile(r"^(layer[1-4]\.\d\.(?:conv)\d\.)(\w*)$") diff --git a/tests/test_dice_ce_loss.py b/tests/test_dice_ce_loss.py index 8627c6d130..3423e1425b 100644 --- a/tests/test_dice_ce_loss.py +++ b/tests/test_dice_ce_loss.py @@ -43,6 +43,20 @@ }, 0.2088, ], + [ # shape: (2, 2, 3), (2, 1, 3) lambda_dice: 1.0, lambda_ce: 2.0 + { + "include_background": False, + "to_onehot_y": True, + "ce_weight": torch.tensor([1.0, 1.0]), + "lambda_dice": 1.0, + "lambda_ce": 2.0, + }, + { + "input": torch.tensor([[[100.0, 100.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]]), + "target": torch.tensor([[[0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0]]]), + }, + 0.4176, + ], [ # shape: (2, 2, 3), (2, 1, 3), do not include class 0 {"include_background": False, "to_onehot_y": True, "ce_weight": torch.tensor([0.0, 1.0])}, { diff --git a/tests/test_dice_focal_loss.py b/tests/test_dice_focal_loss.py new file mode 100644 index 0000000000..4bab68131c --- /dev/null +++ b/tests/test_dice_focal_loss.py @@ -0,0 +1,80 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import torch + +from monai.losses import DiceFocalLoss, DiceLoss, FocalLoss +from tests.utils import SkipIfBeforePyTorchVersion, test_script_save + + +class TestDiceFocalLoss(unittest.TestCase): + def test_result_onehot_target_include_bg(self): + size = [3, 3, 5, 5] + label = torch.randint(low=0, high=2, size=size) + pred = torch.randn(size) + for reduction in ["sum", "mean", "none"]: + common_params = { + "include_background": True, + "to_onehot_y": False, + "reduction": reduction, + } + for focal_weight in [None, torch.tensor([1.0, 1.0, 2.0]), (3, 2.0, 1)]: + for lambda_focal in [0.5, 1.0, 1.5]: + dice_focal = DiceFocalLoss( + focal_weight=focal_weight, gamma=1.0, lambda_focal=lambda_focal, **common_params + ) + dice = DiceLoss(**common_params) + focal = FocalLoss(weight=focal_weight, gamma=1.0, **common_params) + result = dice_focal(pred, label) + expected_val = dice(pred, label) + lambda_focal * focal(pred, label) + np.testing.assert_allclose(result, expected_val) + + def test_result_no_onehot_no_bg(self): + size = [3, 3, 5, 5] + label = torch.randint(low=0, high=2, size=size) + label = torch.argmax(label, dim=1, keepdim=True) + pred = torch.randn(size) + for reduction in ["sum", "mean", "none"]: + common_params = { + "include_background": False, + "to_onehot_y": True, + "reduction": reduction, + } + for focal_weight in [2.0, torch.tensor([1.0, 2.0]), (2.0, 1)]: + for lambda_focal in [0.5, 1.0, 1.5]: + dice_focal = DiceFocalLoss(focal_weight=focal_weight, lambda_focal=lambda_focal, **common_params) + dice = DiceLoss(**common_params) + focal = FocalLoss(weight=focal_weight, **common_params) + result = dice_focal(pred, label) + expected_val = dice(pred, label) + lambda_focal * focal(pred, label) + np.testing.assert_allclose(result, expected_val) + + def test_ill_shape(self): + loss = DiceFocalLoss() + with self.assertRaisesRegex(ValueError, ""): + loss(torch.ones((1, 2, 3)), torch.ones((1, 1, 2, 3))) + + def test_ill_lambda(self): + with self.assertRaisesRegex(ValueError, ""): + loss = DiceFocalLoss(lambda_dice=-1.0) + + @SkipIfBeforePyTorchVersion((1, 7, 0)) + def test_script(self): + loss = DiceFocalLoss() + test_input = torch.ones(2, 1, 8, 8) + test_script_save(loss, test_input, test_input) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_focal_loss.py b/tests/test_focal_loss.py index 4512dac4b9..66665774ef 100644 --- a/tests/test_focal_loss.py +++ b/tests/test_focal_loss.py @@ -187,6 +187,16 @@ def test_ill_shape(self): with self.assertRaisesRegex(AssertionError, ""): FocalLoss(reduction="mean")(chn_input, chn_target) + def test_ill_class_weight(self): + chn_input = torch.ones((1, 4, 3, 3)) + chn_target = torch.ones((1, 4, 3, 3)) + with self.assertRaisesRegex(ValueError, ""): + FocalLoss(include_background=True, weight=(1.0, 1.0, 2.0))(chn_input, chn_target) + with self.assertRaisesRegex(ValueError, ""): + FocalLoss(include_background=False, weight=(1.0, 1.0, 1.0, 1.0))(chn_input, chn_target) + with self.assertRaisesRegex(ValueError, ""): + FocalLoss(include_background=False, weight=(1.0, 1.0, -1.0))(chn_input, chn_target) + @SkipIfBeforePyTorchVersion((1, 7, 0)) def test_script(self): loss = FocalLoss() From debc5614791b7af33748a2dac557d554590e2211 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 1 Apr 2021 17:50:35 +0100 Subject: [PATCH 133/457] followup of #1878 (#1913) * followup of #1878, fixes tests, remove json loading Signed-off-by: Wenqi Li * Update test ordinal numbers Signed-off-by: Behrooz 
<3968947+behxyz@users.noreply.github.com> Co-authored-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/apps/pathology/metrics.py | 15 +++------------ tests/test_lesion_froc.py | 9 +++++---- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/monai/apps/pathology/metrics.py b/monai/apps/pathology/metrics.py index 63b9d073a7..ae01d8a1db 100644 --- a/monai/apps/pathology/metrics.py +++ b/monai/apps/pathology/metrics.py @@ -9,8 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json -from typing import Dict, List, Tuple, Union +from typing import Dict, List, Tuple import numpy as np @@ -51,7 +50,7 @@ class LesionFROC: def __init__( self, - data: Union[List[Dict], str], + data: List[Dict], grow_distance: int = 75, itc_diameter: int = 200, eval_thresholds: Tuple = (0.25, 0.5, 1, 2, 4, 8), @@ -61,10 +60,7 @@ def __init__( image_reader_name: str = "cuCIM", ) -> None: - if isinstance(data, str): - self.data = self._load_data(data) - else: - self.data = data + self.data = data self.grow_distance = grow_distance self.itc_diameter = itc_diameter self.eval_thresholds = eval_thresholds @@ -75,11 +71,6 @@ def __init__( box_size=nms_box_size, ) - def _load_data(self, file_path: str) -> List[Dict]: - with open(file_path, "r") as f: - data: List[Dict] = json.load(f) - return data - def prepare_inference_result(self, sample: Dict): """ Prepare the probability map for detection evaluation. diff --git a/tests/test_lesion_froc.py b/tests/test_lesion_froc.py index 6702997c64..1f2926631f 100644 --- a/tests/test_lesion_froc.py +++ b/tests/test_lesion_froc.py @@ -185,7 +185,7 @@ def prepare_test_data(): ] -TEST_CASE_5 = [ +TEST_CASE_6 = [ { "data": [ { @@ -207,7 +207,7 @@ def prepare_test_data(): 2.0 / 3.0, ] -TEST_CASE_6 = [ +TEST_CASE_7 = [ { "data": [ { @@ -229,7 +229,7 @@ def prepare_test_data(): 0.4, ] -TEST_CASE_7 = [ +TEST_CASE_8 = [ { "data": [ { @@ -257,7 +257,7 @@ def prepare_test_data(): 1.0 / 3.0, ] -TEST_CASE_8 = [ +TEST_CASE_9 = [ { "data": [ { @@ -305,6 +305,7 @@ def setUp(self): TEST_CASE_6, TEST_CASE_7, TEST_CASE_8, + TEST_CASE_9, ] ) def test_read_patches_cucim(self, input_parameters, expected): From 32e540f99d43d56c038537dd2ed6954d80a531ee Mon Sep 17 00:00:00 2001 From: Yiwen Li <44606435+kate-sann5100@users.noreply.github.com> Date: Thu, 1 Apr 2021 19:15:32 +0100 Subject: [PATCH 134/457] 1868-fix-potential-inf-in-lncc-loss (#1915) * clip variance to be >= 0 Signed-off-by: kate-sann5100 * max for torch1.6 Signed-off-by: Wenqi Li Co-authored-by: Wenqi Li --- monai/losses/image_dissimilarity.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/monai/losses/image_dissimilarity.py b/monai/losses/image_dissimilarity.py index 67b2d177f6..eed5808aa3 100644 --- a/monai/losses/image_dissimilarity.py +++ b/monai/losses/image_dissimilarity.py @@ -65,8 +65,8 @@ def __init__( kernel_size: int = 3, kernel_type: str = "rectangular", reduction: Union[LossReduction, str] = LossReduction.MEAN, - smooth_nr: float = 1e-7, - smooth_dr: float = 1e-7, + smooth_nr: float = 1e-5, + smooth_dr: float = 1e-5, ) -> None: """ Args: @@ -146,6 +146,8 @@ def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor: cross = tp_sum - p_avg * t_sum t_var = t2_sum - t_avg * t_sum # std[t] ** 2 p_var = p2_sum - p_avg * p_sum # std[p] ** 2 + t_var = torch.max(t_var, torch.zeros_like(t_var)) + p_var = torch.max(p_var, torch.zeros_like(p_var)) ncc: torch.Tensor = (cross * cross + self.smooth_nr) / (t_var * 
p_var + self.smooth_dr) # shape = (batch, 1, D, H, W) From 4bd26f9f6c18e7c6e68320d19f556705d9afab60 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Fri, 2 Apr 2021 12:12:09 +0100 Subject: [PATCH 135/457] 1919 - test pt 2103 (#1920) * update to use pytorch2103 Signed-off-by: Wenqi Li --- .github/workflows/cron.yml | 4 ++-- .github/workflows/pythonapp.yml | 2 +- Dockerfile | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index 761b1f7ebc..3562672232 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -60,7 +60,7 @@ jobs: cron-pt-image: if: github.repository == 'Project-MONAI/MONAI' container: - image: nvcr.io/nvidia/pytorch:21.02-py3 # testing with the latest pytorch base image + image: nvcr.io/nvidia/pytorch:21.03-py3 # testing with the latest pytorch base image options: "--gpus all" runs-on: [self-hosted, linux, x64, common] steps: @@ -133,7 +133,7 @@ jobs: if: github.repository == 'Project-MONAI/MONAI' needs: cron-gpu # so that monai itself is verified first container: - image: nvcr.io/nvidia/pytorch:21.02-py3 # testing with the latest pytorch base image + image: nvcr.io/nvidia/pytorch:21.03-py3 # testing with the latest pytorch base image options: "--gpus all --ipc=host" runs-on: [self-hosted, linux, x64, common] steps: diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index e5803028a0..738d657211 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -186,7 +186,7 @@ jobs: - environment: PT18+CUDA112 # we explicitly set pytorch to -h to avoid pip install error pytorch: "-h" - base: "nvcr.io/nvidia/pytorch:21.02-py3" + base: "nvcr.io/nvidia/pytorch:21.03-py3" container: image: ${{ matrix.base }} options: --gpus all diff --git a/Dockerfile b/Dockerfile index 57ea567869..54d1f02275 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,7 +11,7 @@ # To build with a different base image # please run `docker build` using the `--build-arg PYTORCH_IMAGE=...` flag. -ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:21.02-py3 +ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:21.03-py3 FROM ${PYTORCH_IMAGE} LABEL maintainer="monai.contact@gmail.com" From d9e68c887b66e1ba0c77c4740cc1680aebda04b2 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Fri, 2 Apr 2021 16:04:14 +0100 Subject: [PATCH 136/457] fixes typos (#1924) Signed-off-by: Wenqi Li --- docs/source/networks.rst | 2 +- monai/apps/pathology/datasets.py | 2 +- .../bilateral/bilateralfilter_cuda_phl.cu | 2 +- .../filtering/permutohedral/hash_table.cuh | 2 +- monai/networks/blocks/crf.py | 24 +++++++++---------- monai/networks/blocks/regunet_block.py | 2 +- monai/networks/layers/filtering.py | 4 ++-- monai/transforms/compose.py | 2 +- tests/test_crf_cpu.py | 6 ++--- tests/test_crf_cuda.py | 6 ++--- 10 files changed, 26 insertions(+), 26 deletions(-) diff --git a/docs/source/networks.rst b/docs/source/networks.rst index 15d7cb80b0..abf75bda1d 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -99,7 +99,7 @@ Blocks .. autoclass:: SEResNetBottleneck :members: -`Squeeze-and-Excitation ResneXt Bottleneck` +`Squeeze-and-Excitation ResNeXt Bottleneck` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
autoclass:: SEResNeXtBottleneck :members: diff --git a/monai/apps/pathology/datasets.py b/monai/apps/pathology/datasets.py index 01902d1ee2..cba8cd2da9 100644 --- a/monai/apps/pathology/datasets.py +++ b/monai/apps/pathology/datasets.py @@ -283,7 +283,7 @@ def _load_a_patch(self, index): """ Load sample given the index - Since index is sequential and the patches are comming in an stream from different images, + Since index is sequential and the patches are coming in an stream from different images, this method, first, finds the whole slide image and the patch that should be extracted, then it loads the patch and provide it with its image name and the corresponding mask location. """ diff --git a/monai/csrc/filtering/bilateral/bilateralfilter_cuda_phl.cu b/monai/csrc/filtering/bilateral/bilateralfilter_cuda_phl.cu index 603ab689cf..17dc9e7ebd 100644 --- a/monai/csrc/filtering/bilateral/bilateralfilter_cuda_phl.cu +++ b/monai/csrc/filtering/bilateral/bilateralfilter_cuda_phl.cu @@ -95,7 +95,7 @@ void BilateralFilterPHLCuda( cudaMalloc(&data, desc.batchCount * desc.channelStride * desc.channelCount * sizeof(scalar_t)); cudaMalloc(&features, desc.batchCount * desc.channelStride * featureChannelCount * sizeof(scalar_t)); - // Prparing constant memory + // Preparing constant memory cudaMemcpyToSymbol(cBatchStride, &desc.batchStride, sizeof(int)); cudaMemcpyToSymbol(cChannelStride, &desc.channelStride, sizeof(int)); cudaMemcpyToSymbol(cSpatialStrides, desc.strides, sizeof(int) * desc.dimensions); diff --git a/monai/csrc/filtering/permutohedral/hash_table.cuh b/monai/csrc/filtering/permutohedral/hash_table.cuh index 7d9d7eb163..f9893dffe2 100644 --- a/monai/csrc/filtering/permutohedral/hash_table.cuh +++ b/monai/csrc/filtering/permutohedral/hash_table.cuh @@ -15,7 +15,7 @@ limitations under the License. //#define USE_ADDITIVE_HASH -// turn this on if you want to get slighly less memory consumption and slightly longer run times. +// turn this on if you want to get slightly less memory consumption and slightly longer run times. //#define LINEAR_D_MEMORY #define USE_CUSTOM_MODULO diff --git a/monai/networks/blocks/crf.py b/monai/networks/blocks/crf.py index 27556a2c72..635c750ba9 100644 --- a/monai/networks/blocks/crf.py +++ b/monai/networks/blocks/crf.py @@ -20,7 +20,7 @@ class CRF(torch.nn.Module): """ Conditional Random Field: Combines message passing with a class - compatability convolution into an iterative process designed + compatibility convolution into an iterative process designed to successively minimise the energy of the class labeling. In this implementation, the message passing step is a weighted @@ -40,7 +40,7 @@ def __init__( bilateral_color_sigma: float = 0.5, gaussian_spatial_sigma: float = 5.0, update_factor: float = 3.0, - compatability_kernel_range: int = 1, + compatibility_kernel_range: int = 1, iterations: int = 5, ): """ @@ -51,7 +51,7 @@ def __init__( bilateral_color_sigma: standard deviation in color space for the bilateral term. gaussian_spatial_sigma: standard deviation in spatial coordinates for the gaussian term. update_factor: determines the magnitude of each update. - compatability_kernel_range: the range of the kernel used in the compatability convolution. + compatibility_kernel_range: the range of the kernel used in the compatibility convolution. iterations: the number of iterations. 
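A usage sketch for the renamed argument, assuming the block is importable as monai.networks.blocks.CRF and keeping the weight and sigma arguments at their defaults:

    import torch
    from monai.networks.blocks import CRF

    crf = CRF(update_factor=1.0, compatibility_kernel_range=1, iterations=5)
    logits = torch.randn(1, 2, 16, 16)  # initial class logits: batch, classes, H, W
    image = torch.rand(1, 3, 16, 16)    # reference tensor guiding message passing
    refined = crf(logits, image)        # expected: softmax-normalized, same shape as logits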
""" super(CRF, self).__init__() @@ -61,14 +61,14 @@ def __init__( self.bilateral_color_sigma = bilateral_color_sigma self.gaussian_spatial_sigma = gaussian_spatial_sigma self.update_factor = update_factor - self.compatability_kernel_range = compatability_kernel_range + self.compatibility_kernel_range = compatibility_kernel_range self.iterations = iterations def forward(self, input_tensor: torch.Tensor, reference_tensor: torch.Tensor): """ Args: input_tensor: tensor containing initial class logits. - referenece_tensor: the reference tensor used to guide the message passing. + reference_tensor: the reference tensor used to guide the message passing. Returns: output (torch.Tensor): output tensor. @@ -77,7 +77,7 @@ def forward(self, input_tensor: torch.Tensor, reference_tensor: torch.Tensor): # useful values spatial_dim = input_tensor.dim() - 2 class_count = input_tensor.size(1) - padding = self.compatability_kernel_range + padding = self.compatibility_kernel_range # constructing spatial feature tensor spatial_features = _create_coordinate_tensor(reference_tensor) @@ -88,18 +88,18 @@ def forward(self, input_tensor: torch.Tensor, reference_tensor: torch.Tensor): ) gaussian_features = spatial_features / self.gaussian_spatial_sigma - # compatability matrix (potts model (1 - diag) for now) - compatability_matrix = _potts_model_weights(class_count).to(device=input_tensor.device) + # compatibility matrix (potts model (1 - diag) for now) + compatibility_matrix = _potts_model_weights(class_count).to(device=input_tensor.device) # expanding matrix to kernel - compatability_kernel = _expand_matrix_to_kernel( - compatability_matrix, spatial_dim, self.compatability_kernel_range + compatibility_kernel = _expand_matrix_to_kernel( + compatibility_matrix, spatial_dim, self.compatibility_kernel_range ) # choosing convolution function conv = [conv1d, conv2d, conv3d][spatial_dim - 1] - # seting up output tensor + # setting up output tensor output_tensor = softmax(input_tensor, dim=1) # mean field loop @@ -114,7 +114,7 @@ def forward(self, input_tensor: torch.Tensor, reference_tensor: torch.Tensor): # compatibility convolution combined_output = pad(combined_output, 2 * spatial_dim * [padding], mode="replicate") - compatibility_update = conv(combined_output, compatability_kernel) + compatibility_update = conv(combined_output, compatibility_kernel) # update and normalize output_tensor = softmax(input_tensor - self.update_factor * compatibility_update, dim=1) diff --git a/monai/networks/blocks/regunet_block.py b/monai/networks/blocks/regunet_block.py index f4c2c1f3a7..591837be75 100644 --- a/monai/networks/blocks/regunet_block.py +++ b/monai/networks/blocks/regunet_block.py @@ -227,7 +227,7 @@ def __init__( spatial_dims: number of spatial dimensions extract_levels: spatial levels to extract feature from, 0 refers to the input scale num_channels: number of channels at each scale level, - List or Tuple of lenth equals to `depth` of the RegNet + List or Tuple of length equals to `depth` of the RegNet out_channels: number of output channels kernel_initializer: kernel initializer activation: kernel activation function diff --git a/monai/networks/layers/filtering.py b/monai/networks/layers/filtering.py index fc6c0a38b5..3b2214d59a 100644 --- a/monai/networks/layers/filtering.py +++ b/monai/networks/layers/filtering.py @@ -32,7 +32,7 @@ class BilateralFilter(torch.autograd.Function): input: input tensor. spatial sigma: the standard deviation of the spatial blur. 
Higher values can - hurt performace when not using the approximate method (see fast approx). + hurt performance when not using the approximate method (see fast approx). color sigma: the standard deviation of the color blur. Lower values preserve edges better whilst higher values tend to a simple gaussian spatial blur. @@ -95,7 +95,7 @@ def forward(ctx, input, features, sigmas=None): @staticmethod def backward(ctx, grad_output): - raise NotImplementedError("PHLFilter does not currently support backpropergation") + raise NotImplementedError("PHLFilter does not currently support Backpropagation") # scaled_features, = ctx.saved_variables # grad_input = _C.phl_filter(grad_output, scaled_features) # return grad_input diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py index d509ea33a1..dd40663e2a 100644 --- a/monai/transforms/compose.py +++ b/monai/transforms/compose.py @@ -19,7 +19,7 @@ from monai.transforms.inverse import InvertibleTransform -# For backwards compatiblity (so this still works: from monai.transforms.compose import MapTransform) +# For backwards compatibility (so this still works: from monai.transforms.compose import MapTransform) from monai.transforms.transform import ( # noqa: F401 MapTransform, Randomizable, diff --git a/tests/test_crf_cpu.py b/tests/test_crf_cpu.py index f6e82d16a5..41ae75f4b4 100644 --- a/tests/test_crf_cpu.py +++ b/tests/test_crf_cpu.py @@ -30,7 +30,7 @@ 0.5, # bilateral_color_sigma 5.0, # gaussian_spatial_sigma 1.0, # update_factor - 1, # compatability_kernel_range + 1, # compatibility_kernel_range 5, # iterations ], # Input @@ -92,7 +92,7 @@ 0.5, # bilateral_color_sigma 5.0, # gaussian_spatial_sigma 1.0, # update_factor - 1, # compatability_kernel_range + 1, # compatibility_kernel_range 5, # iterations ], # Input @@ -189,7 +189,7 @@ 0.1, # bilateral_color_sigma 5.0, # gaussian_spatial_sigma 1.0, # update_factor - 1, # compatability_kernel_range + 1, # compatibility_kernel_range 2, # iterations ], # Input diff --git a/tests/test_crf_cuda.py b/tests/test_crf_cuda.py index 55d57d67bf..6e67d4ec8c 100644 --- a/tests/test_crf_cuda.py +++ b/tests/test_crf_cuda.py @@ -30,7 +30,7 @@ 0.5, # bilateral_color_sigma 5.0, # gaussian_spatial_sigma 1.0, # update_factor - 1, # compatability_kernel_range + 1, # compatibility_kernel_range 5, # iterations ], # Input @@ -92,7 +92,7 @@ 0.5, # bilateral_color_sigma 5.0, # gaussian_spatial_sigma 1.0, # update_factor - 1, # compatability_kernel_range + 1, # compatibility_kernel_range 5, # iterations ], # Input @@ -189,7 +189,7 @@ 0.1, # bilateral_color_sigma 5.0, # gaussian_spatial_sigma 1.0, # update_factor - 1, # compatability_kernel_range + 1, # compatibility_kernel_range 2, # iterations ], # Input From 03b7560d0f5945db949715b219029cfaeb96381d Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sat, 3 Apr 2021 10:06:46 +0800 Subject: [PATCH 137/457] 1900 Add support to save patch data into NIfTI or PNG files (#1922) * [DLMED] SaveImage supports patch data Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma Co-authored-by: Wenqi Li --- monai/data/nifti_saver.py | 30 +++++++++++++++++------ monai/data/png_saver.py | 30 +++++++++++++++++------ monai/data/utils.py | 8 +++++- monai/handlers/segmentation_saver.py | 7 ++++-- monai/transforms/croppad/dictionary.py | 10 +++++++- monai/transforms/io/array.py | 17 ++++++++++--- monai/transforms/io/dictionary.py | 10 +++++--- monai/utils/misc.py | 1 + 
tests/test_file_basename.py | 8 ++++++ tests/test_handler_segmentation_saver.py | 9 +++++-- tests/test_rand_crop_by_pos_neg_labeld.py | 2 ++ tests/test_rand_spatial_crop_samplesd.py | 2 ++ tests/test_save_imaged.py | 17 +++++++++++-- 13 files changed, 123 insertions(+), 28 deletions(-) diff --git a/monai/data/nifti_saver.py b/monai/data/nifti_saver.py index 15e61c79e1..34df819d32 100644 --- a/monai/data/nifti_saver.py +++ b/monai/data/nifti_saver.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Optional, Union +from typing import Dict, Optional, Sequence, Union import numpy as np import torch @@ -93,7 +93,12 @@ def __init__( self.squeeze_end_dims = squeeze_end_dims self.data_root_dir = data_root_dir - def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: + def save( + self, + data: Union[torch.Tensor, np.ndarray], + meta_data: Optional[Dict] = None, + patch_index: Optional[int] = None, + ) -> None: """ Save data into a Nifti file. The meta_data could optionally have the following keys: @@ -112,6 +117,7 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] data: target data content that to be saved as a NIfTI format file. Assuming the data shape starts with a channel dimension and followed by spatial dimensions. meta_data: the meta data information corresponding to the data. + patch_index: if the data is a patch of big image, need to append the patch index to filename. See Also :py:meth:`monai.data.nifti_writer.write_nifti` @@ -125,8 +131,8 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] if isinstance(data, torch.Tensor): data = data.detach().cpu().numpy() - filename = create_file_basename(self.output_postfix, filename, self.output_dir, self.data_root_dir) - filename = f"{filename}{self.output_ext}" + path = create_file_basename(self.output_postfix, filename, self.output_dir, self.data_root_dir, patch_index) + path = f"{path}{self.output_ext}" # change data shape to be (channel, h, w, d) while len(data.shape) < 4: data = np.expand_dims(data, -1) @@ -140,7 +146,7 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] write_nifti( data, - file_name=filename, + file_name=path, affine=affine, target_affine=original_affine, resample=self.resample, @@ -152,7 +158,12 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] output_dtype=self.output_dtype, ) - def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: + def save_batch( + self, + batch_data: Union[torch.Tensor, np.ndarray], + meta_data: Optional[Dict] = None, + patch_indice: Optional[Sequence[int]] = None, + ) -> None: """ Save a batch of data into Nifti format files. @@ -169,6 +180,11 @@ def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Opt Args: batch_data: target batch data content that save into NIfTI format. meta_data: every key-value in the meta_data is corresponding to a batch of data. + patch_indice: if the data is a patch of big image, need to append the patch index to filename. 
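A minimal sketch of the extended batch API, assuming a hypothetical img.nii.gz volume from which four patches were cropped:

    import numpy as np
    from monai.data import NiftiSaver

    saver = NiftiSaver(output_dir="./out", output_postfix="seg")
    batch = np.zeros((4, 1, 8, 8, 8), dtype=np.float32)  # four single-channel patches
    meta = {"filename_or_obj": ["img.nii.gz"] * 4}
    saver.save_batch(batch, meta, patch_indice=[0, 1, 2, 3])
    # expected outputs: ./out/img/img_seg_0.nii.gz ... ./out/img/img_seg_3.nii.gz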
""" for i, data in enumerate(batch_data): # save a batch of files - self.save(data, {k: meta_data[k][i] for k in meta_data} if meta_data else None) + self.save( + data=data, + meta_data={k: meta_data[k][i] for k in meta_data} if meta_data is not None else None, + patch_index=patch_indice[i] if patch_indice is not None else None, + ) diff --git a/monai/data/png_saver.py b/monai/data/png_saver.py index a6cc0e89a2..17087fcaca 100644 --- a/monai/data/png_saver.py +++ b/monai/data/png_saver.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Optional, Union +from typing import Dict, Optional, Sequence, Union import numpy as np import torch @@ -71,7 +71,12 @@ def __init__( self._data_index = 0 - def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: + def save( + self, + data: Union[torch.Tensor, np.ndarray], + meta_data: Optional[Dict] = None, + patch_index: Optional[int] = None, + ) -> None: """ Save data into a png file. The meta_data could optionally have the following keys: @@ -87,6 +92,7 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] Shape of the spatial dimensions (C,H,W). C should be 1, 3 or 4 meta_data: the meta data information corresponding to the data. + patch_index: if the data is a patch of big image, need to append the patch index to filename. Raises: ValueError: When ``data`` channels is not one of [1, 3, 4]. @@ -102,8 +108,8 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] if isinstance(data, torch.Tensor): data = data.detach().cpu().numpy() - filename = create_file_basename(self.output_postfix, filename, self.output_dir, self.data_root_dir) - filename = f"{filename}{self.output_ext}" + path = create_file_basename(self.output_postfix, filename, self.output_dir, self.data_root_dir, patch_index) + path = f"{path}{self.output_ext}" if data.shape[0] == 1: data = data.squeeze(0) @@ -114,18 +120,28 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] write_png( np.asarray(data), - file_name=filename, + file_name=path, output_spatial_shape=spatial_shape, mode=self.mode, scale=self.scale, ) - def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: + def save_batch( + self, + batch_data: Union[torch.Tensor, np.ndarray], + meta_data: Optional[Dict] = None, + patch_indice: Optional[Sequence[int]] = None, + ) -> None: """Save a batch of data into png format files. Args: batch_data: target batch data content that save into png format. meta_data: every key-value in the meta_data is corresponding to a batch of data. + patch_indice: if the data is a patch of big image, need to append the patch index to filename. 
""" for i, data in enumerate(batch_data): # save a batch of files - self.save(data, {k: meta_data[k][i] for k in meta_data} if meta_data else None) + self.save( + data=data, + meta_data={k: meta_data[k][i] for k in meta_data} if meta_data is not None else None, + patch_index=patch_indice[i] if patch_indice is not None else None, + ) diff --git a/monai/data/utils.py b/monai/data/utils.py index a3d8f3128e..938365460b 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -600,6 +600,7 @@ def create_file_basename( input_file_name: str, folder_path: str, data_root_dir: str = "", + patch_index: Optional[int] = None, ) -> str: """ Utility function to create the path to the output file based on the input @@ -623,6 +624,7 @@ def create_file_basename( absolute path. This is used to compute `input_file_rel_path`, the relative path to the file from `data_root_dir` to preserve folder structure when saving in case there are files in different folders with the same file names. + patch_index: if not None, append the patch index to filename. """ # get the filename and directory @@ -641,11 +643,15 @@ def create_file_basename( if not os.path.exists(subfolder_path): os.makedirs(subfolder_path) - if postfix: + if len(postfix) > 0: # add the sub-folder plus the postfix name to become the file basename in the output path output = os.path.join(subfolder_path, filename + "_" + postfix) else: output = os.path.join(subfolder_path, filename) + + if patch_index is not None: + output += f"_{patch_index}" + return os.path.abspath(output) diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py index 9ee7ca67f9..7df10b9dad 100644 --- a/monai/handlers/segmentation_saver.py +++ b/monai/handlers/segmentation_saver.py @@ -16,7 +16,9 @@ from monai.config import DtypeLike from monai.transforms import SaveImage -from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, exact_version, optional_import +from monai.utils import GridSampleMode, GridSamplePadMode +from monai.utils import ImageMetaKey as Key +from monai.utils import InterpolateMode, exact_version, optional_import Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") if TYPE_CHECKING: @@ -143,5 +145,6 @@ def __call__(self, engine: Engine) -> None: """ meta_data = self.batch_transform(engine.state.batch) engine_output = self.output_transform(engine.state.output) - self._saver(engine_output, meta_data) + patch_indice = engine.state.batch.get(Key.PATCH_INDEX, None) + self._saver(engine_output, meta_data, patch_indice) self.logger.info("saved all the model outputs into files.") diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 64e9f862f9..428e35335c 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -41,6 +41,7 @@ map_binary_to_indices, weighted_patch_samples, ) +from monai.utils import ImageMetaKey as Key from monai.utils import Method, NumpyPadMode, ensure_tuple, ensure_tuple_rep, fall_back_tuple from monai.utils.enums import InverseKeys @@ -528,7 +529,12 @@ def randomize(self, data: Optional[Any] = None) -> None: pass def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, np.ndarray]]: - return [self.cropper(data) for _ in range(self.num_samples)] + ret = [] + for i in range(self.num_samples): + cropped = self.cropper(data) + cropped[Key.PATCH_INDEX] = i # type: ignore + ret.append(cropped) + return ret class CropForegroundd(MapTransform, InvertibleTransform): @@ 
-783,6 +789,8 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n # fill in the extra keys with unmodified data for key in set(data.keys()).difference(set(self.keys)): results[i][key] = data[key] + # add patch index in the meta data + results[i][Key.PATCH_INDEX] = i # type: ignore return results diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 61439c0355..b138b97cb2 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -269,8 +269,19 @@ def __init__( self.save_batch = save_batch - def __call__(self, img: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None): + def __call__( + self, + img: Union[torch.Tensor, np.ndarray], + meta_data: Optional[Dict] = None, + patch_index=None, # type is Union[Sequence[int], int, None], can't be compatible with save and save_batch + ): + """ + Args: + img: target data content that save into file. + meta_data: key-value pairs of meta_data corresponding to the data. + patch_index: if the data is a patch of big image, need to append the patch index to filename. + """ if self.save_batch: - self.saver.save_batch(img, meta_data) + self.saver.save_batch(img, meta_data, patch_index) else: - self.saver.save(img, meta_data) + self.saver.save(img, meta_data, patch_index) diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index 6a82ff2267..5b8f0a41d3 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -23,7 +23,9 @@ from monai.data.image_reader import ImageReader from monai.transforms.io.array import LoadImage, SaveImage from monai.transforms.transform import MapTransform -from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode +from monai.utils import GridSampleMode, GridSamplePadMode +from monai.utils import ImageMetaKey as Key +from monai.utils import InterpolateMode __all__ = [ "LoadImaged", @@ -124,7 +126,9 @@ class SaveImaged(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.SaveImage`. - NB: image should include channel dimension: [B],C,H,W,[D]. + Note: + Image should include channel dimension: [B],C,H,W,[D]. + If the data is a patch of big image, will append the patch index to filename. Args: keys: keys of the corresponding items to be transformed. 
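A sketch of the intended end-to-end behaviour, assuming a hypothetical local volume testfile0.nii.gz larger than the crop size: the crop transform attaches patch_index to each sample, and SaveImaged appends it to the file name, matching the updated tests below:

    from monai.transforms import Compose, LoadImaged, RandSpatialCropSamplesd, SaveImaged

    pipeline = Compose(
        [
            LoadImaged(keys="img"),
            RandSpatialCropSamplesd(keys="img", roi_size=(32, 32, 32), num_samples=4),
            SaveImaged(keys="img", output_dir="./out", output_postfix="trans"),
        ]
    )
    patches = pipeline({"img": "testfile0.nii.gz"})  # list of 4 cropped samples
    # expected outputs: ./out/testfile0/testfile0_trans_0.nii.gz ... _3.nii.gz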
@@ -225,7 +229,7 @@ def __call__(self, data): d = dict(data) for key in self.key_iterator(d): meta_data = d[f"{key}_{self.meta_key_postfix}"] if self.meta_key_postfix is not None else None - self._saver(img=d[key], meta_data=meta_data) + self._saver(img=d[key], meta_data=meta_data, patch_index=d.get(Key.PATCH_INDEX, None)) return d diff --git a/monai/utils/misc.py b/monai/utils/misc.py index ee0963548c..bd8e46d8b5 100644 --- a/monai/utils/misc.py +++ b/monai/utils/misc.py @@ -358,3 +358,4 @@ class ImageMetaKey: """ FILENAME_OR_OBJ = "filename_or_obj" + PATCH_INDEX = "patch_index" diff --git a/tests/test_file_basename.py b/tests/test_file_basename.py index 1b67baea8c..cb7ee77e62 100644 --- a/tests/test_file_basename.py +++ b/tests/test_file_basename.py @@ -57,10 +57,18 @@ def test_value(self): expected = os.path.join(output_tmp, "test", "test") self.assertEqual(result, expected) + result = create_file_basename("", "test.txt", output_tmp, "foo", 5) + expected = os.path.join(output_tmp, "test", "test_5") + self.assertEqual(result, expected) + result = create_file_basename("post", "test.tar.gz", output_tmp, "foo") expected = os.path.join(output_tmp, "test", "test_post") self.assertEqual(result, expected) + result = create_file_basename("post", "test.tar.gz", output_tmp, "foo", 8) + expected = os.path.join(output_tmp, "test", "test_post_8") + self.assertEqual(result, expected) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_handler_segmentation_saver.py b/tests/test_handler_segmentation_saver.py index 1a2bbb7fbd..5449530b50 100644 --- a/tests/test_handler_segmentation_saver.py +++ b/tests/test_handler_segmentation_saver.py @@ -40,10 +40,15 @@ def _train_func(engine, batch): saver = SegmentationSaver(output_dir=tempdir, output_postfix="seg", output_ext=output_ext, scale=255) saver.attach(engine) - data = [{"filename_or_obj": ["testfile" + str(i) + ".nii.gz" for i in range(8)]}] + data = [ + { + "filename_or_obj": ["testfile" + str(i) + ".nii.gz" for i in range(8)], + "patch_index": list(range(8)), + } + ] engine.run(data, max_epochs=1) for i in range(8): - filepath = os.path.join("testfile" + str(i), "testfile" + str(i) + "_seg" + output_ext) + filepath = os.path.join("testfile" + str(i), "testfile" + str(i) + "_seg" + f"_{i}" + output_ext) self.assertTrue(os.path.exists(os.path.join(tempdir, filepath))) @parameterized.expand([TEST_CASE_0, TEST_CASE_1]) diff --git a/tests/test_rand_crop_by_pos_neg_labeld.py b/tests/test_rand_crop_by_pos_neg_labeld.py index 06e63c14e8..2744d729a1 100644 --- a/tests/test_rand_crop_by_pos_neg_labeld.py +++ b/tests/test_rand_crop_by_pos_neg_labeld.py @@ -91,6 +91,8 @@ def test_type_shape(self, input_param, input_data, expected_type, expected_shape self.assertTupleEqual(result[0]["image"].shape, expected_shape) self.assertTupleEqual(result[0]["extral"].shape, expected_shape) self.assertTupleEqual(result[0]["label"].shape, expected_shape) + for i, item in enumerate(result): + self.assertEqual(item["patch_index"], i) if __name__ == "__main__": diff --git a/tests/test_rand_spatial_crop_samplesd.py b/tests/test_rand_spatial_crop_samplesd.py index afd7ab602c..5b745add18 100644 --- a/tests/test_rand_spatial_crop_samplesd.py +++ b/tests/test_rand_spatial_crop_samplesd.py @@ -70,6 +70,8 @@ def test_shape(self, input_param, input_data, expected_shape, expected_last): for item, expected in zip(result, expected_shape): self.assertTupleEqual(item["img"].shape, expected) self.assertTupleEqual(item["seg"].shape, expected) + for i, item in enumerate(result): + 
self.assertEqual(item["patch_index"], i) np.testing.assert_allclose(item["img"], expected_last["img"]) np.testing.assert_allclose(item["seg"], expected_last["seg"]) diff --git a/tests/test_save_imaged.py b/tests/test_save_imaged.py index a6ebfe0d8d..b5293473c2 100644 --- a/tests/test_save_imaged.py +++ b/tests/test_save_imaged.py @@ -87,9 +87,20 @@ False, ] +TEST_CASE_6 = [ + { + "img": torch.randint(0, 255, (1, 2, 3, 4)), + "img_meta_dict": {"filename_or_obj": "testfile0.nii.gz"}, + "patch_index": 6, + }, + ".nii.gz", + False, + False, +] + class TestSaveImaged(unittest.TestCase): - @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5]) + @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6]) def test_saved_content(self, test_data, output_ext, resample, save_batch): with tempfile.TemporaryDirectory() as tempdir: trans = SaveImaged( @@ -106,7 +117,9 @@ def test_saved_content(self, test_data, output_ext, resample, save_batch): filepath = os.path.join("testfile" + str(i), "testfile" + str(i) + "_trans" + output_ext) self.assertTrue(os.path.exists(os.path.join(tempdir, filepath))) else: - filepath = os.path.join("testfile0", "testfile0" + "_trans" + output_ext) + patch_index = test_data.get("patch_index", None) + patch_index = f"_{patch_index}" if patch_index is not None else "" + filepath = os.path.join("testfile0", "testfile0" + "_trans" + patch_index + output_ext) self.assertTrue(os.path.exists(os.path.join(tempdir, filepath))) From e7d3d66ccd4fdd4e7de7ccdff0cdc4c4dc1e1e02 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Sat, 3 Apr 2021 04:02:12 +0100 Subject: [PATCH 138/457] Revert "1919 - test pt 2103 (#1920)" (#1925) This reverts commit 4bd26f9f6c18e7c6e68320d19f556705d9afab60. 
Signed-off-by: Wenqi Li Co-authored-by: Nic Ma --- .github/workflows/cron.yml | 4 ++-- .github/workflows/pythonapp.yml | 2 +- Dockerfile | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index 3562672232..761b1f7ebc 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -60,7 +60,7 @@ jobs: cron-pt-image: if: github.repository == 'Project-MONAI/MONAI' container: - image: nvcr.io/nvidia/pytorch:21.03-py3 # testing with the latest pytorch base image + image: nvcr.io/nvidia/pytorch:21.02-py3 # testing with the latest pytorch base image options: "--gpus all" runs-on: [self-hosted, linux, x64, common] steps: @@ -133,7 +133,7 @@ jobs: if: github.repository == 'Project-MONAI/MONAI' needs: cron-gpu # so that monai itself is verified first container: - image: nvcr.io/nvidia/pytorch:21.03-py3 # testing with the latest pytorch base image + image: nvcr.io/nvidia/pytorch:21.02-py3 # testing with the latest pytorch base image options: "--gpus all --ipc=host" runs-on: [self-hosted, linux, x64, common] steps: diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 738d657211..e5803028a0 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -186,7 +186,7 @@ jobs: - environment: PT18+CUDA112 # we explicitly set pytorch to -h to avoid pip install error pytorch: "-h" - base: "nvcr.io/nvidia/pytorch:21.03-py3" + base: "nvcr.io/nvidia/pytorch:21.02-py3" container: image: ${{ matrix.base }} options: --gpus all diff --git a/Dockerfile b/Dockerfile index 54d1f02275..57ea567869 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,7 +11,7 @@ # To build with a different base image # please run `docker build` using the `--build-arg PYTORCH_IMAGE=...` flag. 
-ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:21.03-py3 +ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:21.02-py3 FROM ${PYTORCH_IMAGE} LABEL maintainer="monai.contact@gmail.com" From e871ee3d821bb0fff26baca3f889e9b794a73322 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Sat, 3 Apr 2021 13:24:30 +0100 Subject: [PATCH 139/457] skip warp tests before torch 18 (#1927) Signed-off-by: Wenqi Li --- tests/test_warp.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_warp.py b/tests/test_warp.py index 4ed1562b29..37a8551241 100644 --- a/tests/test_warp.py +++ b/tests/test_warp.py @@ -8,6 +8,7 @@ from monai.config.deviceconfig import USE_COMPILED from monai.networks.blocks.warp import Warp from monai.utils import GridSampleMode, GridSamplePadMode +from tests.utils import SkipIfBeforePyTorchVersion LOW_POWER_TEST_CASES = [ # run with BUILD_MONAI=1 to test csrc/resample, BUILD_MONAI=0 to test native grid_sample [ @@ -103,6 +104,7 @@ def test_ill_shape(self): with self.assertRaisesRegex(ValueError, ""): warp_layer(image=torch.arange(4).reshape((1, 1, 2, 2)).to(dtype=torch.float), ddf=torch.zeros(1, 2, 3, 3)) + @SkipIfBeforePyTorchVersion((1, 8)) def test_grad(self): for b in GridSampleMode: for p in GridSamplePadMode: From e80cc4a1aec9a76a4f24a6bbb141c1279007bd49 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Sat, 3 Apr 2021 14:22:48 +0100 Subject: [PATCH 140/457] update docstrings (#1931) Signed-off-by: Wenqi Li --- monai/apps/pathology/handlers.py | 11 ++++++++++ .../filtering/permutohedral/permutohedral.cpp | 13 +++++++++++ monai/networks/blocks/localnet_block.py | 11 ++++++++++ monai/networks/blocks/regunet_block.py | 1 + monai/networks/blocks/warp.py | 11 ++++++++++ monai/networks/nets/torchvision_fc.py | 22 +++++++++++-------- monai/optimizers/lr_finder.py | 11 ++++++++++ monai/utils/state_cacher.py | 11 ++++++++++ tests/test_cuimage_reader.py | 11 ++++++++++ tests/test_dvf2ddf.py | 11 ++++++++++ tests/test_globalnet.py | 11 ++++++++++ tests/test_lesion_froc.py | 11 ++++++++++ ...local_normalized_cross_correlation_loss.py | 2 +- tests/test_localnet.py | 11 ++++++++++ tests/test_localnet_block.py | 11 ++++++++++ tests/test_masked_inference_wsi_dataset.py | 11 ++++++++++ tests/test_nifti_endianness.py | 11 ++++++++++ tests/test_openslide_reader.py | 11 ++++++++++ tests/test_patch_wsi_dataset.py | 11 ++++++++++ tests/test_smartcache_patch_wsi_dataset.py | 11 ++++++++++ tests/test_warp.py | 11 ++++++++++ 21 files changed, 215 insertions(+), 10 deletions(-) diff --git a/monai/apps/pathology/handlers.py b/monai/apps/pathology/handlers.py index 046e403e0f..f0790c20b1 100644 --- a/monai/apps/pathology/handlers.py +++ b/monai/apps/pathology/handlers.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import logging import os from typing import TYPE_CHECKING, Dict, Optional diff --git a/monai/csrc/filtering/permutohedral/permutohedral.cpp b/monai/csrc/filtering/permutohedral/permutohedral.cpp index 5d6916b8f4..04ef6fa4da 100644 --- a/monai/csrc/filtering/permutohedral/permutohedral.cpp +++ b/monai/csrc/filtering/permutohedral/permutohedral.cpp @@ -1,3 +1,16 @@ +/* +Copyright 2020 - 2021 MONAI Consortium +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + #include "utils/common_utils.h" #include "utils/meta_macros.h" diff --git a/monai/networks/blocks/localnet_block.py b/monai/networks/blocks/localnet_block.py index 4166c08774..cc90e6ed1d 100644 --- a/monai/networks/blocks/localnet_block.py +++ b/monai/networks/blocks/localnet_block.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from typing import Optional, Sequence, Tuple, Type, Union import torch diff --git a/monai/networks/blocks/regunet_block.py b/monai/networks/blocks/regunet_block.py index 591837be75..d2cd3518b9 100644 --- a/monai/networks/blocks/regunet_block.py +++ b/monai/networks/blocks/regunet_block.py @@ -8,6 +8,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from typing import List, Optional, Sequence, Tuple, Type, Union import torch diff --git a/monai/networks/blocks/warp.py b/monai/networks/blocks/warp.py index b9967f2b62..43ada86b27 100644 --- a/monai/networks/blocks/warp.py +++ b/monai/networks/blocks/warp.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
import warnings
from typing import List

diff --git a/monai/networks/nets/torchvision_fc.py b/monai/networks/nets/torchvision_fc.py
index 4fdd0d64ef..8b8a223b55 100644
--- a/monai/networks/nets/torchvision_fc.py
+++ b/monai/networks/nets/torchvision_fc.py
@@ -1,3 +1,14 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from typing import Tuple, Union

 import torch
@@ -13,15 +24,8 @@ class TorchVisionFullyConvModel(torch.nn.Module):

     Args:
         model_name: name of any torchvision with adaptive avg pooling and fully connected layer at the end.
-            - resnet18 (default)
-            - resnet34
-            - resnet50
-            - resnet101
-            - resnet152
-            - resnext50_32x4d
-            - resnext101_32x8d
-            - wide_resnet50_2
-            - wide_resnet101_2
+            ``resnet18`` (default), ``resnet34``, ``resnet50``, ``resnet101``, ``resnet152``,
+            ``resnext50_32x4d``, ``resnext101_32x8d``, ``wide_resnet50_2``, ``wide_resnet101_2``.
         n_classes: number of classes for the last classification layer. Default to 1.
         pool_size: the kernel size for `AvgPool2d` to replace `AdaptiveAvgPool2d`. Default to (7, 7).
         pool_stride: the stride for `AvgPool2d` to replace `AdaptiveAvgPool2d`. Default to 1.
diff --git a/monai/optimizers/lr_finder.py b/monai/optimizers/lr_finder.py
index 9e753a1ced..49d4427b3d 100644
--- a/monai/optimizers/lr_finder.py
+++ b/monai/optimizers/lr_finder.py
@@ -1,3 +1,14 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import warnings
 from functools import partial
 from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Type, Union
diff --git a/monai/utils/state_cacher.py b/monai/utils/state_cacher.py
index 66e9080724..65a6118670 100644
--- a/monai/utils/state_cacher.py
+++ b/monai/utils/state_cacher.py
@@ -1,3 +1,14 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ import copy import os import tempfile diff --git a/tests/test_cuimage_reader.py b/tests/test_cuimage_reader.py index c096bad0c2..2cbfaec113 100644 --- a/tests/test_cuimage_reader.py +++ b/tests/test_cuimage_reader.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os import unittest from unittest import skipUnless diff --git a/tests/test_dvf2ddf.py b/tests/test_dvf2ddf.py index bf04fed8b6..cc3323cf13 100644 --- a/tests/test_dvf2ddf.py +++ b/tests/test_dvf2ddf.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import unittest import numpy as np diff --git a/tests/test_globalnet.py b/tests/test_globalnet.py index 0aab57d272..32bc58f610 100644 --- a/tests/test_globalnet.py +++ b/tests/test_globalnet.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import unittest import numpy as np diff --git a/tests/test_lesion_froc.py b/tests/test_lesion_froc.py index 1f2926631f..2454de88fa 100644 --- a/tests/test_lesion_froc.py +++ b/tests/test_lesion_froc.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import os import unittest from unittest import skipUnless diff --git a/tests/test_local_normalized_cross_correlation_loss.py b/tests/test_local_normalized_cross_correlation_loss.py index bb0bd7b642..31954e727b 100644 --- a/tests/test_local_normalized_cross_correlation_loss.py +++ b/tests/test_local_normalized_cross_correlation_loss.py @@ -1,4 +1,4 @@ -# Copyright 2020 MONAI Consortium +# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/tests/test_localnet.py b/tests/test_localnet.py index df1d9f61cb..dc680f15f9 100644 --- a/tests/test_localnet.py +++ b/tests/test_localnet.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import unittest import torch diff --git a/tests/test_localnet_block.py b/tests/test_localnet_block.py index e6171aeae9..f4e857a0fa 100644 --- a/tests/test_localnet_block.py +++ b/tests/test_localnet_block.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import unittest import torch diff --git a/tests/test_masked_inference_wsi_dataset.py b/tests/test_masked_inference_wsi_dataset.py index 88af8c05c0..ed79b4f3a7 100644 --- a/tests/test_masked_inference_wsi_dataset.py +++ b/tests/test_masked_inference_wsi_dataset.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os import unittest from unittest import skipUnless diff --git a/tests/test_nifti_endianness.py b/tests/test_nifti_endianness.py index b725e2462c..57f26d2247 100644 --- a/tests/test_nifti_endianness.py +++ b/tests/test_nifti_endianness.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os import tempfile import unittest diff --git a/tests/test_openslide_reader.py b/tests/test_openslide_reader.py index e005dbd1c4..c0b395fd02 100644 --- a/tests/test_openslide_reader.py +++ b/tests/test_openslide_reader.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os import unittest from unittest import skipUnless diff --git a/tests/test_patch_wsi_dataset.py b/tests/test_patch_wsi_dataset.py index c4a94a60c4..7c34997872 100644 --- a/tests/test_patch_wsi_dataset.py +++ b/tests/test_patch_wsi_dataset.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os import unittest from unittest import skipUnless diff --git a/tests/test_smartcache_patch_wsi_dataset.py b/tests/test_smartcache_patch_wsi_dataset.py index d7c2ce5bd1..876a30a3b8 100644 --- a/tests/test_smartcache_patch_wsi_dataset.py +++ b/tests/test_smartcache_patch_wsi_dataset.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os import unittest from unittest import skipUnless diff --git a/tests/test_warp.py b/tests/test_warp.py index 37a8551241..c6c79a369a 100644 --- a/tests/test_warp.py +++ b/tests/test_warp.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import unittest import numpy as np From 30523c037ecdac87dc5a68addb8458802ddd6a97 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Sat, 3 Apr 2021 15:34:56 +0100 Subject: [PATCH 141/457] update get_package_version (#1930) Signed-off-by: Wenqi Li --- monai/utils/module.py | 16 +++------------- tests/test_get_package_version.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 13 deletions(-) create mode 100644 tests/test_get_package_version.py diff --git a/monai/utils/module.py b/monai/utils/module.py index 0e11a6531d..c12eaf101d 100644 --- a/monai/utils/module.py +++ b/monai/utils/module.py @@ -250,21 +250,11 @@ def has_option(obj, keywords: Union[str, Sequence[str]]) -> bool: def get_package_version(dep_name, default="NOT INSTALLED or UNKNOWN VERSION."): """ Try to load package and get version. If not found, return `default`. - - If the package was already loaded, leave it. If wasn't previously loaded, unload it. """ - dep_ver = default - dep_already_loaded = dep_name not in sys.modules - dep, has_dep = optional_import(dep_name) - if has_dep: - if hasattr(dep, "__version__"): - dep_ver = dep.__version__ - # if not previously loaded, unload it - if not dep_already_loaded: - del dep - del sys.modules[dep_name] - return dep_ver + if has_dep and hasattr(dep, "__version__"): + return dep.__version__ + return default def get_torch_version_tuple(): diff --git a/tests/test_get_package_version.py b/tests/test_get_package_version.py new file mode 100644 index 0000000000..beddb340ab --- /dev/null +++ b/tests/test_get_package_version.py @@ -0,0 +1,31 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +from monai.utils.module import get_package_version + + +class TestGetVersion(unittest.TestCase): + def test_default(self): + output = get_package_version("42foobarnoexist") + self.assertTrue("UNKNOWN" in output) + + output = get_package_version("numpy") + self.assertFalse("UNKNOWN" in output) + + def test_msg(self): + output = get_package_version("42foobarnoexist", "test") + self.assertTrue("test" in output) + + +if __name__ == "__main__": + unittest.main() From 1fd5e0a0b4ed5fc48bde833f3b1b103b53e37b15 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sat, 3 Apr 2021 23:56:03 +0800 Subject: [PATCH 142/457] 1904 Add early stop handler (#1921) * [DLMED] add EarlyStop handler Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] enhance validation handler Signed-off-by: Nic Ma * [DLMED] add set_trainer support Signed-off-by: Nic Ma * [DLMED] add more check Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] update according to comments Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma Co-authored-by: monai-bot Co-authored-by: Wenqi Li --- docs/source/handlers.rst | 5 ++ monai/handlers/__init__.py | 1 + monai/handlers/checkpoint_saver.py | 1 - monai/handlers/earlystop_handler.py | 95 +++++++++++++++++++++++++ monai/handlers/metric_logger.py | 2 +- monai/handlers/validation_handler.py | 19 +++-- tests/min_tests.py | 1 + tests/test_handler_early_stop.py | 66 +++++++++++++++++ tests/test_handler_prob_map_producer.py | 3 +- tests/test_handler_validation.py | 2 +- 10 files changed, 187 insertions(+), 8 deletions(-) create mode 100644 monai/handlers/earlystop_handler.py create mode 100644 tests/test_handler_early_stop.py diff --git a/docs/source/handlers.rst b/docs/source/handlers.rst index a629b28b27..080e7e138c 100644 --- a/docs/source/handlers.rst +++ b/docs/source/handlers.rst @@ -110,3 +110,8 @@ SmartCache handler ------------------ .. autoclass:: SmartCacheHandler :members: + +EarlyStop handler +----------------- +.. 
autoclass:: EarlyStopHandler
+    :members:
diff --git a/monai/handlers/__init__.py b/monai/handlers/__init__.py
index 5669e8a9ee..a1f86310ae 100644
--- a/monai/handlers/__init__.py
+++ b/monai/handlers/__init__.py
@@ -13,6 +13,7 @@
 from .checkpoint_saver import CheckpointSaver
 from .classification_saver import ClassificationSaver
 from .confusion_matrix import ConfusionMatrix
+from .earlystop_handler import EarlyStopHandler
 from .hausdorff_distance import HausdorffDistance
 from .iteration_metric import IterationMetric
 from .lr_schedule_handler import LrScheduleHandler
diff --git a/monai/handlers/checkpoint_saver.py b/monai/handlers/checkpoint_saver.py
index fd80182ba2..68857e17ff 100644
--- a/monai/handlers/checkpoint_saver.py
+++ b/monai/handlers/checkpoint_saver.py
@@ -17,7 +17,6 @@
 Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
 Checkpoint, _ = optional_import("ignite.handlers", "0.4.4", exact_version, "Checkpoint")
-BaseSaveHandler, _ = optional_import("ignite.handlers.checkpoint", "0.4.4", exact_version, "BaseSaveHandler")

 if TYPE_CHECKING:
     from ignite.engine import Engine
diff --git a/monai/handlers/earlystop_handler.py b/monai/handlers/earlystop_handler.py
new file mode 100644
index 0000000000..99e072b81f
--- /dev/null
+++ b/monai/handlers/earlystop_handler.py
@@ -0,0 +1,95 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING, Callable, Optional
+
+from monai.utils import exact_version, optional_import
+
+Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
+EarlyStopping, _ = optional_import("ignite.handlers", "0.4.4", exact_version, "EarlyStopping")
+
+if TYPE_CHECKING:
+    from ignite.engine import Engine
+else:
+    Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
+
+
+class EarlyStopHandler:
+    """
+    EarlyStopHandler acts as an Ignite handler to stop training if there is no improvement after a given number of events.
+    It's based on the `EarlyStopping` handler in ignite.
+
+    Args:
+        patience: number of events to wait without improvement before stopping the training.
+        score_function: a function taking a single argument, the :class:`~ignite.engine.engine.Engine`
+            object that the handler is attached to (can be a trainer or validator), and returning a score `float`;
+            an improvement is considered if the score is higher.
+        trainer: trainer engine to stop the run if there is no improvement; if None, `set_trainer()` must be called before training.
+        min_delta: a minimum increase in the score to qualify as an improvement,
+            i.e. an increase of less than or equal to `min_delta` will count as no improvement.
+        cumulative_delta: if True, `min_delta` defines an increase since the last `patience` reset, otherwise,
+            it defines an increase after the last event, default to False.
+        epoch_level: check early stopping for every epoch or every iteration of the attached engine,
+            `True` is epoch level, `False` is iteration level, default to epoch level.
+
+    Note:
+        If used in distributed training with the loss value of every iteration as the early-stopping score,
+        the values may be different in different ranks.
+        Users may attach this handler to a validator engine to detect validation metrics and stop the training;
+        in this case, the `score_function` is executed on the validator engine and `trainer` is the trainer engine.
+
+    """
+
+    def __init__(
+        self,
+        patience: int,
+        score_function: Callable,
+        trainer: Optional[Engine] = None,
+        min_delta: float = 0.0,
+        cumulative_delta: bool = False,
+        epoch_level: bool = True,
+    ) -> None:
+        self.patience = patience
+        self.score_function = score_function
+        self.min_delta = min_delta
+        self.cumulative_delta = cumulative_delta
+        self.epoch_level = epoch_level
+        self._handler = None
+
+        if trainer is not None:
+            self.set_trainer(trainer=trainer)
+
+    def attach(self, engine: Engine) -> None:
+        """
+        Args:
+            engine: Ignite Engine, it can be a trainer, validator or evaluator.
+        """
+        if self.epoch_level:
+            engine.add_event_handler(Events.EPOCH_COMPLETED, self)
+        else:
+            engine.add_event_handler(Events.ITERATION_COMPLETED, self)
+
+    def set_trainer(self, trainer: Engine):
+        """
+        Set the trainer to execute early stopping if it was not set properly in `__init__()`.
+        """
+        self._handler = EarlyStopping(
+            patience=self.patience,
+            score_function=self.score_function,
+            trainer=trainer,
+            min_delta=self.min_delta,
+            cumulative_delta=self.cumulative_delta,
+        )
+
+    def __call__(self, engine: Engine) -> None:
+        if self._handler is None:
+            raise RuntimeError("please set trainer in __init__() or call set_trainer() before training.")
+        self._handler(engine)
diff --git a/monai/handlers/metric_logger.py b/monai/handlers/metric_logger.py
index 778ec13900..f9a3913c56 100644
--- a/monai/handlers/metric_logger.py
+++ b/monai/handlers/metric_logger.py
@@ -48,7 +48,7 @@ class MetricLogger:
         logger = MetricLogger(evaluator=evaluator)

         # construct the trainer with the logger passed in as a handler so that it logs loss values
-        trainer = SupervisedTrainer(..., train_handlers=[logger, ValidationHandler(evaluator, 1)])
+        trainer = SupervisedTrainer(..., train_handlers=[logger, ValidationHandler(1, evaluator)])

         # run training, logger.loss will be a list of (iteration, loss) values, logger.metrics a dict with key
         # "val_mean_dice" storing a list of (iteration, metric) values
diff --git a/monai/handlers/validation_handler.py b/monai/handlers/validation_handler.py
index 4458a17380..fbd4b7862e 100644
--- a/monai/handlers/validation_handler.py
+++ b/monai/handlers/validation_handler.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional

 from monai.engines.evaluator import Evaluator
 from monai.utils import exact_version, optional_import
@@ -28,11 +28,12 @@ class ValidationHandler:

     """

-    def __init__(self, validator: Evaluator, interval: int, epoch_level: bool = True) -> None:
+    def __init__(self, interval: int, validator: Optional[Evaluator] = None, epoch_level: bool = True) -> None:
         """
         Args:
-            validator: run the validator when trigger validation, suppose to be Evaluator.
             interval: do validation every N epochs or every N iterations during training.
+            validator: run the validator when validation is triggered; supposed to be an Evaluator.
+                if None, `set_validator()` should be called before training.
             epoch_level: execute validation every N epochs or N iterations.
                 `True` is epoch level, `False` is iteration level.
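For orientation, a minimal sketch of driving the new EarlyStopHandler, mirroring the unit test added with this patch; the constant loss never improves, so with iteration-level checking the run stops after `patience + 1` iterations instead of completing all eight (the toy `_train_func` is illustrative only):

from ignite.engine import Engine

from monai.handlers import EarlyStopHandler


def _train_func(engine, batch):
    return {"loss": 1.5}  # constant loss: the score below never improves


trainer = Engine(_train_func)
EarlyStopHandler(
    patience=2,
    score_function=lambda e: -e.state.output["loss"],  # higher score is better, so negate the loss
    trainer=trainer,
    epoch_level=False,  # check after every iteration rather than every epoch
).attach(trainer)

trainer.run(range(4), max_epochs=2)  # terminates at iteration 3 instead of 8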
@@ -40,12 +41,20 @@ def __init__(self, validator: Evaluator, interval: int, epoch_level: bool = True
             TypeError: When ``validator`` is not a ``monai.engines.evaluator.Evaluator``.

         """
-        if not isinstance(validator, Evaluator):
+        if validator is not None and not isinstance(validator, Evaluator):
             raise TypeError(f"validator must be a monai.engines.evaluator.Evaluator but is {type(validator).__name__}.")
         self.validator = validator
         self.interval = interval
         self.epoch_level = epoch_level

+    def set_validator(self, validator: Evaluator):
+        """
+        Set the validator if it was not set in `__init__()`.
+        """
+        if not isinstance(validator, Evaluator):
+            raise TypeError(f"validator must be a monai.engines.evaluator.Evaluator but is {type(validator).__name__}.")
+        self.validator = validator
+
     def attach(self, engine: Engine) -> None:
         """
         Args:
@@ -61,4 +70,6 @@ def __call__(self, engine: Engine) -> None:
         Args:
             engine: Ignite Engine, it can be a trainer, validator or evaluator.
         """
+        if self.validator is None:
+            raise RuntimeError("please set validator in __init__() or call `set_validator()` before training.")
         self.validator.run(engine.state.epoch)
diff --git a/tests/min_tests.py b/tests/min_tests.py
index 06231af0a1..abb5b73764 100644
--- a/tests/min_tests.py
+++ b/tests/min_tests.py
@@ -112,6 +112,7 @@ def run_testsuit():
         "test_save_imaged",
         "test_ensure_channel_first",
         "test_ensure_channel_firstd",
+        "test_handler_early_stop",
     ]

     assert sorted(exclude_cases) == sorted(set(exclude_cases)), f"Duplicated items in {exclude_cases}"
diff --git a/tests/test_handler_early_stop.py b/tests/test_handler_early_stop.py
new file mode 100644
index 0000000000..efe8e89825
--- /dev/null
+++ b/tests/test_handler_early_stop.py
@@ -0,0 +1,66 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import unittest + +from ignite.engine import Engine, Events + +from monai.handlers import EarlyStopHandler + + +class TestHandlerEarlyStop(unittest.TestCase): + def test_early_stop_train_loss(self): + def _train_func(engine, batch): + return {"loss": 1.5} + + trainer = Engine(_train_func) + EarlyStopHandler( + patience=5, + score_function=lambda x: x.state.output["loss"], + trainer=trainer, + epoch_level=False, + ).attach(trainer) + + trainer.run(range(4), max_epochs=2) + self.assertEqual(trainer.state.iteration, 6) + self.assertEqual(trainer.state.epoch, 2) + + def test_early_stop_val_metric(self): + def _train_func(engine, batch): + pass + + trainer = Engine(_train_func) + validator = Engine(_train_func) + validator.state.metrics["val_acc"] = 0.90 + + @trainer.on(Events.EPOCH_COMPLETED) + def run_validation(engine): + validator.state.metrics["val_acc"] += 0.01 + validator.run(range(3)) + + handler = EarlyStopHandler( + patience=3, + score_function=lambda x: x.state.metrics["val_acc"], + trainer=None, + min_delta=0.1, + cumulative_delta=True, + epoch_level=True, + ) + handler.attach(validator) + handler.set_trainer(trainer=trainer) + + trainer.run(range(3), max_epochs=5) + self.assertEqual(trainer.state.iteration, 12) + self.assertEqual(trainer.state.epoch, 4) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_handler_prob_map_producer.py b/tests/test_handler_prob_map_producer.py index 8bf42131b4..4f719fccc0 100644 --- a/tests/test_handler_prob_map_producer.py +++ b/tests/test_handler_prob_map_producer.py @@ -82,8 +82,9 @@ def inference(enging, batch): evaluator = TestEvaluator(torch.device("cpu:0"), data_loader, size, val_handlers=[prob_map_gen]) # set up validation handler - validation = ValidationHandler(evaluator, interval=1) + validation = ValidationHandler(interval=1, validator=None) validation.attach(engine) + validation.set_validator(validator=evaluator) engine.run(data_loader) diff --git a/tests/test_handler_validation.py b/tests/test_handler_validation.py index 11a51c7213..06f400109d 100644 --- a/tests/test_handler_validation.py +++ b/tests/test_handler_validation.py @@ -37,7 +37,7 @@ def _train_func(engine, batch): # set up testing handler val_data_loader = torch.utils.data.DataLoader(Dataset(data)) evaluator = TestEvaluator(torch.device("cpu:0"), val_data_loader) - saver = ValidationHandler(evaluator, interval=2) + saver = ValidationHandler(interval=2, validator=evaluator) saver.attach(engine) engine.run(data, max_epochs=5) From 481f0cea7286d1918b9c73717f2290dd277bb80a Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sun, 4 Apr 2021 03:03:17 +0800 Subject: [PATCH 143/457] 1900 Change to save `patch_index` in the meta_dict (#1933) * [DLMED] change to save patch_index in meta_dict Signed-off-by: Nic Ma * [DLMED] remove error import Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] fix CI tests Signed-off-by: Nic Ma Co-authored-by: monai-bot --- monai/data/nifti_saver.py | 27 +++++-------------- monai/data/png_saver.py | 27 +++++-------------- monai/handlers/segmentation_saver.py | 7 ++--- monai/transforms/croppad/dictionary.py | 32 +++++++++++++++++++---- monai/transforms/io/array.py | 13 +++------ monai/transforms/io/dictionary.py | 6 ++--- tests/test_rand_crop_by_pos_neg_labeld.py | 2 +- tests/test_rand_spatial_crop_samplesd.py | 3 ++- tests/test_save_imaged.py | 2 +- 9 files changed, 53 insertions(+), 66 deletions(-) diff --git a/monai/data/nifti_saver.py b/monai/data/nifti_saver.py index 34df819d32..0ff719023c 
100644 --- a/monai/data/nifti_saver.py +++ b/monai/data/nifti_saver.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Optional, Sequence, Union +from typing import Dict, Optional, Union import numpy as np import torch @@ -93,12 +93,7 @@ def __init__( self.squeeze_end_dims = squeeze_end_dims self.data_root_dir = data_root_dir - def save( - self, - data: Union[torch.Tensor, np.ndarray], - meta_data: Optional[Dict] = None, - patch_index: Optional[int] = None, - ) -> None: + def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: """ Save data into a Nifti file. The meta_data could optionally have the following keys: @@ -107,6 +102,7 @@ def save( - ``'original_affine'`` -- for data orientation handling, defaulting to an identity matrix. - ``'affine'`` -- for data output affine, defaulting to an identity matrix. - ``'spatial_shape'`` -- for data output shape. + - ``'patch_index'`` -- if the data is a patch of big image, append the patch index to filename. When meta_data is specified, the saver will try to resample batch data from the space defined by "affine" to the space defined by "original_affine". @@ -117,7 +113,6 @@ def save( data: target data content that to be saved as a NIfTI format file. Assuming the data shape starts with a channel dimension and followed by spatial dimensions. meta_data: the meta data information corresponding to the data. - patch_index: if the data is a patch of big image, need to append the patch index to filename. See Also :py:meth:`monai.data.nifti_writer.write_nifti` @@ -127,6 +122,7 @@ def save( original_affine = meta_data.get("original_affine", None) if meta_data else None affine = meta_data.get("affine", None) if meta_data else None spatial_shape = meta_data.get("spatial_shape", None) if meta_data else None + patch_index = meta_data.get(Key.PATCH_INDEX, None) if meta_data else None if isinstance(data, torch.Tensor): data = data.detach().cpu().numpy() @@ -158,12 +154,7 @@ def save( output_dtype=self.output_dtype, ) - def save_batch( - self, - batch_data: Union[torch.Tensor, np.ndarray], - meta_data: Optional[Dict] = None, - patch_indice: Optional[Sequence[int]] = None, - ) -> None: + def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: """ Save a batch of data into Nifti format files. @@ -180,11 +171,7 @@ def save_batch( Args: batch_data: target batch data content that save into NIfTI format. meta_data: every key-value in the meta_data is corresponding to a batch of data. - patch_indice: if the data is a patch of big image, need to append the patch index to filename. + """ for i, data in enumerate(batch_data): # save a batch of files - self.save( - data=data, - meta_data={k: meta_data[k][i] for k in meta_data} if meta_data is not None else None, - patch_index=patch_indice[i] if patch_indice is not None else None, - ) + self.save(data=data, meta_data={k: meta_data[k][i] for k in meta_data} if meta_data is not None else None) diff --git a/monai/data/png_saver.py b/monai/data/png_saver.py index 17087fcaca..880f6b204f 100644 --- a/monai/data/png_saver.py +++ b/monai/data/png_saver.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, Optional, Sequence, Union +from typing import Dict, Optional, Union import numpy as np import torch @@ -71,18 +71,14 @@ def __init__( self._data_index = 0 - def save( - self, - data: Union[torch.Tensor, np.ndarray], - meta_data: Optional[Dict] = None, - patch_index: Optional[int] = None, - ) -> None: + def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: """ Save data into a png file. The meta_data could optionally have the following keys: - ``'filename_or_obj'`` -- for output file name creation, corresponding to filename or object. - ``'spatial_shape'`` -- for data output shape. + - ``'patch_index'`` -- if the data is a patch of big image, append the patch index to filename. If meta_data is None, use the default index (starting from 0) as the filename. @@ -92,7 +88,6 @@ def save( Shape of the spatial dimensions (C,H,W). C should be 1, 3 or 4 meta_data: the meta data information corresponding to the data. - patch_index: if the data is a patch of big image, need to append the patch index to filename. Raises: ValueError: When ``data`` channels is not one of [1, 3, 4]. @@ -104,6 +99,7 @@ def save( filename = meta_data[Key.FILENAME_OR_OBJ] if meta_data else str(self._data_index) self._data_index += 1 spatial_shape = meta_data.get("spatial_shape", None) if meta_data and self.resample else None + patch_index = meta_data.get(Key.PATCH_INDEX, None) if meta_data else None if isinstance(data, torch.Tensor): data = data.detach().cpu().numpy() @@ -126,22 +122,13 @@ def save( scale=self.scale, ) - def save_batch( - self, - batch_data: Union[torch.Tensor, np.ndarray], - meta_data: Optional[Dict] = None, - patch_indice: Optional[Sequence[int]] = None, - ) -> None: + def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: """Save a batch of data into png format files. Args: batch_data: target batch data content that save into png format. meta_data: every key-value in the meta_data is corresponding to a batch of data. - patch_indice: if the data is a patch of big image, need to append the patch index to filename. 
+
         """
         for i, data in enumerate(batch_data):  # save a batch of files
-            self.save(
-                data=data,
-                meta_data={k: meta_data[k][i] for k in meta_data} if meta_data is not None else None,
-                patch_index=patch_indice[i] if patch_indice is not None else None,
-            )
+            self.save(data=data, meta_data={k: meta_data[k][i] for k in meta_data} if meta_data is not None else None)
diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py
index 7df10b9dad..9ee7ca67f9 100644
--- a/monai/handlers/segmentation_saver.py
+++ b/monai/handlers/segmentation_saver.py
@@ -16,9 +16,7 @@

 from monai.config import DtypeLike
 from monai.transforms import SaveImage
-from monai.utils import GridSampleMode, GridSamplePadMode
-from monai.utils import ImageMetaKey as Key
-from monai.utils import InterpolateMode, exact_version, optional_import
+from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, exact_version, optional_import

 Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
 if TYPE_CHECKING:
@@ -145,6 +143,5 @@ def __call__(self, engine: Engine) -> None:
         """
         meta_data = self.batch_transform(engine.state.batch)
         engine_output = self.output_transform(engine.state.output)
-        patch_indice = engine.state.batch.get(Key.PATCH_INDEX, None)
-        self._saver(engine_output, meta_data, patch_indice)
+        self._saver(engine_output, meta_data)
         self.logger.info("saved all the model outputs into files.")
diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py
index 428e35335c..1d4fcfdb1f 100644
--- a/monai/transforms/croppad/dictionary.py
+++ b/monai/transforms/croppad/dictionary.py
@@ -483,7 +483,7 @@ class RandSpatialCropSamplesd(RandomizableTransform, MapTransform):
     Crop image with random size or specific size ROI to generate a list of N samples.
     It can crop at a random position as center or at the image center. And allows to set
     the minimum size to limit the randomly generated ROI. Suppose all the expected fields
-    specified by `keys` have same shape.
+    specified by `keys` have same shape, and add `patch_index` to the corresponding meta data.
     It will return a list of dictionaries for all the cropped images.

     Args:
@@ -495,6 +495,9 @@
         random_center: crop at random position as center or the image center.
         random_size: crop with random size or specific size ROI. The actual size
             is sampled from `randint(roi_size, img_size)`.
+        meta_key_postfix: use `key_{postfix}` to fetch the meta data according to the key data,
+            default is `meta_dict`, the meta data is a dictionary object.
+            used to add `patch_index` to the meta dict.
         allow_missing_keys: don't raise exception if key is missing.
Raises:
@@ -509,6 +512,7 @@ def __init__(
         num_samples: int,
         random_center: bool = True,
         random_size: bool = True,
+        meta_key_postfix: str = "meta_dict",
         allow_missing_keys: bool = False,
     ) -> None:
         RandomizableTransform.__init__(self, prob=1.0, do_transform=True)
@@ -517,6 +521,7 @@
             raise ValueError(f"num_samples must be positive, got {num_samples}.")
         self.num_samples = num_samples
         self.cropper = RandSpatialCropd(keys, roi_size, random_center, random_size, allow_missing_keys)
+        self.meta_key_postfix = meta_key_postfix

     def set_random_state(
         self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
@@ -530,9 +535,15 @@ def randomize(self, data: Optional[Any] = None) -> None:

     def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, np.ndarray]]:
         ret = []
+        d = dict(data)
         for i in range(self.num_samples):
-            cropped = self.cropper(data)
-            cropped[Key.PATCH_INDEX] = i  # type: ignore
+            cropped = self.cropper(d)
+            # add `patch_index` to the meta data
+            for key in self.key_iterator(d):
+                meta_data_key = f"{key}_{self.meta_key_postfix}"
+                if meta_data_key not in cropped:
+                    cropped[meta_data_key] = {}  # type: ignore
+                cropped[meta_data_key][Key.PATCH_INDEX] = i
             ret.append(cropped)
         return ret

@@ -687,6 +698,8 @@ class RandCropByPosNegLabeld(RandomizableTransform, MapTransform):
     Dictionary-based version :py:class:`monai.transforms.RandCropByPosNegLabel`.
     Crop random fixed sized regions with the center being a foreground or background voxel
     based on the Pos Neg Ratio.
+    Suppose all the expected fields specified by `keys` have same shape,
+    and add `patch_index` to the corresponding meta data.
     And will return a list of dictionaries for all the cropped images.

     Args:
@@ -712,6 +725,9 @@
             `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices_key`
             and `bg_indices_key` together, expect to be 1 dim array of spatial indices after flattening.
             a typical usage is to call `FgBgToIndicesd` transform first and cache the results.
+        meta_key_postfix: use `key_{postfix}` to fetch the meta data according to the key data,
+            default is `meta_dict`, the meta data is a dictionary object.
+            used to add `patch_index` to the meta dict.
         allow_missing_keys: don't raise exception if key is missing.
Raises: @@ -732,6 +748,7 @@ def __init__( image_threshold: float = 0.0, fg_indices_key: Optional[str] = None, bg_indices_key: Optional[str] = None, + meta_key_postfix: str = "meta_dict", allow_missing_keys: bool = False, ) -> None: RandomizableTransform.__init__(self) @@ -748,6 +765,7 @@ def __init__( self.image_threshold = image_threshold self.fg_indices_key = fg_indices_key self.bg_indices_key = bg_indices_key + self.meta_key_postfix = meta_key_postfix self.centers: Optional[List[List[np.ndarray]]] = None def randomize( @@ -789,8 +807,12 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n # fill in the extra keys with unmodified data for key in set(data.keys()).difference(set(self.keys)): results[i][key] = data[key] - # add patch index in the meta data - results[i][Key.PATCH_INDEX] = i # type: ignore + # add `patch_index` to the meta data + for key in self.key_iterator(d): + meta_data_key = f"{key}_{self.meta_key_postfix}" + if meta_data_key not in results[i]: + results[i][meta_data_key] = {} # type: ignore + results[i][meta_data_key][Key.PATCH_INDEX] = i return results diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index b138b97cb2..7a7fcb8cda 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -269,19 +269,14 @@ def __init__( self.save_batch = save_batch - def __call__( - self, - img: Union[torch.Tensor, np.ndarray], - meta_data: Optional[Dict] = None, - patch_index=None, # type is Union[Sequence[int], int, None], can't be compatible with save and save_batch - ): + def __call__(self, img: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None): """ Args: img: target data content that save into file. meta_data: key-value pairs of meta_data corresponding to the data. - patch_index: if the data is a patch of big image, need to append the patch index to filename. 
+ """ if self.save_batch: - self.saver.save_batch(img, meta_data, patch_index) + self.saver.save_batch(img, meta_data) else: - self.saver.save(img, meta_data, patch_index) + self.saver.save(img, meta_data) diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index 5b8f0a41d3..58d6431c74 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -23,9 +23,7 @@ from monai.data.image_reader import ImageReader from monai.transforms.io.array import LoadImage, SaveImage from monai.transforms.transform import MapTransform -from monai.utils import GridSampleMode, GridSamplePadMode -from monai.utils import ImageMetaKey as Key -from monai.utils import InterpolateMode +from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode __all__ = [ "LoadImaged", @@ -229,7 +227,7 @@ def __call__(self, data): d = dict(data) for key in self.key_iterator(d): meta_data = d[f"{key}_{self.meta_key_postfix}"] if self.meta_key_postfix is not None else None - self._saver(img=d[key], meta_data=meta_data, patch_index=d.get(Key.PATCH_INDEX, None)) + self._saver(img=d[key], meta_data=meta_data) return d diff --git a/tests/test_rand_crop_by_pos_neg_labeld.py b/tests/test_rand_crop_by_pos_neg_labeld.py index 2744d729a1..d52ba900ac 100644 --- a/tests/test_rand_crop_by_pos_neg_labeld.py +++ b/tests/test_rand_crop_by_pos_neg_labeld.py @@ -92,7 +92,7 @@ def test_type_shape(self, input_param, input_data, expected_type, expected_shape self.assertTupleEqual(result[0]["extral"].shape, expected_shape) self.assertTupleEqual(result[0]["label"].shape, expected_shape) for i, item in enumerate(result): - self.assertEqual(item["patch_index"], i) + self.assertEqual(item["image_meta_dict"]["patch_index"], i) if __name__ == "__main__": diff --git a/tests/test_rand_spatial_crop_samplesd.py b/tests/test_rand_spatial_crop_samplesd.py index 5b745add18..09688f44b7 100644 --- a/tests/test_rand_spatial_crop_samplesd.py +++ b/tests/test_rand_spatial_crop_samplesd.py @@ -71,7 +71,8 @@ def test_shape(self, input_param, input_data, expected_shape, expected_last): self.assertTupleEqual(item["img"].shape, expected) self.assertTupleEqual(item["seg"].shape, expected) for i, item in enumerate(result): - self.assertEqual(item["patch_index"], i) + self.assertEqual(item["img_meta_dict"]["patch_index"], i) + self.assertEqual(item["seg_meta_dict"]["patch_index"], i) np.testing.assert_allclose(item["img"], expected_last["img"]) np.testing.assert_allclose(item["seg"], expected_last["seg"]) diff --git a/tests/test_save_imaged.py b/tests/test_save_imaged.py index b5293473c2..d6536b3d51 100644 --- a/tests/test_save_imaged.py +++ b/tests/test_save_imaged.py @@ -117,7 +117,7 @@ def test_saved_content(self, test_data, output_ext, resample, save_batch): filepath = os.path.join("testfile" + str(i), "testfile" + str(i) + "_trans" + output_ext) self.assertTrue(os.path.exists(os.path.join(tempdir, filepath))) else: - patch_index = test_data.get("patch_index", None) + patch_index = test_data["img_meta_dict"].get("patch_index", None) patch_index = f"_{patch_index}" if patch_index is not None else "" filepath = os.path.join("testfile0", "testfile0" + "_trans" + patch_index + output_ext) self.assertTrue(os.path.exists(os.path.join(tempdir, filepath))) From 23f98f39fe4fa6b552e9567af328cb844680bcd6 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Sun, 4 Apr 2021 01:30:43 +0100 Subject: [PATCH 144/457] enhance min/exact version check (#1937) --- monai/utils/module.py | 7 +++++++ 1 file changed, 7 insertions(+) 
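The `monai/utils/module.py` hunk below guards `min_version` and `exact_version` against modules that lack a `__version__` attribute; a minimal sketch of the resulting behavior (the `SimpleNamespace` stand-ins are illustrative only, and this assumes `monai` itself is importable):

import types

from monai.utils.module import exact_version, min_version

no_version = types.SimpleNamespace()  # module-like object without __version__
print(min_version(no_version, "1.0"))  # True: warns, then keeps the permissive default
print(exact_version(no_version, "0.4.4"))  # False: an unknown version cannot match exactly

versioned = types.SimpleNamespace(__version__="0.4.4")
print(min_version(versioned, "0.4"))  # True: (0, 4) >= (0, 4)
print(exact_version(versioned, "0.4.4"))  # True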
diff --git a/monai/utils/module.py b/monai/utils/module.py index c12eaf101d..afb4dd9f08 100644 --- a/monai/utils/module.py +++ b/monai/utils/module.py @@ -11,6 +11,7 @@ import inspect import sys +import warnings from importlib import import_module from pkgutil import walk_packages from re import match @@ -95,6 +96,9 @@ def min_version(the_module, min_version_str: str = "") -> bool: Returns True if the module's version is greater or equal to the 'min_version'. When min_version_str is not provided, it always returns True. """ + if not hasattr(the_module, "__version__"): + warnings.warn(f"{the_module} has no attribute __version__ in min_version check.") + return True # min_version is the default, shouldn't be noisy if min_version_str: mod_version = tuple(int(x) for x in the_module.__version__.split(".")[:2]) required = tuple(int(x) for x in min_version_str.split(".")[:2]) @@ -106,6 +110,9 @@ def exact_version(the_module, version_str: str = "") -> bool: """ Returns True if the module's __version__ matches version_str """ + if not hasattr(the_module, "__version__"): + warnings.warn(f"{the_module} has no attribute __version__ in exact_version check.") + return False return bool(the_module.__version__ == version_str) From 348cbe89feadc49fbe50b78588927aa98949924e Mon Sep 17 00:00:00 2001 From: Petru-Daniel Tudosiu Date: Sun, 4 Apr 2021 03:00:43 +0100 Subject: [PATCH 145/457] Enabled partial checkpoint loading (#1936) * Enabled partial checkpoint loading Allowing partial loading of a model via strict=False. Signed-off-by: Petru-Daniel Tudosiu * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] simplify strict arg Signed-off-by: Nic Ma Co-authored-by: monai-bot Co-authored-by: Nic Ma --- monai/handlers/checkpoint_loader.py | 6 ++- tests/test_handler_checkpoint_loader.py | 54 +++++++++++++++++++++++-- 2 files changed, 55 insertions(+), 5 deletions(-) diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py index bb67428bef..40483e8c85 100644 --- a/monai/handlers/checkpoint_loader.py +++ b/monai/handlers/checkpoint_loader.py @@ -44,6 +44,8 @@ class CheckpointLoader: first load the module to CPU and then copy each parameter to where it was saved, which would result in all processes on the same machine using the same set of devices. + strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys + returned by this module's :meth:`~torch.nn.Module.state_dict` function. 
Default: ``True`` """ @@ -53,6 +55,7 @@ def __init__( load_dict: Dict, name: Optional[str] = None, map_location: Optional[Dict] = None, + strict: bool = True, ) -> None: if load_path is None: raise AssertionError("must provide clear path to load checkpoint.") @@ -63,6 +66,7 @@ def __init__( self.load_dict = load_dict self._name = name self.map_location = map_location + self.strict = strict def attach(self, engine: Engine) -> None: """ @@ -82,7 +86,7 @@ def __call__(self, engine: Engine) -> None: # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint prior_max_epochs = engine.state.max_epochs - Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint) + Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict) if engine.state.epoch > prior_max_epochs: raise ValueError( f"Epoch count ({engine.state.epoch}) in checkpoint is larger than " diff --git a/tests/test_handler_checkpoint_loader.py b/tests/test_handler_checkpoint_loader.py index 838cc3f4dd..d58260ac8c 100644 --- a/tests/test_handler_checkpoint_loader.py +++ b/tests/test_handler_checkpoint_loader.py @@ -38,7 +38,7 @@ def test_one_save_one_load(self): engine1.run([0] * 8, max_epochs=5) path = tempdir + "/checkpoint_final_iteration=40.pt" engine2 = Engine(lambda e, b: None) - CheckpointLoader(load_path=path, load_dict={"net": net2, "eng": engine2}).attach(engine2) + CheckpointLoader(load_path=path, load_dict={"net": net2, "eng": engine2}, strict=True).attach(engine2) @engine2.on(Events.STARTED) def check_epoch(engine: Engine): @@ -49,7 +49,7 @@ def check_epoch(engine: Engine): # test bad case with max_epochs smaller than current epoch engine3 = Engine(lambda e, b: None) - CheckpointLoader(load_path=path, load_dict={"net": net2, "eng": engine3}).attach(engine3) + CheckpointLoader(load_path=path, load_dict={"net": net2, "eng": engine3}, strict=True).attach(engine3) try: engine3.run([0] * 8, max_epochs=3) @@ -75,7 +75,7 @@ def test_two_save_one_load(self): engine.run([0] * 8, max_epochs=5) path = tempdir + "/checkpoint_final_iteration=40.pt" engine = Engine(lambda e, b: None) - CheckpointLoader(load_path=path, load_dict={"net": net2}).attach(engine) + CheckpointLoader(load_path=path, load_dict={"net": net2}, strict=True).attach(engine) engine.run([0] * 8, max_epochs=1) torch.testing.assert_allclose(net2.state_dict()["weight"], torch.tensor([0.1])) @@ -96,10 +96,56 @@ def test_save_single_device_load_multi_devices(self): engine.run([0] * 8, max_epochs=5) path = tempdir + "/net_final_iteration=40.pt" engine = Engine(lambda e, b: None) - CheckpointLoader(load_path=path, load_dict={"net": net2}).attach(engine) + CheckpointLoader(load_path=path, load_dict={"net": net2}, strict=True).attach(engine) engine.run([0] * 8, max_epochs=1) torch.testing.assert_allclose(net2.state_dict()["module.weight"].cpu(), torch.tensor([0.1])) + def test_partial_under_load(self): + logging.basicConfig(stream=sys.stdout, level=logging.INFO) + net1 = torch.nn.Sequential(*[torch.nn.PReLU(), torch.nn.PReLU()]) + data1 = net1.state_dict() + data1["0.weight"] = torch.tensor([0.1]) + data1["1.weight"] = torch.tensor([0.2]) + net1.load_state_dict(data1) + + net2 = torch.nn.Sequential(*[torch.nn.PReLU()]) + data2 = net2.state_dict() + data2["0.weight"] = torch.tensor([0.3]) + net2.load_state_dict(data2) + + with tempfile.TemporaryDirectory() as tempdir: + engine = Engine(lambda e, b: None) + CheckpointSaver(save_dir=tempdir, save_dict={"net": net1}, 
save_final=True).attach(engine) + engine.run([0] * 8, max_epochs=5) + path = tempdir + "/net_final_iteration=40.pt" + engine = Engine(lambda e, b: None) + CheckpointLoader(load_path=path, load_dict={"net": net2}, strict=False).attach(engine) + engine.run([0] * 8, max_epochs=1) + torch.testing.assert_allclose(net2.state_dict()["0.weight"].cpu(), torch.tensor([0.1])) + + def test_partial_over_load(self): + logging.basicConfig(stream=sys.stdout, level=logging.INFO) + net1 = torch.nn.Sequential(*[torch.nn.PReLU()]) + data1 = net1.state_dict() + data1["0.weight"] = torch.tensor([0.1]) + net1.load_state_dict(data1) + + net2 = torch.nn.Sequential(*[torch.nn.PReLU(), torch.nn.PReLU()]) + data2 = net2.state_dict() + data2["0.weight"] = torch.tensor([0.2]) + data2["1.weight"] = torch.tensor([0.3]) + net2.load_state_dict(data2) + + with tempfile.TemporaryDirectory() as tempdir: + engine = Engine(lambda e, b: None) + CheckpointSaver(save_dir=tempdir, save_dict={"net": net1}, save_final=True).attach(engine) + engine.run([0] * 8, max_epochs=5) + path = tempdir + "/net_final_iteration=40.pt" + engine = Engine(lambda e, b: None) + CheckpointLoader(load_path=path, load_dict={"net": net2}, strict=False).attach(engine) + engine.run([0] * 8, max_epochs=1) + torch.testing.assert_allclose(net2.state_dict()["0.weight"].cpu(), torch.tensor([0.1])) + if __name__ == "__main__": unittest.main() From da3240f60c98fee2e067337d388f492e5f1d6692 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 5 Apr 2021 03:21:15 +0100 Subject: [PATCH 146/457] fixes issue of typing with py3.6 (#1942) * less warning msg; remove PILImage types Signed-off-by: Wenqi Li * remove engine type Signed-off-by: Wenqi Li * temp tests Signed-off-by: Wenqi Li * add quick py36 37 tests Signed-off-by: Wenqi Li * temp tests Signed-off-by: Wenqi Li * Revert "temp tests" This reverts commit deaed40aa64edeb771f5fe0ce3257d3746b3045d. Signed-off-by: Wenqi Li * Revert "temp tests" This reverts commit 3e8d8f4cf1d9463a5a2075897eed053091b56587. Signed-off-by: Wenqi Li * min test exclude senet test Signed-off-by: Wenqi Li * update Signed-off-by: Wenqi Li * temp test Signed-off-by: Wenqi Li * Revert "temp test" This reverts commit 31f40a00c1456950a0a050d81f067f38ddede70d. 
Signed-off-by: Wenqi Li * update get gpu id Signed-off-by: Wenqi Li * update docsstrings Signed-off-by: Wenqi Li --- .github/workflows/pythonapp.yml | 49 ++++++++++++++++++++++++-- monai/handlers/utils.py | 6 ++-- monai/transforms/utility/array.py | 18 ++++------ monai/transforms/utility/dictionary.py | 23 +++--------- monai/utils/module.py | 10 +++--- tests/min_tests.py | 1 + tests/utils.py | 3 +- 7 files changed, 69 insertions(+), 41 deletions(-) diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index e5803028a0..9425f9fa77 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -41,7 +41,7 @@ jobs: # Git hub actions have 2 cores, so parallize pytype $(pwd)/runtests.sh --codeformat -j 2 - quick-py3: # full dependencies installed + quick-py3: # full dependencies installed tests for different OS runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -105,7 +105,7 @@ jobs: env: QUICKTEST: True - min-dep-py3: # min dependencies installed + min-dep-os: # min dependencies installed tests for different OS runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -154,6 +154,51 @@ jobs: env: QUICKTEST: True + min-dep-py3: # min dependencies installed tests for different python + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [3.6, 3.7] + timeout-minutes: 40 + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Prepare pip wheel + run: | + which python + python -m pip install --user --upgrade pip setuptools wheel + - name: cache weekly timestamp + id: pip-cache + run: | + echo "::set-output name=datew::$(date '+%Y-%V')" + echo "::set-output name=dir::$(pip cache dir)" + shell: bash + - name: cache for pip + uses: actions/cache@v2 + id: cache + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ubuntu-latest-latest-pip-${{ steps.pip-cache.outputs.datew }} + - name: Install the dependencies + run: | + # min. requirements + python -m pip install torch==1.8.1 + python -m pip install -r requirements-min.txt + python -m pip list + BUILD_MONAI=0 python setup.py develop # no compile of extensions + shell: bash + - name: Run quick tests (CPU ${{ runner.os }}) + run: | + python -c 'import torch; print(torch.__version__); print(torch.rand(5,3))' + python -c "import monai; monai.config.print_config()" + python -m tests.min_tests + env: + QUICKTEST: True + GPU-quick-py3: # GPU with full dependencies if: github.repository == 'Project-MONAI/MONAI' strategy: diff --git a/monai/handlers/utils.py b/monai/handlers/utils.py index 2eaf3ab932..4ae38b908a 100644 --- a/monai/handlers/utils.py +++ b/monai/handlers/utils.py @@ -11,7 +11,7 @@ import os from collections import OrderedDict -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union import numpy as np import torch @@ -33,7 +33,7 @@ ] -def stopping_fn_from_metric(metric_name: str) -> Callable[[Engine], Any]: +def stopping_fn_from_metric(metric_name: str): """ Returns a stopping function for ignite.handlers.EarlyStopping using the given metric name. """ @@ -44,7 +44,7 @@ def stopping_fn(engine: Engine): return stopping_fn -def stopping_fn_from_loss() -> Callable[[Engine], Any]: +def stopping_fn_from_loss(): """ Returns a stopping function for ignite.handlers.EarlyStopping using the loss value. 
""" diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 4ad0676fba..8e0dabafb2 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -16,7 +16,7 @@ import logging import sys import time -from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Sequence, Tuple, Union +from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union import numpy as np import torch @@ -26,14 +26,8 @@ from monai.transforms.utils import extreme_points_to_image, get_extreme_points, map_binary_to_indices from monai.utils import ensure_tuple, min_version, optional_import -if TYPE_CHECKING: - from PIL.Image import Image as PILImageImage - from PIL.Image import fromarray as pil_image_fromarray - - has_pil = True -else: - PILImageImage, has_pil = optional_import("PIL.Image", name="Image") - pil_image_fromarray, _ = optional_import("PIL.Image", name="fromarray") +PILImageImage, has_pil = optional_import("PIL.Image", name="Image") +pil_image_fromarray, _ = optional_import("PIL.Image", name="fromarray") __all__ = [ "Identity", @@ -302,7 +296,7 @@ class ToTensor(Transform): Converts the input image to a tensor without applying any other transformations. """ - def __call__(self, img: Union[np.ndarray, torch.Tensor, PILImageImage]) -> torch.Tensor: + def __call__(self, img) -> torch.Tensor: """ Apply the transform to `img` and make it contiguous. """ @@ -316,7 +310,7 @@ class ToNumpy(Transform): Converts the input data to numpy array, can support list or tuple of numbers and PyTorch Tensor. """ - def __call__(self, img: Union[List, Tuple, np.ndarray, torch.Tensor, PILImageImage]) -> np.ndarray: + def __call__(self, img) -> np.ndarray: """ Apply the transform to `img` and make it contiguous. """ @@ -330,7 +324,7 @@ class ToPIL(Transform): Converts the input image (in the form of NumPy array or PyTorch Tensor) to PIL image """ - def __call__(self, img: Union[np.ndarray, torch.Tensor, PILImageImage]) -> PILImageImage: + def __call__(self, img): """ Apply the transform to `img` and make it contiguous. 
""" diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index 63ed6ec305..f57cbd1116 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -17,7 +17,7 @@ import copy import logging -from typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union import numpy as np import torch @@ -48,14 +48,7 @@ ToTensor, ) from monai.transforms.utils import extreme_points_to_image, get_extreme_points -from monai.utils import ensure_tuple, ensure_tuple_rep, optional_import - -if TYPE_CHECKING: - from PIL.Image import Image as PILImageImage - - has_pil = True -else: - PILImageImage, has_pil = optional_import("PIL.Image", name="Image") +from monai.utils import ensure_tuple, ensure_tuple_rep __all__ = [ "Identityd", @@ -401,9 +394,7 @@ def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> No super().__init__(keys, allow_missing_keys) self.converter = ToTensor() - def __call__( - self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]] - ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]: + def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) @@ -425,9 +416,7 @@ def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> No super().__init__(keys, allow_missing_keys) self.converter = ToNumpy() - def __call__( - self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]] - ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]: + def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) @@ -449,9 +438,7 @@ def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> No super().__init__(keys, allow_missing_keys) self.converter = ToPIL() - def __call__( - self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]] - ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor, PILImageImage]]: + def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: d = dict(data) for key in self.key_iterator(d): d[key] = self.converter(d[key]) diff --git a/monai/utils/module.py b/monai/utils/module.py index afb4dd9f08..448046b9e6 100644 --- a/monai/utils/module.py +++ b/monai/utils/module.py @@ -96,14 +96,14 @@ def min_version(the_module, min_version_str: str = "") -> bool: Returns True if the module's version is greater or equal to the 'min_version'. When min_version_str is not provided, it always returns True. 
""" + if not min_version_str: + return True # always valid version if not hasattr(the_module, "__version__"): warnings.warn(f"{the_module} has no attribute __version__ in min_version check.") return True # min_version is the default, shouldn't be noisy - if min_version_str: - mod_version = tuple(int(x) for x in the_module.__version__.split(".")[:2]) - required = tuple(int(x) for x in min_version_str.split(".")[:2]) - return mod_version >= required - return True # always valid version + mod_version = tuple(int(x) for x in the_module.__version__.split(".")[:2]) + required = tuple(int(x) for x in min_version_str.split(".")[:2]) + return mod_version >= required def exact_version(the_module, version_str: str = "") -> bool: diff --git a/tests/min_tests.py b/tests/min_tests.py index abb5b73764..4433081c46 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -94,6 +94,7 @@ def run_testsuit(): "test_smartcachedataset", "test_spacing", "test_spacingd", + "test_senet", "test_surface_distance", "test_zoom", "test_zoom_affine", diff --git a/tests/utils.py b/tests/utils.py index 20f94cd1eb..5fa67f3e49 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -569,13 +569,14 @@ def query_memory(n=2): """ Find best n idle devices and return a string of device ids. """ - bash_string = "nvidia-smi --query-gpu=utilization.gpu,power.draw,memory.used --format=csv,noheader,nounits" + bash_string = "nvidia-smi --query-gpu=power.draw,temperature.gpu,memory.used --format=csv,noheader,nounits" try: p1 = Popen(bash_string.split(), stdout=PIPE) output, error = p1.communicate() free_memory = [x.split(",") for x in output.decode("utf-8").split("\n")[:-1]] free_memory = np.asarray(free_memory, dtype=float).T + free_memory[1] += free_memory[0] # combine 0/1 column measures ids = np.lexsort(free_memory)[:n] except (FileNotFoundError, TypeError, IndexError): ids = range(n) if isinstance(n, int) else [] From 3f2238e7b8af46d424a300cb44ed7748fbc05462 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Mon, 5 Apr 2021 09:08:33 -0400 Subject: [PATCH 147/457] Make ProbNMS a Transform (#1941) * Add ProbNMS to transforts/post/array Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Implement ProbNMSDict Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update the usage and add unittests Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update docs Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Correct docs Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Correct test case Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> Co-authored-by: Wenqi Li --- docs/source/transforms.rst | 5 + docs/source/utils.rst | 5 - monai/apps/pathology/utils.py | 3 +- monai/transforms/__init__.py | 4 + monai/transforms/post/array.py | 95 ++++++++++++++++++ monai/transforms/post/dictionary.py | 57 +++++++++++ monai/utils/__init__.py | 1 - monai/utils/prob_nms.py | 100 ------------------- tests/{test_prob_nms.py => test_probnms.py} | 2 +- tests/test_probnmsd.py | 103 ++++++++++++++++++++ 10 files changed, 267 insertions(+), 108 deletions(-) delete mode 100644 monai/utils/prob_nms.py rename tests/{test_prob_nms.py => test_probnms.py} (98%) create mode 100644 tests/test_probnmsd.py diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index a726b25435..4f039b9c35 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -305,6 +305,11 @@ Post-processing :members: :special-members: 
__call__

+`Prob NMS`
+""""""""""
+.. autoclass:: ProbNMS
+    :members:
+
 `VoteEnsemble`
 """"""""""""""
 .. autoclass:: VoteEnsemble
diff --git a/docs/source/utils.rst b/docs/source/utils.rst
index 071d9ecefd..855954fd29 100644
--- a/docs/source/utils.rst
+++ b/docs/source/utils.rst
@@ -27,11 +27,6 @@ Misc
 .. automodule:: monai.utils.misc
     :members:

-Prob NMS
---------
-.. automodule:: monai.utils.prob_nms
-.. autoclass:: ProbNMS
-    :members:

 Profiling
 ---------
diff --git a/monai/apps/pathology/utils.py b/monai/apps/pathology/utils.py
index ae77bfafd1..0d1f530bff 100644
--- a/monai/apps/pathology/utils.py
+++ b/monai/apps/pathology/utils.py
@@ -14,7 +14,8 @@
 import numpy as np
 import torch

-from monai.utils import ProbNMS, optional_import
+from monai.transforms.post.array import ProbNMS
+from monai.utils import optional_import

 measure, _ = optional_import("skimage.measure")
 ndimage, _ = optional_import("scipy.ndimage")
diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py
index b8cc832db1..b66567e71a 100644
--- a/monai/transforms/__init__.py
+++ b/monai/transforms/__init__.py
@@ -160,6 +160,7 @@
     KeepLargestConnectedComponent,
     LabelToContour,
     MeanEnsemble,
+    ProbNMS,
     VoteEnsemble,
 )
 from .post.dictionary import (
@@ -182,6 +183,9 @@
     MeanEnsembled,
     MeanEnsembleD,
     MeanEnsembleDict,
+    ProbNMSd,
+    ProbNMSD,
+    ProbNMSDict,
     VoteEnsembled,
     VoteEnsembleD,
     VoteEnsembleDict,
diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py
index 6462753cf9..7ac0e6799c 100644
--- a/monai/transforms/post/array.py
+++ b/monai/transforms/post/array.py
@@ -21,6 +21,7 @@
 import torch.nn.functional as F

 from monai.networks import one_hot
+from monai.networks.layers import GaussianFilter
 from monai.transforms.transform import Transform
 from monai.transforms.utils import get_largest_connected_component_mask
 from monai.utils import ensure_tuple
@@ -422,3 +423,97 @@ def __call__(self, img: Union[Sequence[torch.Tensor], torch.Tensor]) -> torch.Te
         return torch.argmax(img_, dim=1, keepdim=has_ch_dim)
     # for One-Hot data, round the float number to 0 or 1
     return torch.round(img_)
+
+
+class ProbNMS(Transform):
+    """
+    Performs probability based non-maximum suppression (NMS) on the probabilities map via
+    iteratively selecting the coordinate with the highest probability and then removing it as well
+    as its surrounding values. The removal range is determined by the parameter `box_size`.
+    If multiple coordinates have the same highest probability, only one of them will be
+    selected.
+
+    Args:
+        spatial_dims: number of spatial dimensions of the input probabilities map.
+            Defaults to 2.
+        sigma: the standard deviation for gaussian filter.
+            It could be a single value, or `spatial_dims` number of values. Defaults to 0.0.
+        prob_threshold: the probability threshold, the function will stop searching if
+            the highest probability is no larger than the threshold. The value should be
+            no less than 0.0. Defaults to 0.5.
+        box_size: the box size (in pixel) to be removed around the pixel with the maximum probability.
+            It can be an integer that defines the size of a square or cube,
+            or a list containing different values for each dimension. Defaults to 48.
+
+    Return:
+        a list of selected lists, where inner lists contain probability and coordinates.
+        For example, for 3D input, the inner lists are in the form of [probability, x, y, z].
+
+    Raises:
+        ValueError: When ``prob_threshold`` is less than 0.0.
+        ValueError: When ``box_size`` is a list or tuple, and its length is not equal to `spatial_dims`.
+        ValueError: When ``box_size`` has a value less than 1.
+
+    """
+
+    def __init__(
+        self,
+        spatial_dims: int = 2,
+        sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 0.0,
+        prob_threshold: float = 0.5,
+        box_size: Union[int, Sequence[int]] = 48,
+    ) -> None:
+        self.sigma = sigma
+        self.spatial_dims = spatial_dims
+        if self.sigma != 0:
+            self.filter = GaussianFilter(spatial_dims=spatial_dims, sigma=sigma)
+        if prob_threshold < 0:
+            raise ValueError("prob_threshold should be no less than 0.0.")
+        self.prob_threshold = prob_threshold
+        if isinstance(box_size, int):
+            self.box_size = np.asarray([box_size] * spatial_dims)
+        else:
+            if len(box_size) != spatial_dims:
+                raise ValueError("the sequence length of box_size should be the same as spatial_dims.")
+            self.box_size = np.asarray(box_size)
+        if self.box_size.min() <= 0:
+            raise ValueError("box_size should be larger than 0.")
+
+        self.box_lower_bd = self.box_size // 2
+        self.box_upper_bd = self.box_size - self.box_lower_bd
+
+    def __call__(
+        self,
+        prob_map: Union[np.ndarray, torch.Tensor],
+    ):
+        """
+        prob_map: the input probabilities map, it must have shape (H[, W, ...]).
+        """
+        if self.sigma != 0:
+            if not isinstance(prob_map, torch.Tensor):
+                prob_map = torch.as_tensor(prob_map, dtype=torch.float)
+            self.filter.to(prob_map)
+            prob_map = self.filter(prob_map)
+        else:
+            if not isinstance(prob_map, torch.Tensor):
+                prob_map = prob_map.copy()
+
+        if isinstance(prob_map, torch.Tensor):
+            prob_map = prob_map.detach().cpu().numpy()
+
+        prob_map_shape = prob_map.shape
+
+        outputs = []
+        while np.max(prob_map) > self.prob_threshold:
+            max_idx = np.unravel_index(prob_map.argmax(), prob_map_shape)
+            prob_max = prob_map[max_idx]
+            max_idx_arr = np.asarray(max_idx)
+            outputs.append([prob_max] + list(max_idx_arr))
+
+            idx_min_range = (max_idx_arr - self.box_lower_bd).clip(0, None)
+            idx_max_range = (max_idx_arr + self.box_upper_bd).clip(None, prob_map_shape)
+            # for each dimension, set values within the index ranges to 0
+            slices = tuple(slice(idx_min_range[i], idx_max_range[i]) for i in range(self.spatial_dims))
+            prob_map[slices] = 0
+
+        return outputs
diff --git a/monai/transforms/post/dictionary.py b/monai/transforms/post/dictionary.py
index 6d28f780d4..52bde4ab79 100644
--- a/monai/transforms/post/dictionary.py
+++ b/monai/transforms/post/dictionary.py
@@ -28,6 +28,7 @@
     KeepLargestConnectedComponent,
     LabelToContour,
     MeanEnsemble,
+    ProbNMS,
     VoteEnsemble,
 )
 from monai.transforms.transform import MapTransform
@@ -340,10 +341,66 @@ def __call__(self, data: dict) -> List[dict]:
         return monai.data.decollate_batch(data, self.batch_size)
+
+class ProbNMSd(MapTransform):
+    """
+    Performs probability based non-maximum suppression (NMS) on the probabilities map via
+    iteratively selecting the coordinate with the highest probability and then removing it as well
+    as its surrounding values. The removal range is determined by the parameter `box_size`.
+    If multiple coordinates have the same highest probability, only one of them will be
+    selected.
+
+    Args:
+        spatial_dims: number of spatial dimensions of the input probabilities map.
+            Defaults to 2.
+        sigma: the standard deviation for gaussian filter.
+            It could be a single value, or `spatial_dims` number of values. Defaults to 0.0.
+        prob_threshold: the probability threshold, the function will stop searching if
+            the highest probability is no larger than the threshold. The value should be
+            no less than 0.0. Defaults to 0.5.
+        box_size: the box size (in pixel) to be removed around the pixel with the maximum probability.
+            It can be an integer that defines the size of a square or cube,
+            or a list containing different values for each dimension. Defaults to 48.
+
+    Return:
+        a list of selected lists, where inner lists contain probability and coordinates.
+        For example, for 3D input, the inner lists are in the form of [probability, x, y, z].
+
+    Raises:
+        ValueError: When ``prob_threshold`` is less than 0.0.
+        ValueError: When ``box_size`` is a list or tuple, and its length is not equal to `spatial_dims`.
+        ValueError: When ``box_size`` has a value less than 1.
+
+    """
+
+    def __init__(
+        self,
+        keys: KeysCollection,
+        spatial_dims: int = 2,
+        sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 0.0,
+        prob_threshold: float = 0.5,
+        box_size: Union[int, Sequence[int]] = 48,
+        allow_missing_keys: bool = False,
+    ) -> None:
+        super().__init__(keys, allow_missing_keys)
+        self.prob_nms = ProbNMS(
+            spatial_dims=spatial_dims,
+            sigma=sigma,
+            prob_threshold=prob_threshold,
+            box_size=box_size,
+        )
+
+    def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]):
+        d = dict(data)
+        for key in self.key_iterator(d):
+            d[key] = self.prob_nms(d[key])
+        return d
+
+
 ActivationsD = ActivationsDict = Activationsd
 AsDiscreteD = AsDiscreteDict = AsDiscreted
 KeepLargestConnectedComponentD = KeepLargestConnectedComponentDict = KeepLargestConnectedComponentd
 LabelToContourD = LabelToContourDict = LabelToContourd
 MeanEnsembleD = MeanEnsembleDict = MeanEnsembled
+ProbNMSD = ProbNMSDict = ProbNMSd
 VoteEnsembleD = VoteEnsembleDict = VoteEnsembled
 DecollateD = DecollateDict = Decollated
diff --git a/monai/utils/__init__.py b/monai/utils/__init__.py
index f6a137f47d..d622ce96ae 100644
--- a/monai/utils/__init__.py
+++ b/monai/utils/__init__.py
@@ -69,6 +69,5 @@
     min_version,
     optional_import,
 )
-from .prob_nms import ProbNMS
 from .profiling import PerfContext, torch_profiler_full, torch_profiler_time_cpu_gpu, torch_profiler_time_end_to_end
 from .state_cacher import StateCacher
diff --git a/monai/utils/prob_nms.py b/monai/utils/prob_nms.py
deleted file mode 100644
index c25223d524..0000000000
--- a/monai/utils/prob_nms.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from typing import List, Sequence, Tuple, Union
-
-import numpy as np
-import torch
-
-from monai.networks.layers import GaussianFilter
-
-
-class ProbNMS:
-    """
-    Performs probability based non-maximum suppression (NMS) on the probabilities map via
-    iteratively selecting the coordinate with highest probability and then move it as well
-    as its surrounding values. The remove range is determined by the parameter `box_size`.
-    If multiple coordinates have the same highest probability, only one of them will be
-    selected.
-
-    Args:
-        spatial_dims: number of spatial dimensions of the input probabilities map.
-            Defaults to 2.
-        sigma: the standard deviation for gaussian filter.
-            It could be a single value, or `spatial_dims` number of values. Defaults to 0.0.
-        prob_threshold: the probability threshold, the function will stop searching if
-            the highest probability is no larger than the threshold. The value should be
-            no less than 0.0. Defaults to 0.5.
-        box_size: the box size (in pixel) to be removed around the the pixel with the maximum probability.
-            It can be an integer that defines the size of a square or cube,
-            or a list containing different values for each dimensions. Defaults to 48.
- - Return: - a list of selected lists, where inner lists contain probability and coordinates. - For example, for 3D input, the inner lists are in the form of [probability, x, y, z]. - - Raises: - ValueError: When ``prob_threshold`` is less than 0.0. - ValueError: When ``box_size`` is a list or tuple, and its length is not equal to `spatial_dims`. - ValueError: When ``box_size`` has a less than 1 value. - - """ - - def __init__( - self, - spatial_dims: int = 2, - sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 0.0, - prob_threshold: float = 0.5, - box_size: Union[int, List[int], Tuple[int]] = 48, - ) -> None: - self.sigma = sigma - self.spatial_dims = spatial_dims - if self.sigma != 0: - self.filter = GaussianFilter(spatial_dims=spatial_dims, sigma=sigma) - if prob_threshold < 0: - raise ValueError("prob_threshold should be no less than 0.0.") - self.prob_threshold = prob_threshold - if isinstance(box_size, int): - self.box_size = np.asarray([box_size] * spatial_dims) - else: - if len(box_size) != spatial_dims: - raise ValueError("the sequence length of box_size should be the same as spatial_dims.") - self.box_size = np.asarray(box_size) - if self.box_size.min() <= 0: - raise ValueError("box_size should be larger than 0.") - - self.box_lower_bd = self.box_size // 2 - self.box_upper_bd = self.box_size - self.box_lower_bd - - def __call__( - self, - prob_map: Union[np.ndarray, torch.Tensor], - ): - """ - prob_map: the input probabilities map, it must have shape (H[, W, ...]). - """ - if self.sigma != 0: - if not isinstance(prob_map, torch.Tensor): - prob_map = torch.as_tensor(prob_map, dtype=torch.float) - self.filter.to(prob_map) - prob_map = self.filter(prob_map) - else: - if not isinstance(prob_map, torch.Tensor): - prob_map = prob_map.copy() - - if isinstance(prob_map, torch.Tensor): - prob_map = prob_map.detach().cpu().numpy() - - prob_map_shape = prob_map.shape - - outputs = [] - while np.max(prob_map) > self.prob_threshold: - max_idx = np.unravel_index(prob_map.argmax(), prob_map_shape) - prob_max = prob_map[max_idx] - max_idx_arr = np.asarray(max_idx) - outputs.append([prob_max] + list(max_idx_arr)) - - idx_min_range = (max_idx_arr - self.box_lower_bd).clip(0, None) - idx_max_range = (max_idx_arr + self.box_upper_bd).clip(None, prob_map_shape) - # for each dimension, set values during index ranges to 0 - slices = tuple(slice(idx_min_range[i], idx_max_range[i]) for i in range(self.spatial_dims)) - prob_map[slices] = 0 - - return outputs diff --git a/tests/test_prob_nms.py b/tests/test_probnms.py similarity index 98% rename from tests/test_prob_nms.py rename to tests/test_probnms.py index fb88d9cfb4..e51d1017d8 100644 --- a/tests/test_prob_nms.py +++ b/tests/test_probnms.py @@ -15,7 +15,7 @@ import torch from parameterized import parameterized -from monai.utils import ProbNMS +from monai.transforms.post.array import ProbNMS probs_map_1 = np.random.rand(100, 100).clip(0, 0.5) TEST_CASES_2D_1 = [{"spatial_dims": 2, "prob_threshold": 0.5, "box_size": 10}, probs_map_1, []] diff --git a/tests/test_probnmsd.py b/tests/test_probnmsd.py new file mode 100644 index 0000000000..5b75d4310f --- /dev/null +++ b/tests/test_probnmsd.py @@ -0,0 +1,103 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.transforms.post.dictionary import ProbNMSD + +probs_map_1 = np.random.rand(100, 100).clip(0, 0.5) +TEST_CASES_2D_1 = [{"spatial_dims": 2, "prob_threshold": 0.5, "box_size": 10}, {"prob_map": probs_map_1}, []] + +probs_map_2 = np.random.rand(100, 100).clip(0, 0.5) +probs_map_2[33, 33] = 0.7 +probs_map_2[66, 66] = 0.9 +expected_2 = [[0.9, 66, 66], [0.7, 33, 33]] +TEST_CASES_2D_2 = [ + {"spatial_dims": 2, "prob_threshold": 0.5, "box_size": [10, 10]}, + {"prob_map": probs_map_2}, + expected_2, +] + +probs_map_3 = np.random.rand(100, 100).clip(0, 0.5) +probs_map_3[56, 58] = 0.7 +probs_map_3[60, 66] = 0.8 +probs_map_3[66, 66] = 0.9 +expected_3 = [[0.9, 66, 66], [0.8, 60, 66]] +TEST_CASES_2D_3 = [ + {"spatial_dims": 2, "prob_threshold": 0.5, "box_size": (10, 20)}, + {"prob_map": probs_map_3}, + expected_3, +] + +probs_map_4 = np.random.rand(100, 100).clip(0, 0.5) +probs_map_4[33, 33] = 0.7 +probs_map_4[66, 66] = 0.9 +expected_4 = [[0.9, 66, 66]] +TEST_CASES_2D_4 = [ + {"spatial_dims": 2, "prob_threshold": 0.8, "box_size": 10}, + {"prob_map": probs_map_4}, + expected_4, +] + +probs_map_5 = np.random.rand(100, 100).clip(0, 0.5) +TEST_CASES_2D_5 = [{"spatial_dims": 2, "prob_threshold": 0.5, "sigma": 0.1}, {"prob_map": probs_map_5}, []] + +probs_map_6 = torch.as_tensor(np.random.rand(100, 100).clip(0, 0.5)) +TEST_CASES_2D_6 = [{"spatial_dims": 2, "prob_threshold": 0.5, "sigma": 0.1}, {"prob_map": probs_map_6}, []] + +probs_map_7 = torch.as_tensor(np.random.rand(100, 100).clip(0, 0.5)) +probs_map_7[33, 33] = 0.7 +probs_map_7[66, 66] = 0.9 +if torch.cuda.is_available(): + probs_map_7 = probs_map_7.cuda() +expected_7 = [[0.9, 66, 66], [0.7, 33, 33]] +TEST_CASES_2D_7 = [ + {"spatial_dims": 2, "prob_threshold": 0.5, "sigma": 0.1}, + {"prob_map": probs_map_7}, + expected_7, +] + +probs_map_3d = torch.rand([50, 50, 50]).uniform_(0, 0.5) +probs_map_3d[25, 25, 25] = 0.7 +probs_map_3d[45, 45, 45] = 0.9 +expected_3d = [[0.9, 45, 45, 45], [0.7, 25, 25, 25]] +TEST_CASES_3D = [ + {"spatial_dims": 3, "prob_threshold": 0.5, "box_size": (10, 10, 10)}, + {"prob_map": probs_map_3d}, + expected_3d, +] + + +class TestProbNMS(unittest.TestCase): + @parameterized.expand( + [ + TEST_CASES_2D_1, + TEST_CASES_2D_2, + TEST_CASES_2D_3, + TEST_CASES_2D_4, + TEST_CASES_2D_5, + TEST_CASES_2D_6, + TEST_CASES_2D_7, + TEST_CASES_3D, + ] + ) + def test_output(self, class_args, probs_map, expected): + nms = ProbNMSD(keys="prob_map", **class_args) + output = nms(probs_map) + np.testing.assert_allclose(output["prob_map"], expected) + + +if __name__ == "__main__": + unittest.main() From baf1beccb1282178899a4ec62c6c9fd98acf30c1 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 5 Apr 2021 15:36:57 +0100 Subject: [PATCH 148/457] fixes ThreadDataLoader (#1945) Signed-off-by: Wenqi Li --- monai/data/thread_buffer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/data/thread_buffer.py b/monai/data/thread_buffer.py index da5f864900..8ea71e3555 100644 --- a/monai/data/thread_buffer.py +++ 
b/monai/data/thread_buffer.py @@ -88,7 +88,7 @@ def __init__(self, dataset: Dataset, num_workers: int = 0, **kwargs): super().__init__(dataset, num_workers, **kwargs) # ThreadBuffer will use the inherited __iter__ instead of the one defined below - self.buffer = ThreadBuffer(super()) + self.buffer = ThreadBuffer(super().__iter__()) def __iter__(self): yield from self.buffer From a6b8f2a89754205200ff16de6f6b2446136fd20f Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Mon, 5 Apr 2021 18:02:23 +0100 Subject: [PATCH 149/457] fixes itk dep. version (#1944) Signed-off-by: Wenqi Li --- .github/workflows/pythonapp.yml | 9 ++++++++- docs/requirements.txt | 2 +- requirements-dev.txt | 2 +- setup.cfg | 4 ++-- 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 9425f9fa77..514301ad5b 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -281,12 +281,19 @@ jobs: python -m pip install --upgrade pip wheel python -m pip install ${{ matrix.pytorch }} python -m pip install -r requirements-dev.txt + python -m pip list - name: Run quick tests (GPU) run: | - python -m pip list nvidia-smi + export LAUNCH_DELAY=$(( RANDOM % 30 * 5 )) + echo "Sleep $LAUNCH_DELAY" + sleep $LAUNCH_DELAY export CUDA_VISIBLE_DEVICES=$(coverage run -m tests.utils) echo $CUDA_VISIBLE_DEVICES + stop_time=$((LAUNCH_DELAY + $(date +%s))) + while [ $(date +%s) -lt $stop_time ]; do + python -c 'import torch; torch.rand(5, 3, device=torch.device("cuda:0"))'; + done python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))" python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))' python -c "import monai; monai.config.print_config()" diff --git a/docs/requirements.txt b/docs/requirements.txt index c31f06f2ca..c03e3327f4 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -2,7 +2,7 @@ torch>=1.5 pytorch-ignite==0.4.4 numpy>=1.17 -itk>=5.0 +itk>=5.0, <=5.1.2 nibabel cucim==0.18.2 openslide-python==1.1.2 diff --git a/requirements-dev.txt b/requirements-dev.txt index dfa1eb1853..f9a2464495 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,7 +3,7 @@ pytorch-ignite==0.4.4 gdown>=3.6.4 scipy -itk>=5.0 +itk>=5.0, <=5.1.2 nibabel pillow tensorboard diff --git a/setup.cfg b/setup.cfg index a41081cd11..702c8638c1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -36,7 +36,7 @@ all = pytorch-ignite==0.4.4 gdown>=3.6.4 torchvision - itk>=5.0 + itk>=5.0, <=5.1.2 tqdm>=4.47.0 cucim==0.18.2 openslide-python==1.1.2 @@ -55,7 +55,7 @@ ignite = torchvision = torchvision itk = - itk>=5.0 + itk>=5.0, <=5.1.2 tqdm = tqdm>=4.47.0 lmdb = From 713490b82d2f5f23d7a077a052d64f2cd1e0cfd9 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Mon, 5 Apr 2021 14:09:28 -0400 Subject: [PATCH 150/457] Garbage Collector Handler (#1940) * Implement garbage collector handler Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Make trigger_event lower case Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Add unittest for garbage collector Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Update docs Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Exclude from min test Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix a typo Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Fix a bug Signed-off-by: Behrooz 
<3968947+behxyz@users.noreply.github.com>
---
 docs/source/handlers.rst                |  5 ++
 monai/handlers/__init__.py              |  1 +
 monai/handlers/garbage_collector.py     | 80 +++++++++++++++++++++++++
 tests/min_tests.py                      |  1 +
 tests/test_handler_garbage_collector.py | 77 ++++++++++++++++++++++++
 5 files changed, 164 insertions(+)
 create mode 100644 monai/handlers/garbage_collector.py
 create mode 100644 tests/test_handler_garbage_collector.py

diff --git a/docs/source/handlers.rst b/docs/source/handlers.rst
index 080e7e138c..869467c496 100644
--- a/docs/source/handlers.rst
+++ b/docs/source/handlers.rst
@@ -115,3 +115,8 @@ EarlyStop handler
-----------------
 .. autoclass:: EarlyStopHandler
     :members:
+
+GarbageCollector handler
+------------------------
+.. autoclass:: GarbageCollector
+    :members:
diff --git a/monai/handlers/__init__.py b/monai/handlers/__init__.py
index a1f86310ae..2112b074a0 100644
--- a/monai/handlers/__init__.py
+++ b/monai/handlers/__init__.py
@@ -14,6 +14,7 @@
 from .classification_saver import ClassificationSaver
 from .confusion_matrix import ConfusionMatrix
 from .earlystop_handler import EarlyStopHandler
+from .garbage_collector import GarbageCollector
 from .hausdorff_distance import HausdorffDistance
 from .iteration_metric import IterationMetric
 from .lr_schedule_handler import LrScheduleHandler
diff --git a/monai/handlers/garbage_collector.py b/monai/handlers/garbage_collector.py
new file mode 100644
index 0000000000..7bb59c9049
--- /dev/null
+++ b/monai/handlers/garbage_collector.py
@@ -0,0 +1,80 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+from typing import TYPE_CHECKING
+
+from monai.utils import exact_version, optional_import
+
+if TYPE_CHECKING:
+    from ignite.engine import Engine, Events
+else:
+    Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
+    Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
+
+
+class GarbageCollector:
+    """
+    Run the garbage collector after each trigger event (by default, after each epoch).
+
+    Args:
+        trigger_event: the event that triggers a call to this handler.
+            - "epoch", after completion of each epoch (equivalent of ignite.engine.Events.EPOCH_COMPLETED)
+            - "iteration", after completion of each iteration (equivalent of ignite.engine.Events.ITERATION_COMPLETED)
+            - any ignite built-in event from ignite.engine.Events.
+            Defaults to "epoch".
+        log_level: log level (integer) for some garbage collection information as below. Defaults to 10 (DEBUG).
+            - 50 (CRITICAL)
+            - 40 (ERROR)
+            - 30 (WARNING)
+            - 20 (INFO)
+            - 10 (DEBUG)
+            - 0 (NOTSET)
+    """
+
+    def __init__(self, trigger_event: str = "epoch", log_level: int = 10):
+        if isinstance(trigger_event, Events):
+            self.trigger_event = trigger_event
+        elif trigger_event.lower() == "epoch":
+            self.trigger_event = Events.EPOCH_COMPLETED
+        elif trigger_event.lower() == "iteration":
+            self.trigger_event = Events.ITERATION_COMPLETED
+        else:
+            raise ValueError(
+                f"'trigger_event' should be either epoch, iteration, or an ignite built-in event from"
+                f" ignite.engine.Events, '{trigger_event}' was given."
+            )
+
+        self.log_level = log_level
+
+    def attach(self, engine: Engine) -> None:
+        if not engine.has_event_handler(self, self.trigger_event):
+            engine.add_event_handler(self.trigger_event, self)
+
+    def __call__(self, engine: Engine) -> None:
+        """
+        This method calls the Python garbage collector.
+
+        Args:
+            engine: Ignite Engine, it should be either a trainer or validator.
+        """
+        # get count before garbage collection
+        pre_count = gc.get_count()
+        # first call to garbage collector
+        gc.collect()
+        # second call to garbage collector
+        unreachable = gc.collect()
+        # get count after garbage collection
+        after_count = gc.get_count()
+        engine.logger.log(
+            self.log_level,
+            f"Garbage Count: [before: {pre_count}] -> [after: {after_count}] (unreachable : {unreachable})",
+        )
diff --git a/tests/min_tests.py b/tests/min_tests.py
index 4433081c46..9b96e7eaab 100644
--- a/tests/min_tests.py
+++ b/tests/min_tests.py
@@ -42,6 +42,7 @@ def run_testsuit():
         "test_handler_confusion_matrix",
         "test_handler_confusion_matrix_dist",
         "test_handler_hausdorff_distance",
+        "test_handler_garbage_collector",
         "test_handler_mean_dice",
         "test_handler_prob_map_producer",
         "test_handler_rocauc",
diff --git a/tests/test_handler_garbage_collector.py b/tests/test_handler_garbage_collector.py
new file mode 100644
index 0000000000..5e6bd7275c
--- /dev/null
+++ b/tests/test_handler_garbage_collector.py
@@ -0,0 +1,77 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
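+# Note: gc.get_count() returns the current collection counters as a tuple
+# (gen0, gen1, gen2); the gen0 counter grows with new allocations and drops
+# after a collection, which is what the assertions in this test rely on.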
+
+import gc
+import unittest
+from unittest import skipUnless
+
+import torch
+from ignite.engine import Engine
+from parameterized import parameterized
+
+from monai.data import Dataset
+from monai.handlers import GarbageCollector
+from monai.utils import exact_version, optional_import
+
+Events, has_ignite = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
+
+
+TEST_CASE_0 = [[0, 1, 2], "epoch"]
+
+TEST_CASE_1 = [[0, 1, 2], "iteration"]
+
+TEST_CASE_2 = [[0, 1, 2], Events.EPOCH_COMPLETED]
+
+
+class TestHandlerGarbageCollector(unittest.TestCase):
+    @skipUnless(has_ignite, "Requires ignite")
+    @parameterized.expand(
+        [
+            TEST_CASE_0,
+            TEST_CASE_1,
+            TEST_CASE_2,
+        ]
+    )
+    def test_content(self, data, trigger_event):
+        # set up engine
+        gb_count_dict = {}
+
+        def _train_func(engine, batch):
+            # store garbage collection counts
+            if trigger_event == Events.EPOCH_COMPLETED or trigger_event.lower() == "epoch":
+                if engine.state.iteration % engine.state.epoch_length == 1:
+                    gb_count_dict[engine.state.epoch] = gc.get_count()
+            elif trigger_event.lower() == "iteration":
+                gb_count_dict[engine.state.iteration] = gc.get_count()
+
+        engine = Engine(_train_func)
+
+        # set up testing handler
+        dataset = Dataset(data, transform=None)
+        data_loader = torch.utils.data.DataLoader(dataset, batch_size=1)
+        GarbageCollector(trigger_event=trigger_event, log_level=30).attach(engine)
+
+        engine.run(data_loader, max_epochs=5)
+        print(gb_count_dict)
+
+        first_count = 0
+        for epoch, gb_count in gb_count_dict.items():
+            # At least one zero-generation object
+            self.assertGreater(gb_count[0], 0)
+            if epoch == 1:
+                first_count = gb_count[0]
+            else:
+                # There should be fewer collected objects in the subsequent calls.
+                self.assertLess(gb_count[0], first_count)
+
+
+if __name__ == "__main__":
+    unittest.main()

From f1db865e6c6f8bfc780107dc8b479130b578b18a Mon Sep 17 00:00:00 2001
From: Nic Ma
Date: Tue, 6 Apr 2021 03:20:56 +0800
Subject: [PATCH 151/457] 1939 Add strict_shape option in CheckpointLoader (#1946)

* [DLMED] add strict_shape option

Signed-off-by: Nic Ma

* [DLMED] add unit tests

Signed-off-by: Nic Ma

* update test case

Signed-off-by: Wenqi Li

* fixes test config

Signed-off-by: Wenqi Li

Co-authored-by: Wenqi Li
---
 .github/workflows/pythonapp.yml         |  4 ++--
 monai/handlers/checkpoint_loader.py     | 25 +++++++++++++++++++++++--
 tests/test_handler_checkpoint_loader.py | 24 ++++++++++++++++++++++++
 3 files changed, 49 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index 514301ad5b..30e6102965 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -285,7 +285,7 @@ jobs:
       - name: Run quick tests (GPU)
         run: |
           nvidia-smi
-          export LAUNCH_DELAY=$(( RANDOM % 30 * 5 ))
+          export LAUNCH_DELAY=$(python -c "import numpy; print(numpy.random.randint(30) * 5)")
           echo "Sleep $LAUNCH_DELAY"
           sleep $LAUNCH_DELAY
           export CUDA_VISIBLE_DEVICES=$(coverage run -m tests.utils)
           echo $CUDA_VISIBLE_DEVICES
@@ -298,7 +298,7 @@
           python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
           python -c "import monai; monai.config.print_config()"
           BUILD_MONAI=1 ./runtests.sh --quick --unittests
-          if [ ${{ matrix.environment }} == "PT18+CUDA112" ]; then
+          if [ ${{ matrix.environment }} = "PT18+CUDA112" ]; then
             # test the clang-format tool downloading once
             coverage run -m tests.clang_format_utils
           fi
diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py
index 40483e8c85..6d8f065f1e 100644
--- a/monai/handlers/checkpoint_loader.py
+++ b/monai/handlers/checkpoint_loader.py
@@ -13,6 +13,7 @@
 from typing import TYPE_CHECKING, Dict, Optional

 import torch
+import torch.nn as nn

 from monai.utils import exact_version, optional_import

@@ -44,8 +45,12 @@ class CheckpointLoader:
             first load the module to CPU and then copy each parameter to where it was
             saved, which would result in all processes on the same machine using the
             same set of devices.
-        strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys
-            returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
+        strict: whether to strictly enforce that the keys in `state_dict` match the keys
+            returned by the `torch.nn.Module.state_dict` function. Defaults to `True`.
+        strict_shape: whether to enforce the data shape of the matched layers in the checkpoint,
+            if `False`, it will skip the layers that have a different data shape from the checkpoint content.
+            This can be a useful advanced feature for transfer learning: users should fully
+            understand which layers will have a different shape. Defaults to `True`.

     """

@@ -56,6 +61,7 @@ def __init__(
         self,
         load_path: str,
         load_dict: Dict,
         name: Optional[str] = None,
         map_location: Optional[Dict] = None,
         strict: bool = True,
+        strict_shape: bool = True,
     ) -> None:
         if load_path is None:
             raise AssertionError("must provide clear path to load checkpoint.")
@@ -67,6 +73,7 @@
         self.load_dict = load_dict
         self._name = name
         self.map_location = map_location
         self.strict = strict
+        self.strict_shape = strict_shape

     def attach(self, engine: Engine) -> None:
         """
@@ -84,6 +91,20 @@ def __call__(self, engine: Engine) -> None:
         """
         checkpoint = torch.load(self.load_path, map_location=self.map_location)

+        if not self.strict_shape:
+            k, _ = list(self.load_dict.items())[0]
+            # single object and checkpoint is directly a state_dict
+            if len(self.load_dict) == 1 and k not in checkpoint:
+                checkpoint = {k: checkpoint}
+
+            # skip items that don't match data shape
+            for k, obj in self.load_dict.items():
+                if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
+                    obj = obj.module
+                if isinstance(obj, torch.nn.Module):
+                    d = obj.state_dict()
+                    checkpoint[k] = {k: v for k, v in checkpoint[k].items() if k in d and v.shape == d[k].shape}
+
         # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint
         prior_max_epochs = engine.state.max_epochs
         Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict)
diff --git a/tests/test_handler_checkpoint_loader.py b/tests/test_handler_checkpoint_loader.py
index d58260ac8c..a69193c98c 100644
--- a/tests/test_handler_checkpoint_loader.py
+++ b/tests/test_handler_checkpoint_loader.py
@@ -146,6 +146,30 @@ def test_partial_over_load(self):
             engine.run([0] * 8, max_epochs=1)
             torch.testing.assert_allclose(net2.state_dict()["0.weight"].cpu(), torch.tensor([0.1]))

+    def test_strict_shape(self):
+        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+        net1 = torch.nn.Sequential(*[torch.nn.PReLU(num_parameters=5)])
+        data1 = net1.state_dict()
+        data1["0.weight"] = torch.tensor([1, 2, 3, 4, 5])
+        data1["new"] = torch.tensor(0.1)
+        net1.load_state_dict(data1, strict=False)
+
+        net2 = torch.nn.Sequential(*[torch.nn.PReLU(), torch.nn.PReLU()])
+        data2 = net2.state_dict()
+        data2["0.weight"] = torch.tensor([0.2])
+        data2["1.weight"] = torch.tensor([0.3])
+        net2.load_state_dict(data2)
+
+        with tempfile.TemporaryDirectory() as tempdir:
+            engine = Engine(lambda e, b: None)
+            CheckpointSaver(save_dir=tempdir,
save_dict={"net": net1}, save_final=True).attach(engine) + engine.run([0] * 8, max_epochs=5) + path = tempdir + "/net_final_iteration=40.pt" + engine = Engine(lambda e, b: None) + CheckpointLoader(load_path=path, load_dict={"net": net2}, strict=False, strict_shape=False).attach(engine) + engine.run([0] * 8, max_epochs=1) + torch.testing.assert_allclose(net2.state_dict()["0.weight"].cpu(), torch.tensor([0.2])) + if __name__ == "__main__": unittest.main() From a182da6acd8ca5cfebc8f6a7c5d692913aa896c6 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 6 Apr 2021 05:44:54 +0800 Subject: [PATCH 152/457] 1947 Enhance load decathlon datalist API (#1948) * [DLMED] enhance decathlon datalist Signed-off-by: Nic Ma * [DLMED] fix typo Signed-off-by: Nic Ma * [DLMED] add unit test Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma Co-authored-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/data/decathlon_datalist.py | 30 ++++++++++++++++++-------- monai/transforms/utility/dictionary.py | 4 ---- tests/test_load_decathlon_datalist.py | 25 +++++++++++++++++++++ 3 files changed, 46 insertions(+), 13 deletions(-) diff --git a/monai/data/decathlon_datalist.py b/monai/data/decathlon_datalist.py index 6167e83e47..11fb5edd28 100644 --- a/monai/data/decathlon_datalist.py +++ b/monai/data/decathlon_datalist.py @@ -17,34 +17,43 @@ @overload -def _compute_path(base_dir: str, element: str) -> str: +def _compute_path(base_dir: str, element: str, check_path: bool = False) -> str: ... @overload -def _compute_path(base_dir: str, element: List[str]) -> List[str]: +def _compute_path(base_dir: str, element: List[str], check_path: bool = False) -> List[str]: ... -def _compute_path(base_dir, element): +def _compute_path(base_dir, element, check_path=False): """ Args: base_dir: the base directory of the dataset. element: file path(s) to append to directory. + check_path: if `True`, only compute when the result is an existing path. Raises: TypeError: When ``element`` contains a non ``str``. TypeError: When ``element`` type is not in ``Union[list, str]``. 
""" + + def _join_path(base_dir: str, item: str): + result = os.path.normpath(os.path.join(base_dir, item)) + if check_path and not os.path.exists(result): + # if not an existing path, don't join with base dir + return item + return result + if isinstance(element, str): - return os.path.normpath(os.path.join(base_dir, element)) + return _join_path(base_dir, element) if isinstance(element, list): for e in element: if not isinstance(e, str): - raise TypeError(f"Every file path in element must be a str but got {type(element).__name__}.") - return [os.path.normpath(os.path.join(base_dir, e)) for e in element] - raise TypeError(f"element must be one of (str, list) but is {type(element).__name__}.") + return element + return [_join_path(base_dir, e) for e in element] + return element def _append_paths(base_dir: str, is_segmentation: bool, items: List[Dict]) -> List[Dict]: @@ -63,9 +72,12 @@ def _append_paths(base_dir: str, is_segmentation: bool, items: List[Dict]) -> Li raise TypeError(f"Every item in items must be a dict but got {type(item).__name__}.") for k, v in item.items(): if k == "image": - item[k] = _compute_path(base_dir, v) + item[k] = _compute_path(base_dir, v, check_path=False) elif is_segmentation and k == "label": - item[k] = _compute_path(base_dir, v) + item[k] = _compute_path(base_dir, v, check_path=False) + else: + # for other items, auto detect whether it's a valid path + item[k] = _compute_path(base_dir, v, check_path=True) return items diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index f57cbd1116..c437cd055b 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -656,10 +656,6 @@ def __init__(self, keys: KeysCollection, name: str, dim: int = 0, allow_missing_ name: the name corresponding to the key to store the concatenated data. dim: on which dimension to concatenate the items, default is 0. allow_missing_keys: don't raise exception if key is missing. - - Raises: - ValueError: When insufficient keys are given (``len(self.keys) < 2``). 
- """ super().__init__(keys, allow_missing_keys) self.name = name diff --git a/tests/test_load_decathlon_datalist.py b/tests/test_load_decathlon_datalist.py index 90b9d3ab03..fe7ff6f8a2 100644 --- a/tests/test_load_decathlon_datalist.py +++ b/tests/test_load_decathlon_datalist.py @@ -96,6 +96,31 @@ def test_seg_no_labels(self): result = load_decathlon_datalist(file_path, True, "test", tempdir) self.assertEqual(result[0]["image"], os.path.join(tempdir, "spleen_15.nii.gz")) + def test_additional_items(self): + with tempfile.TemporaryDirectory() as tempdir: + with open(os.path.join(tempdir, "mask31.txt"), "w") as f: + f.write("spleen31 mask") + + test_data = { + "name": "Spleen", + "description": "Spleen Segmentation", + "labels": {"0": "background", "1": "spleen"}, + "training": [ + {"image": "spleen_19.nii.gz", "label": "spleen_19.nii.gz", "mask": "spleen mask"}, + {"image": "spleen_31.nii.gz", "label": "spleen_31.nii.gz", "mask": "mask31.txt"}, + ], + "test": ["spleen_15.nii.gz", "spleen_23.nii.gz"], + } + json_str = json.dumps(test_data) + file_path = os.path.join(tempdir, "test_data.json") + with open(file_path, "w") as json_file: + json_file.write(json_str) + result = load_decathlon_datalist(file_path, True, "training", tempdir) + self.assertEqual(result[0]["image"], os.path.join(tempdir, "spleen_19.nii.gz")) + self.assertEqual(result[0]["label"], os.path.join(tempdir, "spleen_19.nii.gz")) + self.assertEqual(result[1]["mask"], os.path.join(tempdir, "mask31.txt")) + self.assertEqual(result[0]["mask"], "spleen mask") + if __name__ == "__main__": unittest.main() From fbce3c23e29d4215502db668fcd7c316a936fa69 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Mon, 5 Apr 2021 19:32:08 -0400 Subject: [PATCH 153/457] Add progress bar to LesionFROC (#1951) Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- monai/apps/pathology/metrics.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/monai/apps/pathology/metrics.py b/monai/apps/pathology/metrics.py index ae01d8a1db..2140de0080 100644 --- a/monai/apps/pathology/metrics.py +++ b/monai/apps/pathology/metrics.py @@ -9,13 +9,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, List, Tuple +from typing import TYPE_CHECKING, Dict, List, Tuple import numpy as np from monai.apps.pathology.utils import PathologyProbNMS, compute_isolated_tumor_cells, compute_multi_instance_mask from monai.data.image_reader import WSIReader from monai.metrics import compute_fp_tp_probs, compute_froc_curve_data, compute_froc_score +from monai.utils import min_version, optional_import + +if TYPE_CHECKING: + from tqdm import tqdm + + has_tqdm = True +else: + tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm") + +if not has_tqdm: + + def tqdm(x): + return x class LesionFROC: @@ -122,7 +135,7 @@ def compute_fp_tp(self): total_num_targets = 0 num_images = len(self.data) - for sample in self.data: + for sample in tqdm(self.data): probs, y_coord, x_coord = self.prepare_inference_result(sample) ground_truth, itc_labels = self.prepare_ground_truth(sample) # compute FP and TP probabilities for a pair of an image and an ground truth mask From 501026ba11971eb8bbc49706a75f65ecb9e770a4 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Tue, 6 Apr 2021 12:42:46 +0100 Subject: [PATCH 154/457] pipeline for releasing the docker images (#1953) * adds docker tag action Signed-off-by: Wenqi Li * adds tag info Signed-off-by: Wenqi Li * update versioneer Signed-off-by: Wenqi Li --- .github/workflows/release.yml | 29 ++++++ .github/workflows/setupapp.yml | 2 + monai/_version.py | 26 ++++-- versioneer.py | 166 ++++++++++++++++++++------------- 4 files changed, 146 insertions(+), 77 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 840194b1da..f36abc9fcf 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -83,3 +83,32 @@ jobs: password: ${{ secrets.TEST_PYPI }} repository_url: https://test.pypi.org/legacy/ + release_docker: + if: github.repository == 'Project-MONAI/MONAI' + needs: packaging + runs-on: [ self-hosted, linux, x64, build_only ] + steps: + - uses: actions/checkout@v2 + with: + ref: master + - name: Set tag + id: versioning + run: echo ::set-output name=tag::${GITHUB_REF#refs/*/} + - name: Check tag + env: + RELEASE_VERSION: ${{ steps.versioning.outputs.tag }} + run: | + echo "$RELEASE_VERSION" + - if: startsWith(github.ref, 'refs/tags/') + name: build with the tag + env: + RELEASE_VERSION: ${{ steps.versioning.outputs.tag }} + run: | + git fetch --depth=1 origin +refs/tags/*:refs/tags/* + # remove flake package as it is not needed on hub.docker.com + sed -i '/flake/d' requirements-dev.txt + docker build -t projectmonai/monai:"$RELEASE_VERSION" -f Dockerfile . + # distribute with a tag to hub.docker.com + echo "${{ secrets.DOCKER_PW }}" | docker login -u projectmonai --password-stdin + docker push projectmonai/monai:"$RELEASE_VERSION" + docker logout diff --git a/.github/workflows/setupapp.yml b/.github/workflows/setupapp.yml index e5cb9a7cf1..450be403a0 100644 --- a/.github/workflows/setupapp.yml +++ b/.github/workflows/setupapp.yml @@ -159,6 +159,8 @@ jobs: ref: master - name: docker_build run: | + # get tag info for versioning + git fetch --depth=1 origin +refs/tags/*:refs/tags/* # build and run original docker image for local registry docker build -t localhost:5000/local_monai:latest -f Dockerfile . 
docker push localhost:5000/local_monai:latest diff --git a/monai/_version.py b/monai/_version.py index 1b31d5fd1a..79f569dd79 100644 --- a/monai/_version.py +++ b/monai/_version.py @@ -1,3 +1,4 @@ + # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build @@ -5,7 +6,7 @@ # that just contains the computed version number. # This file is released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) +# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer) """Git implementation of _version.py.""" @@ -56,7 +57,7 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" + """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: @@ -92,9 +93,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if verbose: print("unable to find command, tried %s" % (commands,)) return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() + stdout = p.communicate()[0].strip().decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) @@ -164,6 +163,10 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because @@ -299,6 +302,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -337,18 +343,18 @@ def render_pep440(pieces): def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. + """TAG[.post0.devDISTANCE] -- No -dirty. Exceptions: - 1: no tags. 0.post.devDISTANCE + 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] + rendered += ".post0.dev%d" % pieces["distance"] else: # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] + rendered = "0.post0.dev%d" % pieces["distance"] return rendered @@ -494,7 +500,7 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): # lgtm[py/unused-loop-variable] + for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, diff --git a/versioneer.py b/versioneer.py index 441b3d4c2d..9112ac66a5 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,4 +1,4 @@ -# Version: 0.18 +# Version: 0.19 """The Versioneer - like a rocketeer, but for versions. @@ -6,16 +6,12 @@ ============== * like a rocketeer, but for versions! -* https://github.com/warner/python-versioneer +* https://github.com/python-versioneer/python-versioneer * Brian Warner * License: Public Domain -* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy -* [![Latest Version] -(https://pypip.in/version/versioneer/badge.svg?style=flat) -](https://pypi.python.org/pypi/versioneer/) -* [![Build Status] -(https://travis-ci.org/warner/python-versioneer.png?branch=master) -](https://travis-ci.org/warner/python-versioneer) +* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3 +* [![Latest Version][pypi-image]][pypi-url] +* [![Build Status][travis-image]][travis-url] This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update @@ -26,9 +22,10 @@ ## Quick Install -* `pip install versioneer` to somewhere to your $PATH -* add a `[versioneer]` section to your setup.cfg (see below) +* `pip install versioneer` to somewhere in your $PATH +* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md)) * run `versioneer install` in your source tree, commit the results +* Verify version information with `python setup.py version` ## Version Identifiers @@ -60,7 +57,7 @@ for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has -uncommitted changes. +uncommitted changes). The version identifier is used for multiple purposes: @@ -165,7 +162,7 @@ Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github -[issues page](https://github.com/warner/python-versioneer/issues). +[issues page](https://github.com/python-versioneer/python-versioneer/issues). ### Subprojects @@ -193,9 +190,9 @@ Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. -[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking +[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking this issue. The discussion in -[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the +[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve @@ -223,22 +220,10 @@ cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. -[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes +[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. 
-### Unicode version strings - -While Versioneer works (and is continually tested) with both Python 2 and -Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. -Newer releases probably generate unicode version strings on py2. It's not -clear that this is wrong, but it may be surprising for applications when then -write these strings to a network connection or include them in bytes-oriented -APIs like cryptographic checksums. - -[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates -this question. - ## Updating Versioneer @@ -264,6 +249,12 @@ direction and include code from all supported VCS systems, reducing the number of intermediate scripts. +## Similar projects + +* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time + dependency +* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of + versioneer ## License @@ -273,14 +264,15 @@ Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . -""" +[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg +[pypi-url]: https://pypi.python.org/pypi/versioneer/ +[travis-image]: +https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg +[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer -from __future__ import print_function +""" -try: - import configparser -except ImportError: - import ConfigParser as configparser +import configparser import errno import json import os @@ -340,9 +332,9 @@ def get_config_from_root(root): # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.SafeConfigParser() + parser = configparser.ConfigParser() with open(setup_cfg, "r") as f: - parser.readfp(f) + parser.read_file(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): @@ -373,7 +365,7 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" + """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" @@ -409,9 +401,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env= if verbose: print("unable to find command, tried %s" % (commands,)) return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() + stdout = p.communicate()[0].strip().decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) @@ -422,7 +412,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env= LONG_VERSION_PY[ "git" -] = ''' +] = r''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build @@ -430,7 +420,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env= # that just contains the computed version number. # This file is released into the public domain. 
Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) +# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer) """Git implementation of _version.py.""" @@ -481,7 +471,7 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" + """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: @@ -517,9 +507,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() + stdout = p.communicate()[0].strip().decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) @@ -589,6 +577,10 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because @@ -724,6 +716,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -762,18 +757,18 @@ def render_pep440(pieces): def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. + """TAG[.post0.devDISTANCE] -- No -dirty. Exceptions: - 1: no tags. 0.post.devDISTANCE + 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: - rendered += ".post.dev%%d" %% pieces["distance"] + rendered += ".post0.dev%%d" %% pieces["distance"] else: # exception #1 - rendered = "0.post.dev%%d" %% pieces["distance"] + rendered = "0.post0.dev%%d" %% pieces["distance"] return rendered @@ -981,6 +976,10 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because @@ -1117,6 +1116,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. 
+ date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -1189,7 +1191,7 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.18) from +# This file was generated by 'versioneer.py' (0.19) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. @@ -1263,18 +1265,18 @@ def render_pep440(pieces): def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. + """TAG[.post0.devDISTANCE] -- No -dirty. Exceptions: - 1: no tags. 0.post.devDISTANCE + 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] + rendered += ".post0.dev%d" % pieces["distance"] else: # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] + rendered = "0.post0.dev%d" % pieces["distance"] return rendered @@ -1310,7 +1312,7 @@ def render_pep440_old(pieces): The ".dev0" means dirty. - Eexceptions: + Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: @@ -1493,8 +1495,12 @@ def get_version(): return get_versions()["version"] -def get_cmdclass(): - """Get the custom setuptools/distutils subclasses used by Versioneer.""" +def get_cmdclass(cmdclass=None): + """Get the custom setuptools/distutils subclasses used by Versioneer. + + If the package uses a different cmdclass (e.g. one from numpy), it + should be provide as an argument. + """ if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and @@ -1508,9 +1514,9 @@ def get_cmdclass(): # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. - # Also see https://github.com/warner/python-versioneer/issues/52 + # Also see https://github.com/python-versioneer/python-versioneer/issues/52 - cmds = {} + cmds = {} if cmdclass is None else cmdclass.copy() # we add "version" to both distutils and setuptools from distutils.core import Command @@ -1553,7 +1559,9 @@ def run(self): # setup.py egg_info -> ? # we override different "build_py" commands for both environments - if "setuptools" in sys.modules: + if "build_py" in cmds: + _build_py = cmds["build_py"] + elif "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py @@ -1573,6 +1581,31 @@ def run(self): cmds["build_py"] = cmd_build_py + if "setuptools" in sys.modules: + from setuptools.command.build_ext import build_ext as _build_ext + else: + from distutils.command.build_ext import build_ext as _build_ext + + class cmd_build_ext(_build_ext): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + _build_ext.run(self) + if self.inplace: + # build_ext --inplace will only build extensions in + # build/lib<..> dir with no _version.py to write to. + # As in place builds will already have a _version.py + # in the module dir, we do not need to write one. 
+ return + # now locate _version.py in the new build/ directory and replace + # it with an updated value + target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source) + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + + cmds["build_ext"] = cmd_build_ext + if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe @@ -1611,10 +1644,7 @@ def run(self): del cmds["build_py"] if "py2exe" in sys.modules: # py2exe enabled? - try: - from py2exe.distutils_buildexe import py2exe as _py2exe # py3 - except ImportError: - from py2exe.build_exe import py2exe as _py2exe # py2 + from py2exe.distutils_buildexe import py2exe as _py2exe class cmd_py2exe(_py2exe): def run(self): @@ -1643,7 +1673,9 @@ def run(self): cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments - if "setuptools" in sys.modules: + if "sdist" in cmds: + _sdist = cmds["sdist"] + elif "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist @@ -1718,7 +1750,7 @@ def make_release_tree(self, base_dir, files): def do_setup(): - """Main VCS-independent setup function for installing Versioneer.""" + """Do main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) From 81cfbf0a81d0b2d19b9d1604ef5962b9f38e3e4b Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Tue, 6 Apr 2021 15:38:57 +0100 Subject: [PATCH 155/457] remove unused RandomizableTransform (#1952) Signed-off-by: Wenqi Li --- monai/apps/deepgrow/transforms.py | 8 +++----- monai/data/dataset.py | 13 ++++++------- monai/data/image_dataset.py | 5 ++--- monai/data/test_time_augmentation.py | 6 +++--- monai/transforms/compose.py | 6 +++--- monai/transforms/croppad/array.py | 10 +++++----- monai/transforms/croppad/dictionary.py | 14 +++++--------- monai/transforms/intensity/array.py | 7 +++++++ monai/transforms/intensity/dictionary.py | 9 ++++++--- monai/transforms/spatial/array.py | 8 ++++---- monai/transforms/transform.py | 16 +++++++++++++--- monai/transforms/utility/array.py | 4 ++-- monai/transforms/utility/dictionary.py | 8 ++++---- tests/test_compose.py | 8 ++++---- tests/test_rand_lambdad.py | 4 ++-- 15 files changed, 69 insertions(+), 57 deletions(-) diff --git a/monai/apps/deepgrow/transforms.py b/monai/apps/deepgrow/transforms.py index c58d4c1123..3d8f08bc01 100644 --- a/monai/apps/deepgrow/transforms.py +++ b/monai/apps/deepgrow/transforms.py @@ -16,7 +16,7 @@ from monai.config import IndexSelection, KeysCollection from monai.networks.layers import GaussianFilter from monai.transforms import Resize, SpatialCrop -from monai.transforms.transform import MapTransform, RandomizableTransform, Transform +from monai.transforms.transform import MapTransform, Randomizable, Transform from monai.transforms.utils import generate_spatial_bounding_box from monai.utils import InterpolateMode, ensure_tuple_rep, min_version, optional_import @@ -61,7 +61,7 @@ def __call__(self, data): return d -class AddInitialSeedPointd(RandomizableTransform): +class AddInitialSeedPointd(Randomizable): """ Add random guidance as initial seed point for a given label. 
@@ -86,7 +86,6 @@ def __init__( sid: str = "sid", connected_regions: int = 5, ): - super().__init__(prob=1.0, do_transform=True) self.label = label self.sids_key = sids self.sid_key = sid @@ -284,7 +283,7 @@ def __call__(self, data): return d -class AddRandomGuidanced(RandomizableTransform): +class AddRandomGuidanced(Randomizable): """ Add random guidance based on discrepancies that were found between label and prediction. @@ -320,7 +319,6 @@ def __init__( probability: str = "probability", batched: bool = True, ): - super().__init__(prob=1.0, do_transform=True) self.guidance = guidance self.discrepancy = discrepancy self.probability = probability diff --git a/monai/data/dataset.py b/monai/data/dataset.py index 12403bbff1..a09050e5bc 100644 --- a/monai/data/dataset.py +++ b/monai/data/dataset.py @@ -29,7 +29,6 @@ from monai.data.utils import first, pickle_hashing from monai.transforms import Compose, Randomizable, Transform, apply_transform -from monai.transforms.transform import RandomizableTransform from monai.utils import MAX_SEED, get_seed, min_version, optional_import if TYPE_CHECKING: @@ -182,7 +181,7 @@ def _pre_transform(self, item_transformed): raise ValueError("transform must be an instance of monai.transforms.Compose.") for _transform in self.transform.transforms: # execute all the deterministic transforms - if isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform): + if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform): break item_transformed = apply_transform(_transform, item_transformed) return item_transformed @@ -204,7 +203,7 @@ def _post_transform(self, item_transformed): for _transform in self.transform.transforms: if ( start_post_randomize_run - or isinstance(_transform, RandomizableTransform) + or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform) ): start_post_randomize_run = True @@ -547,7 +546,7 @@ def _load_cache_item(self, idx: int): raise ValueError("transform must be an instance of monai.transforms.Compose.") for _transform in self.transform.transforms: # execute all the deterministic transforms - if isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform): + if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform): break item = apply_transform(_transform, item) return item @@ -564,7 +563,7 @@ def _transform(self, index: int): if not isinstance(self.transform, Compose): raise ValueError("transform must be an instance of monai.transforms.Compose.") for _transform in self.transform.transforms: - if start_run or isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform): + if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform): start_run = True data = apply_transform(_transform, data) return data @@ -967,10 +966,10 @@ def __getitem__(self, index: int): # set transforms of each zip component for dataset in self.dataset.data: transform = getattr(dataset, "transform", None) - if isinstance(transform, RandomizableTransform): + if isinstance(transform, Randomizable): transform.set_random_state(seed=self._seed) transform = getattr(self.dataset, "transform", None) - if isinstance(transform, RandomizableTransform): + if isinstance(transform, Randomizable): transform.set_random_state(seed=self._seed) return self.dataset[index] diff --git a/monai/data/image_dataset.py b/monai/data/image_dataset.py index 1074105508..1568e082ee 100644 --- a/monai/data/image_dataset.py +++ 
b/monai/data/image_dataset.py @@ -17,7 +17,6 @@ from monai.config import DtypeLike from monai.data.image_reader import ImageReader from monai.transforms import LoadImage, Randomizable, apply_transform -from monai.transforms.transform import RandomizableTransform from monai.utils import MAX_SEED, get_seed @@ -107,14 +106,14 @@ def __getitem__(self, index: int): label = self.labels[index] if self.transform is not None: - if isinstance(self.transform, RandomizableTransform): + if isinstance(self.transform, Randomizable): self.transform.set_random_state(seed=self._seed) img = apply_transform(self.transform, img) data = [img] if self.seg_transform is not None: - if isinstance(self.seg_transform, RandomizableTransform): + if isinstance(self.seg_transform, Randomizable): self.seg_transform.set_random_state(seed=self._seed) seg = apply_transform(self.seg_transform, seg) diff --git a/monai/data/test_time_augmentation.py b/monai/data/test_time_augmentation.py index 51b95adc58..06e1f63da5 100644 --- a/monai/data/test_time_augmentation.py +++ b/monai/data/test_time_augmentation.py @@ -20,7 +20,7 @@ from monai.data.utils import list_data_collate, pad_list_data_collate from monai.transforms.compose import Compose from monai.transforms.inverse import InvertibleTransform -from monai.transforms.transform import RandomizableTransform +from monai.transforms.transform import Randomizable from monai.transforms.utils import allow_missing_keys_mode from monai.utils.enums import CommonKeys, InverseKeys @@ -47,7 +47,7 @@ class TestTimeAugmentation: Args: transform: transform (or composed) to be applied to each realisation. At least one transform must be of type - `RandomizableTransform`. All random transforms must be of type `InvertibleTransform`. + `Randomizable`. All random transforms must be of type `InvertibleTransform`. batch_size: number of realisations to infer at once. num_workers: how many subprocesses to use for data. inferrer_fn: function to use to perform inference. @@ -96,7 +96,7 @@ def __init__( def _check_transforms(self): """Should be at least 1 random transform, and all random transforms should be invertible.""" ts = [self.transform] if not isinstance(self.transform, Compose) else self.transform.transforms - randoms = np.array([isinstance(t, RandomizableTransform) for t in ts]) + randoms = np.array([isinstance(t, Randomizable) for t in ts]) invertibles = np.array([isinstance(t, InvertibleTransform) for t in ts]) # check at least 1 random if sum(randoms) == 0: diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py index dd40663e2a..ce965b8b18 100644 --- a/monai/transforms/compose.py +++ b/monai/transforms/compose.py @@ -32,7 +32,7 @@ __all__ = ["Compose"] -class Compose(RandomizableTransform, InvertibleTransform): +class Compose(Randomizable, InvertibleTransform): """ ``Compose`` provides the ability to chain a series of calls together in a sequence. 
Each transform in the sequence must take a single argument and @@ -102,14 +102,14 @@ def __init__(self, transforms: Optional[Union[Sequence[Callable], Callable]] = N def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None) -> "Compose": super().set_random_state(seed=seed, state=state) for _transform in self.transforms: - if not isinstance(_transform, RandomizableTransform): + if not isinstance(_transform, Randomizable): continue _transform.set_random_state(seed=self.R.randint(MAX_SEED, dtype="uint32")) return self def randomize(self, data: Optional[Any] = None) -> None: for _transform in self.transforms: - if not isinstance(_transform, RandomizableTransform): + if not isinstance(_transform, Randomizable): continue try: _transform.randomize(data) diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index 159fa1a5f4..c8f7136334 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -20,7 +20,7 @@ from monai.config import IndexSelection from monai.data.utils import get_random_patch, get_valid_patch_size -from monai.transforms.transform import Randomizable, RandomizableTransform, Transform +from monai.transforms.transform import Randomizable, Transform from monai.transforms.utils import ( generate_pos_neg_label_crop_centers, generate_spatial_bounding_box, @@ -279,7 +279,7 @@ def __call__(self, img: np.ndarray): return cropper(img) -class RandSpatialCrop(RandomizableTransform): +class RandSpatialCrop(Randomizable): """ Crop image with random size or specific size ROI. It can crop at a random position as center or at the image center. And allows to set the minimum size to limit the randomly generated ROI. @@ -324,7 +324,7 @@ def __call__(self, img: np.ndarray): return cropper(img) -class RandSpatialCropSamples(RandomizableTransform): +class RandSpatialCropSamples(Randomizable): """ Crop image with random size or specific size ROI to generate a list of N samples. It can crop at a random position as center or at the image center. And allows to set @@ -432,7 +432,7 @@ def __call__(self, img: np.ndarray): return cropped -class RandWeightedCrop(RandomizableTransform): +class RandWeightedCrop(Randomizable): """ Samples a list of `num_samples` image patches according to the provided `weight_map`. @@ -484,7 +484,7 @@ def __call__(self, img: np.ndarray, weight_map: Optional[np.ndarray] = None) -> return results -class RandCropByPosNegLabel(RandomizableTransform): +class RandCropByPosNegLabel(Randomizable): """ Crop random fixed sized regions with the center being a foreground or background voxel based on the Pos Neg Ratio. 
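The renames in this patch hinge on the split between the two base classes: `Randomizable` provides only the random state `R` and `set_random_state`, while `RandomizableTransform` additionally draws a `_do_transform` flag with probability `prob`. Below is a minimal sketch of the distinction as it stands after this patch; the two `Sample*` classes are illustrative only, not part of MONAI:

    from monai.transforms.transform import Randomizable, RandomizableTransform

    class SampleCenter(Randomizable):
        # always randomizes its parameters; no probability gate, which is why
        # crop-style transforms such as RandSpatialCrop now derive from Randomizable
        def randomize(self, data=None):
            self._center = self.R.randint(0, 10)

        def __call__(self, img):
            self.randomize()
            return img  # a real transform would crop around self._center

    class SampleShift(RandomizableTransform):
        # probability-gated augmentation: may return the input unchanged
        def __init__(self, prob=0.5):
            RandomizableTransform.__init__(self, prob)

        def randomize(self, data=None):
            super().randomize(None)  # sets self._do_transform with probability prob
            self._offset = self.R.uniform(0, 1)

        def __call__(self, img):
            self.randomize()
            return img + self._offset if self._do_transform else img

Seeding via `set_random_state(seed=...)` works the same for both, which is why `Compose` and the cache datasets in this patch only need to check for `Randomizable`.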
diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py index 1d4fcfdb1f..c8d5ceea40 100644 --- a/monai/transforms/croppad/dictionary.py +++ b/monai/transforms/croppad/dictionary.py @@ -34,7 +34,7 @@ SpatialPad, ) from monai.transforms.inverse import InvertibleTransform -from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform +from monai.transforms.transform import MapTransform, Randomizable from monai.transforms.utils import ( generate_pos_neg_label_crop_centers, generate_spatial_bounding_box, @@ -386,7 +386,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar return d -class RandSpatialCropd(RandomizableTransform, MapTransform, InvertibleTransform): +class RandSpatialCropd(Randomizable, MapTransform, InvertibleTransform): """ Dictionary-based version :py:class:`monai.transforms.RandSpatialCrop`. Crop image with random size or specific size ROI. It can crop at a random position as @@ -413,7 +413,6 @@ def __init__( random_size: bool = True, allow_missing_keys: bool = False, ) -> None: - RandomizableTransform.__init__(self, prob=1.0, do_transform=True) MapTransform.__init__(self, keys, allow_missing_keys) self.roi_size = roi_size self.random_center = random_center @@ -477,7 +476,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar return d -class RandSpatialCropSamplesd(RandomizableTransform, MapTransform): +class RandSpatialCropSamplesd(Randomizable, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandSpatialCropSamples`. Crop image with random size or specific size ROI to generate a list of N samples. @@ -515,7 +514,6 @@ def __init__( meta_key_postfix: str = "meta_dict", allow_missing_keys: bool = False, ) -> None: - RandomizableTransform.__init__(self, prob=1.0, do_transform=True) MapTransform.__init__(self, keys, allow_missing_keys) if num_samples < 1: raise ValueError(f"num_samples must be positive, got {num_samples}.") @@ -626,7 +624,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar return d -class RandWeightedCropd(RandomizableTransform, MapTransform): +class RandWeightedCropd(Randomizable, MapTransform): """ Samples a list of `num_samples` image patches according to the provided `weight_map`. @@ -654,7 +652,6 @@ def __init__( center_coord_key: Optional[str] = None, allow_missing_keys: bool = False, ): - RandomizableTransform.__init__(self, prob=1.0, do_transform=True) MapTransform.__init__(self, keys, allow_missing_keys) self.spatial_size = ensure_tuple(spatial_size) self.w_key = w_key @@ -693,7 +690,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n return results -class RandCropByPosNegLabeld(RandomizableTransform, MapTransform): +class RandCropByPosNegLabeld(Randomizable, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandCropByPosNegLabel`. 
Crop random fixed sized regions with the center being a foreground or background voxel @@ -751,7 +748,6 @@ def __init__( meta_key_postfix: str = "meta_dict", allow_missing_keys: bool = False, ) -> None: - RandomizableTransform.__init__(self) MapTransform.__init__(self, keys, allow_missing_keys) self.label_key = label_key self.spatial_size: Union[Tuple[int, ...], Sequence[int], int] = spatial_size diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index f89e381daa..62350d4ab0 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -122,6 +122,7 @@ def __init__(self, offsets: Union[Tuple[float, float], float], prob: float = 0.1 if len(offsets) != 2: raise AssertionError("offsets should be a number or pair of numbers.") self.offsets = (min(offsets), max(offsets)) + self._offset = self.offsets[0] def randomize(self, data: Optional[Any] = None) -> None: self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) @@ -217,6 +218,7 @@ def __init__( if len(factors) != 2: raise AssertionError("factors should be a number or pair of numbers.") self.factors = (min(factors), max(factors)) + self.factor = self.factors[0] self.nonzero = nonzero self.channel_wise = channel_wise self.dtype = dtype @@ -294,6 +296,7 @@ def __init__(self, factors: Union[Tuple[float, float], float], prob: float = 0.1 if len(factors) != 2: raise AssertionError("factors should be a number or pair of numbers.") self.factors = (min(factors), max(factors)) + self.factor = self.factors[0] def randomize(self, data: Optional[Any] = None) -> None: self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) @@ -874,6 +877,10 @@ def __init__( self.sigma_z = sigma_z self.approx = approx + self.x = self.sigma_x[0] + self.y = self.sigma_y[0] + self.z = self.sigma_z[0] + def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1]) diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 517c34cbf2..a35e5c8ea6 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -206,6 +206,7 @@ def __init__( if len(offsets) != 2: raise AssertionError("offsets should be a number or pair of numbers.") self.offsets = (min(offsets), max(offsets)) + self._offset = self.offsets[0] def randomize(self, data: Optional[Any] = None) -> None: self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) @@ -293,6 +294,7 @@ def __init__( if len(factors) != 2: raise AssertionError("factors should be a number or pair of numbers.") self.factors = (min(factors), max(factors)) + self.factor = self.factors[0] self.nonzero = nonzero self.channel_wise = channel_wise self.dtype = dtype @@ -380,6 +382,7 @@ def __init__( if len(factors) != 2: raise AssertionError("factors should be a number or pair of numbers.") self.factors = (min(factors), max(factors)) + self.factor = self.factors[0] def randomize(self, data: Optional[Any] = None) -> None: self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) @@ -760,11 +763,11 @@ def __init__( ) -> None: MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - self.sigma_x = sigma_x - self.sigma_y = sigma_y - self.sigma_z = sigma_z + self.sigma_x, self.sigma_y, self.sigma_z = sigma_x, sigma_y, sigma_z self.approx = approx + self.x, self.y, self.z = self.sigma_x[0], self.sigma_y[0], self.sigma_z[0] 
+
+    def randomize(self, data: Optional[Any] = None) -> None:
         super().randomize(None)
         self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1])
diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py
index 1c096ba743..a3eb055f7e 100644
--- a/monai/transforms/spatial/array.py
+++ b/monai/transforms/spatial/array.py
@@ -23,7 +23,7 @@
 from monai.data.utils import compute_shape_offset, to_affine_nd, zoom_affine
 from monai.networks.layers import AffineTransform, GaussianFilter, grid_pull
 from monai.transforms.croppad.array import CenterSpatialCrop
-from monai.transforms.transform import RandomizableTransform, Transform
+from monai.transforms.transform import Randomizable, RandomizableTransform, Transform
 from monai.transforms.utils import (
     create_control_grid,
     create_grid,
@@ -790,7 +790,7 @@ class RandAxisFlip(RandomizableTransform):
     """

     def __init__(self, prob: float = 0.1) -> None:
-        RandomizableTransform.__init__(self, min(max(prob, 0.0), 1.0))
+        RandomizableTransform.__init__(self, prob)
         self._axis: Optional[int] = None

     def randomize(self, data: np.ndarray) -> None:
@@ -1004,7 +1004,7 @@ def __call__(
         return grid if self.as_tensor_output else np.asarray(grid.cpu().numpy()), affine


-class RandAffineGrid(RandomizableTransform):
+class RandAffineGrid(Randomizable):
     """
     Generate randomised affine grid.
     """
@@ -1101,7 +1101,7 @@ def get_transformation_matrix(self) -> Optional[Union[np.ndarray, torch.Tensor]]
         return self.affine


-class RandDeformGrid(RandomizableTransform):
+class RandDeformGrid(Randomizable):
     """
     Generate random deformation grid.
     """
diff --git a/monai/transforms/transform.py b/monai/transforms/transform.py
index 6a22db1076..ff5f021739 100644
--- a/monai/transforms/transform.py
+++ b/monai/transforms/transform.py
@@ -180,17 +180,27 @@ class RandomizableTransform(Randomizable, Transform):
     """
     An interface for handling random state locally, currently based on a class
     variable `R`, which is an instance of `np.random.RandomState`.
-    This is mainly for randomized data augmentation transforms. For example::
+    This class introduces a randomized flag `_do_transform`, and is mainly for randomized data augmentation transforms.
+    For example:

-    class RandShiftIntensity(RandomizableTransform):
-        def randomize():
+    .. code-block:: python
+
+        from monai.transforms import RandomizableTransform
+
+        class RandShiftIntensity100(RandomizableTransform):
+            def randomize(self):
+                super().randomize(None)
                 self._offset = self.R.uniform(low=0, high=100)
+
             def __call__(self, img):
                 self.randomize()
+                if not self._do_transform:
+                    return img
                 return img + self._offset

-        transform = RandShiftIntensity()
+        transform = RandShiftIntensity100()
         transform.set_random_state(seed=0)
+        print(transform(10))

     """

diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py
index 8e0dabafb2..6903b2628d 100644
--- a/monai/transforms/utility/array.py
+++ b/monai/transforms/utility/array.py
@@ -22,7 +22,7 @@
 import torch

 from monai.config import DtypeLike, NdarrayTensor
-from monai.transforms.transform import RandomizableTransform, Transform
+from monai.transforms.transform import Randomizable, Transform
 from monai.transforms.utils import extreme_points_to_image, get_extreme_points, map_binary_to_indices
 from monai.utils import ensure_tuple, min_version, optional_import

@@ -667,7 +667,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
         return np.stack(result, axis=0)


-class AddExtremePointsChannel(RandomizableTransform):
+class AddExtremePointsChannel(Randomizable):
     """
     Add extreme points of label to the image as a new channel. This transform generates extreme
     point from label and applies a gaussian filter. The pixel values in points image are rescaled
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index c437cd055b..9464faa503 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -23,7 +23,7 @@
 import torch

 from monai.config import DtypeLike, KeysCollection, NdarrayTensor
-from monai.transforms.transform import MapTransform, RandomizableTransform
+from monai.transforms.transform import MapTransform, Randomizable
 from monai.transforms.utility.array import (
     AddChannel,
     AsChannelFirst,
@@ -731,9 +731,9 @@ def __call__(self, data):
         return d


-class RandLambdad(Lambdad, RandomizableTransform):
+class RandLambdad(Lambdad, Randomizable):
     """
-    RandomizableTransform version :py:class:`monai.transforms.Lambdad`, the input `func` contains random logic.
+    Randomizable version of :py:class:`monai.transforms.Lambdad`, where the input `func` contains random logic.
     It's a randomizable transform so `CacheDataset` will not execute it and cache the results.

     Args:
@@ -853,7 +853,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda
         return d


-class AddExtremePointsChanneld(RandomizableTransform, MapTransform):
+class AddExtremePointsChanneld(Randomizable, MapTransform):
     """
     Dictionary-based wrapper of :py:class:`monai.transforms.AddExtremePointsChannel`.
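Because `RandLambdad` is now detected through the `Randomizable` check, `CacheDataset` leaves it out of the cached chain and re-runs it on every fetch. A small usage sketch follows; the key names and array values are illustrative:

    import numpy as np
    from monai.transforms import RandLambdad

    # func carries the random logic; deriving from Randomizable keeps it out of the cache
    rand_add = RandLambdad(keys="img", func=lambda x: x + np.random.uniform())
    sample = {"img": np.zeros(3), "label": 1}
    print(rand_add(sample)["img"])  # differs between calls; "label" stays untouched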
diff --git a/tests/test_compose.py b/tests/test_compose.py index bb8a5f08c5..97b044af8f 100644 --- a/tests/test_compose.py +++ b/tests/test_compose.py @@ -14,11 +14,11 @@ from monai.data import DataLoader, Dataset from monai.transforms import AddChannel, Compose -from monai.transforms.transform import RandomizableTransform +from monai.transforms.transform import Randomizable from monai.utils import set_determinism -class _RandXform(RandomizableTransform): +class _RandXform(Randomizable): def randomize(self): self.val = self.R.random_sample() @@ -80,7 +80,7 @@ def c(d): # transform to handle dict data self.assertDictEqual(item, {"a": 2, "b": 1, "c": 2}) def test_random_compose(self): - class _Acc(RandomizableTransform): + class _Acc(Randomizable): self.rand = 0.0 def randomize(self, data=None): @@ -99,7 +99,7 @@ def __call__(self, data): self.assertAlmostEqual(c(1), 1.90734751) def test_randomize_warn(self): - class _RandomClass(RandomizableTransform): + class _RandomClass(Randomizable): def randomize(self, foo1, foo2): pass diff --git a/tests/test_rand_lambdad.py b/tests/test_rand_lambdad.py index 2ddfeefae0..a450b67413 100644 --- a/tests/test_rand_lambdad.py +++ b/tests/test_rand_lambdad.py @@ -13,11 +13,11 @@ import numpy as np -from monai.transforms.transform import RandomizableTransform +from monai.transforms.transform import Randomizable from monai.transforms.utility.dictionary import RandLambdad -class RandTest(RandomizableTransform): +class RandTest(Randomizable): """ randomisable transform for testing. """ From 5c411d1c5b98ea061c0c931a3df7d90f78f0d514 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Wed, 7 Apr 2021 01:12:13 +0100 Subject: [PATCH 156/457] 1956 - docker image building pipelines (#1957) * refactor docker building Signed-off-by: Wenqi Li --- .dockerignore | 1 - .github/workflows/docker.yml | 112 +++++++++++++++++++++++++++++++++ .github/workflows/release.yml | 44 ++++++++++++- .github/workflows/setupapp.yml | 43 ------------- Dockerfile | 3 +- 5 files changed, 154 insertions(+), 49 deletions(-) create mode 100644 .github/workflows/docker.yml diff --git a/.dockerignore b/.dockerignore index 262da4d0dd..4e1161bfb2 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,7 +8,6 @@ docs/ .coverage/ coverage.xml .readthedocs.yml -*.md *.toml !README.md diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000000..2745d4169f --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,112 @@ +name: docker +# versioning: compute a static version file +# local_docker: use the version file to build docker images +# docker_test_latest: test the latest internal docker image (has flake) +# docker_test_dockerhub: test the latest dockerhub release (no flake) +on: + # master only docker deployment and quick tests + push: + branches: + - master + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + versioning: + # compute versioning file from python setup.py + # upload as artifact + # (also used in release.yml) + if: github.repository == 'Project-MONAI/MONAI' + container: + image: localhost:5000/local_monai:latest + runs-on: [self-hosted, linux, x64, build_only] + steps: + - uses: actions/checkout@v2 + # full history so that we can git describe + with: + ref: master + fetch-depth: 0 + - shell: bash + run: | + git describe + python setup.py build + cat build/lib/monai/_version.py + - name: Upload version + uses: actions/upload-artifact@v2 + with: + name: _version.py + path: build/lib/monai/_version.py 
+ - name: Clean up directory + shell: bash + run: | + ls -al + rm -rf {*,.[^.]*} + + local_docker: + # builds two versions: local_monai:latest and local_monai:dockerhub + # latest: used for local tests + # dockerhub: release, no flake package + if: github.repository == 'Project-MONAI/MONAI' + needs: versioning + runs-on: [self-hosted, linux, x64, build_only] + steps: + - uses: actions/checkout@v2 + with: + ref: master + - name: Download version + uses: actions/download-artifact@v2 + with: + name: _version.py + - name: docker_build + shell: bash + run: | + # get tag info for versioning + cat _version.py + mv _version.py monai/ + # build and run original docker image for local registry + docker build -t localhost:5000/local_monai:latest -f Dockerfile . + docker push localhost:5000/local_monai:latest + # build once more w/ tag "latest": remove flake package as it is not needed on hub.docker.com + sed -i '/flake/d' requirements-dev.txt + docker build -t projectmonai/monai:latest -f Dockerfile . + # also push as tag "dockerhub" to local registry + docker image tag projectmonai/monai:latest localhost:5000/local_monai:dockerhub + docker push localhost:5000/local_monai:dockerhub + # distribute as always w/ tag "latest" to hub.docker.com + echo "${{ secrets.DOCKER_PW }}" | docker login -u projectmonai --password-stdin + docker push projectmonai/monai:latest + docker logout + + docker_test_latest: + if: github.repository == 'Project-MONAI/MONAI' + needs: local_docker + container: + image: localhost:5000/local_monai:latest + runs-on: [self-hosted, linux, x64, common] + steps: + - name: Import + run: | + python -c 'import monai; monai.config.print_config()' + cd /opt/monai + ls -al + ngc --version + python -m tests.min_tests + env: + QUICKTEST: True + + docker_test_dockerhub: + if: github.repository == 'Project-MONAI/MONAI' + needs: local_docker + container: + image: localhost:5000/local_monai:dockerhub + runs-on: [self-hosted, linux, x64, common] + steps: + - name: Import + run: | + python -c 'import monai; monai.config.print_config()' + cd /opt/monai + ls -al + ngc --version + python -m tests.min_tests + env: + QUICKTEST: True diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f36abc9fcf..00e28ecd52 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -83,14 +83,49 @@ jobs: password: ${{ secrets.TEST_PYPI }} repository_url: https://test.pypi.org/legacy/ - release_docker: + versioning: + # compute versioning file from python setup.py + # upload as artifact + # (also used in docker.yml) if: github.repository == 'Project-MONAI/MONAI' needs: packaging - runs-on: [ self-hosted, linux, x64, build_only ] + container: + image: localhost:5000/local_monai:latest + runs-on: [self-hosted, linux, x64, build_only] steps: - uses: actions/checkout@v2 + # full history so that we can git describe with: ref: master + fetch-depth: 0 + - shell: bash + run: | + git describe + python setup.py build + cat build/lib/monai/_version.py + - name: Upload version + uses: actions/upload-artifact@v2 + with: + name: _version.py + path: build/lib/monai/_version.py + - name: Clean up directory + shell: bash + run: | + ls -al + rm -rf {*,.[^.]*} + + release_tag_docker: + if: github.repository == 'Project-MONAI/MONAI' + needs: versioning + runs-on: [self-hosted, linux, x64, build_only] + steps: + - uses: actions/checkout@v2 + with: + ref: master + - name: Download version + uses: actions/download-artifact@v2 + with: + name: _version.py - name: Set tag id: versioning run: echo 
::set-output name=tag::${GITHUB_REF#refs/*/} @@ -99,12 +134,15 @@ jobs: RELEASE_VERSION: ${{ steps.versioning.outputs.tag }} run: | echo "$RELEASE_VERSION" + cat _version.py - if: startsWith(github.ref, 'refs/tags/') name: build with the tag env: RELEASE_VERSION: ${{ steps.versioning.outputs.tag }} + shell: bash run: | - git fetch --depth=1 origin +refs/tags/*:refs/tags/* + # get tag info for versioning + mv _version.py monai/ # remove flake package as it is not needed on hub.docker.com sed -i '/flake/d' requirements-dev.txt docker build -t projectmonai/monai:"$RELEASE_VERSION" -f Dockerfile . diff --git a/.github/workflows/setupapp.yml b/.github/workflows/setupapp.yml index 450be403a0..dc65141fe8 100644 --- a/.github/workflows/setupapp.yml +++ b/.github/workflows/setupapp.yml @@ -148,46 +148,3 @@ jobs: python -m tests.min_tests env: QUICKTEST: True - - local_docker: - if: github.repository == 'Project-MONAI/MONAI' - runs-on: [self-hosted, linux, x64, build_only] - # we only push built container if it is built from master branch - steps: - - uses: actions/checkout@v2 - with: - ref: master - - name: docker_build - run: | - # get tag info for versioning - git fetch --depth=1 origin +refs/tags/*:refs/tags/* - # build and run original docker image for local registry - docker build -t localhost:5000/local_monai:latest -f Dockerfile . - docker push localhost:5000/local_monai:latest - # build once more w/ tag "latest": remove flake package as it is not needed on hub.docker.com - sed -i '/flake/d' requirements-dev.txt - docker build -t projectmonai/monai:latest -f Dockerfile . - # also push as tag "dockerhub" to local registry - docker image tag projectmonai/monai:latest localhost:5000/local_monai:dockerhub - docker push localhost:5000/local_monai:dockerhub - # distribute as always w/ tag "latest" to hub.docker.com - echo "${{ secrets.DOCKER_PW }}" | docker login -u projectmonai --password-stdin - docker push projectmonai/monai:latest - docker logout - - docker: - if: github.repository == 'Project-MONAI/MONAI' - needs: local_docker - container: - image: localhost:5000/local_monai:latest - runs-on: [self-hosted, linux, x64, common] - steps: - - name: Import - run: | - python -c 'import monai; monai.config.print_config()' - cd /opt/monai - ls -al - ngc --version - python -m tests.min_tests - env: - QUICKTEST: True diff --git a/Dockerfile b/Dockerfile index 57ea567869..23be9ae1c3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -30,10 +30,9 @@ RUN cp /tmp/requirements.txt /tmp/req.bak \ # please specify exact files and folders to be copied -- else, basically always, the Docker build process cannot cache # this or anything below it and always will build from at most here; one file change leads to no caching from here on... -COPY LICENSE setup.py setup.cfg versioneer.py runtests.sh .gitignore .gitattributes README.md MANIFEST.in ./ +COPY LICENSE CHANGELOG.md CODE_OF_CONDUCT.md CONTRIBUTING.md README.md versioneer.py setup.py setup.cfg runtests.sh MANIFEST.in ./ COPY tests ./tests COPY monai ./monai -COPY .git ./.git RUN BUILD_MONAI=1 FORCE_CUDA=1 python setup.py develop \ && rm -rf build __pycache__ From 0813ae5fb2488f1407d98bc3e32a8c4e3c931524 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Wed, 7 Apr 2021 08:26:20 +0100 Subject: [PATCH 157/457] 1955 style issue (#1958) * update docs deps. 
Signed-off-by: Wenqi Li * fixes https://github.com/Project-MONAI/MONAI/runs/2283148095\?check_suite_focus\=true\#step:7:8972 Signed-off-by: Wenqi Li * Revert "fixes https://github.com/Project-MONAI/MONAI/runs/2283148095\?check_suite_focus\=true\#step:7:8972" This reverts commit 1407400da84bebe997ee7b720545f12f105a7025. Signed-off-by: Wenqi Li --- docs/requirements.txt | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index c03e3327f4..acc983129f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,15 +4,13 @@ pytorch-ignite==0.4.4 numpy>=1.17 itk>=5.0, <=5.1.2 nibabel -cucim==0.18.2 -openslide-python==1.1.2 parameterized scikit-image>=0.14.2 tensorboard commonmark==0.9.1 recommonmark==0.6.0 -Sphinx==3.3.0 -sphinx-rtd-theme==0.5.0 +Sphinx==3.5.3 +sphinx-rtd-theme==0.5.2 sphinxcontrib-applehelp sphinxcontrib-devhelp sphinxcontrib-htmlhelp From d6ed95635c078428b06563b8c6e5bc973e319361 Mon Sep 17 00:00:00 2001 From: Behrooz <3968947+behxyz@users.noreply.github.com> Date: Wed, 7 Apr 2021 05:14:20 -0400 Subject: [PATCH 158/457] Update garbage collection assertion (#1959) * Update garbage collection assertion to a more reliable one Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> * Remove print Signed-off-by: Behrooz <3968947+behxyz@users.noreply.github.com> --- tests/test_handler_garbage_collector.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tests/test_handler_garbage_collector.py b/tests/test_handler_garbage_collector.py index 5e6bd7275c..9f63211a13 100644 --- a/tests/test_handler_garbage_collector.py +++ b/tests/test_handler_garbage_collector.py @@ -60,17 +60,16 @@ def _train_func(engine, batch): GarbageCollector(trigger_event=trigger_event, log_level=30).attach(engine) engine.run(data_loader, max_epochs=5) - print(gb_count_dict) first_count = 0 - for epoch, gb_count in gb_count_dict.items(): - # At least one zero-generation object + for iter, gb_count in gb_count_dict.items(): + # At least one zero-generation object is collected self.assertGreater(gb_count[0], 0) - if epoch == 1: - first_count = gb_count[0] - else: - # The should be less number of collected objects in the next calls. - self.assertLess(gb_count[0], first_count) + if iter > 1: + # Since we are collecting all objects from all generations manually at each call, + # starting from the second call, there shouldn't be any 1st and 2nd generation objects available to collect. + self.assertEqual(gb_count[1], first_count) + self.assertEqual(gb_count[2], first_count) if __name__ == "__main__": From 75812552ec5e51b2414d03c6125b7758ccd6a888 Mon Sep 17 00:00:00 2001 From: Petru-Daniel Tudosiu Date: Wed, 7 Apr 2021 12:08:03 +0100 Subject: [PATCH 159/457] Working ParameterScheduler (#1949) * Working ParameterScheduler Added a new ParameterScheduler handler and the required tests. Signed-off-by: Petru-Daniel Tudosiu --- docs/source/handlers.rst | 5 + monai/handlers/__init__.py | 1 + monai/handlers/parameter_scheduler.py | 163 ++++++++++++++++++++++ tests/min_tests.py | 1 + tests/test_handler_parameter_scheduler.py | 123 ++++++++++++++++ 5 files changed, 293 insertions(+) create mode 100644 monai/handlers/parameter_scheduler.py create mode 100644 tests/test_handler_parameter_scheduler.py diff --git a/docs/source/handlers.rst b/docs/source/handlers.rst index 869467c496..9030fa3ced 100644 --- a/docs/source/handlers.rst +++ b/docs/source/handlers.rst @@ -111,6 +111,11 @@ SmartCache handler .. 
autoclass:: SmartCacheHandler
    :members:

+Parameter Scheduler handler
+---------------------------
+.. autoclass:: ParamSchedulerHandler
+    :members:
+
 EarlyStop handler
 -----------------
 .. autoclass:: EarlyStopHandler
diff --git a/monai/handlers/__init__.py b/monai/handlers/__init__.py
index 2112b074a0..f88531ea8e 100644
--- a/monai/handlers/__init__.py
+++ b/monai/handlers/__init__.py
@@ -21,6 +21,7 @@
 from .mean_dice import MeanDice
 from .metric_logger import MetricLogger, MetricLoggerKeys
 from .metrics_saver import MetricsSaver
+from .parameter_scheduler import ParamSchedulerHandler
 from .roc_auc import ROCAUC
 from .segmentation_saver import SegmentationSaver
 from .smartcache_handler import SmartCacheHandler
diff --git a/monai/handlers/parameter_scheduler.py b/monai/handlers/parameter_scheduler.py
new file mode 100644
index 0000000000..2aa0224a5a
--- /dev/null
+++ b/monai/handlers/parameter_scheduler.py
@@ -0,0 +1,163 @@
+import logging
+from bisect import bisect_right
+from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union
+
+from monai.utils import exact_version, optional_import
+
+if TYPE_CHECKING:
+    from ignite.engine import Engine, Events
+else:
+    Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
+    Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
+
+
+class ParamSchedulerHandler:
+    """
+    General purpose scheduler for parameter values. By default it can schedule in a linear, exponential, step or
+    multistep function. One can also pass Callables to have customized scheduling logic.
+
+    Args:
+        parameter_setter (Callable): Function that sets the required parameter.
+        value_calculator (Union[str,Callable]): Either a string ('linear', 'exponential', 'step' or 'multistep')
+            or Callable for custom logic.
+        vc_kwargs (Dict): Dictionary that stores the required parameters for the value_calculator.
+        epoch_level (bool): Whether the step is based on epoch or iteration. Defaults to False.
+        name (Optional[str]): Identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
+        event (Optional[str]): Event to which the handler attaches. Defaults to Events.ITERATION_COMPLETED.
+    """
+
+    def __init__(
+        self,
+        parameter_setter: Callable,
+        value_calculator: Union[str, Callable],
+        vc_kwargs: Dict,
+        epoch_level: bool = False,
+        name: Optional[str] = None,
+        event=Events.ITERATION_COMPLETED,
+    ):
+        self.epoch_level = epoch_level
+        self.event = event
+
+        self._calculators = {
+            "linear": self._linear,
+            "exponential": self._exponential,
+            "step": self._step,
+            "multistep": self._multistep,
+        }
+
+        self._parameter_setter = parameter_setter
+        self._vc_kwargs = vc_kwargs
+        self._value_calculator = self._get_value_calculator(value_calculator=value_calculator)
+
+        self.logger = logging.getLogger(name)
+        self._name = name
+
+    def _get_value_calculator(self, value_calculator):
+        if isinstance(value_calculator, str):
+            return self._calculators[value_calculator]
+        if callable(value_calculator):
+            return value_calculator
+        raise ValueError(
+            f"value_calculator must be either a string from {list(self._calculators.keys())} or a Callable."
+        )
+
+    def __call__(self, engine: Engine):
+        if self.epoch_level:
+            self._vc_kwargs["current_step"] = engine.state.epoch
+        else:
+            self._vc_kwargs["current_step"] = engine.state.iteration
+
+        new_value = self._value_calculator(**self._vc_kwargs)
+        self._parameter_setter(new_value)
+
+    def attach(self, engine: Engine) -> None:
+        """
+        Args:
+            engine: Ignite Engine that is used for training.
+        """
+        if self._name is None:
+            self.logger = engine.logger
+        engine.add_event_handler(self.event, self)
+
+    @staticmethod
+    def _linear(
+        initial_value: float, step_constant: int, step_max_value: int, max_value: float, current_step: int
+    ) -> float:
+        """
+        Keeps the parameter value at ``initial_value`` until ``step_constant`` steps have passed, then increases
+        it linearly until it reaches ``max_value`` at ``step_max_value`` steps, and keeps it at ``max_value``
+        afterwards.
+
+        Args:
+            initial_value (float): Starting value of the parameter.
+            step_constant (int): Step index until parameter's value is kept constant.
+            step_max_value (int): Step index at which parameter's value becomes max_value.
+            max_value (float): Max parameter value.
+            current_step (int): Current step index.
+
+        Returns:
+            float: new parameter value
+        """
+        if current_step <= step_constant:
+            delta = 0.0
+        elif current_step > step_max_value:
+            delta = max_value - initial_value
+        else:
+            delta = (max_value - initial_value) / (step_max_value - step_constant) * (current_step - step_constant)
+
+        return initial_value + delta
+
+    @staticmethod
+    def _exponential(initial_value: float, gamma: float, current_step: int) -> float:
+        """
+        Decays the parameter value by gamma every step.
+
+        Based on the closed form of ExponentialLR from Pytorch
+        https://github.com/pytorch/pytorch/blob/master/torch/optim/lr_scheduler.py#L457
+
+        Args:
+            initial_value (float): Starting value of the parameter.
+            gamma (float): Multiplicative factor of parameter value decay.
+            current_step (int): Current step index.
+
+        Returns:
+            float: new parameter value
+        """
+        return initial_value * gamma ** current_step
+
+    @staticmethod
+    def _step(initial_value: float, gamma: float, step_size: int, current_step: int) -> float:
+        """
+        Decays the parameter value by gamma every step_size.
+
+        Based on StepLR from Pytorch.
+        https://github.com/pytorch/pytorch/blob/master/torch/optim/lr_scheduler.py#L377
+
+        Args:
+            initial_value (float): Starting value of the parameter.
+            gamma (float): Multiplicative factor of parameter value decay.
+            step_size (int): Period of parameter value decay.
+            current_step (int): Current step index.
+
+        Returns:
+            float: new parameter value
+        """
+        return initial_value * gamma ** (current_step // step_size)
+
+    @staticmethod
+    def _multistep(initial_value: float, gamma: float, milestones: List[int], current_step: int) -> float:
+        """
+        Decays the parameter value by gamma once the number of steps reaches one of the milestones.
+
+        Based on MultiStepLR from Pytorch.
+        https://github.com/pytorch/pytorch/blob/master/torch/optim/lr_scheduler.py#L424
+
+        Args:
+            initial_value (float): Starting value of the parameter.
+            gamma (float): Multiplicative factor of parameter value decay.
+            milestones (List[int]): List of step indices. Must be increasing.
+            current_step (int): Current step index.
+ + Returns: + float: new parameter value + """ + return initial_value * gamma ** bisect_right(milestones, current_step) diff --git a/tests/min_tests.py b/tests/min_tests.py index 9b96e7eaab..98f6d822a7 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -47,6 +47,7 @@ def run_testsuit(): "test_handler_prob_map_producer", "test_handler_rocauc", "test_handler_rocauc_dist", + "test_handler_parameter_scheduler", "test_handler_segmentation_saver", "test_handler_smartcache", "test_handler_stats", diff --git a/tests/test_handler_parameter_scheduler.py b/tests/test_handler_parameter_scheduler.py new file mode 100644 index 0000000000..5b3e845ace --- /dev/null +++ b/tests/test_handler_parameter_scheduler.py @@ -0,0 +1,123 @@ +import unittest + +import torch +from ignite.engine import Engine, Events +from torch.nn import Module + +from monai.handlers.parameter_scheduler import ParamSchedulerHandler + + +class ToyNet(Module): + def __init__(self, value): + super(ToyNet, self).__init__() + self.value = value + + def forward(self, input): + return input + + def get_value(self): + return self.value + + def set_value(self, value): + self.value = value + + +class TestHandlerParameterScheduler(unittest.TestCase): + def test_linear_scheduler(self): + # Testing step_constant + net = ToyNet(value=-1) + engine = Engine(lambda e, b: None) + ParamSchedulerHandler( + parameter_setter=net.set_value, + value_calculator="linear", + vc_kwargs={"initial_value": 0, "step_constant": 2, "step_max_value": 5, "max_value": 10}, + epoch_level=True, + event=Events.EPOCH_COMPLETED, + ).attach(engine) + engine.run([0] * 8, max_epochs=2) + torch.testing.assert_allclose(net.get_value(), 0) + + # Testing linear increase + net = ToyNet(value=-1) + engine = Engine(lambda e, b: None) + ParamSchedulerHandler( + parameter_setter=net.set_value, + value_calculator="linear", + vc_kwargs={"initial_value": 0, "step_constant": 2, "step_max_value": 5, "max_value": 10}, + epoch_level=True, + event=Events.EPOCH_COMPLETED, + ).attach(engine) + engine.run([0] * 8, max_epochs=3) + torch.testing.assert_allclose(net.get_value(), 3.333333, atol=0.001, rtol=0.0) + + # Testing max_value + net = ToyNet(value=-1) + engine = Engine(lambda e, b: None) + ParamSchedulerHandler( + parameter_setter=net.set_value, + value_calculator="linear", + vc_kwargs={"initial_value": 0, "step_constant": 2, "step_max_value": 5, "max_value": 10}, + epoch_level=True, + event=Events.EPOCH_COMPLETED, + ).attach(engine) + engine.run([0] * 8, max_epochs=10) + torch.testing.assert_allclose(net.get_value(), 10) + + def test_exponential_scheduler(self): + net = ToyNet(value=-1) + engine = Engine(lambda e, b: None) + ParamSchedulerHandler( + parameter_setter=net.set_value, + value_calculator="exponential", + vc_kwargs={"initial_value": 10, "gamma": 0.99}, + epoch_level=True, + event=Events.EPOCH_COMPLETED, + ).attach(engine) + engine.run([0] * 8, max_epochs=2) + torch.testing.assert_allclose(net.get_value(), 10 * 0.99 * 0.99) + + def test_step_scheduler(self): + net = ToyNet(value=-1) + engine = Engine(lambda e, b: None) + ParamSchedulerHandler( + parameter_setter=net.set_value, + value_calculator="step", + vc_kwargs={"initial_value": 10, "gamma": 0.99, "step_size": 5}, + epoch_level=True, + event=Events.EPOCH_COMPLETED, + ).attach(engine) + engine.run([0] * 8, max_epochs=10) + torch.testing.assert_allclose(net.get_value(), 10 * 0.99 * 0.99) + + def test_multistep_scheduler(self): + net = ToyNet(value=-1) + engine = Engine(lambda e, b: None) + ParamSchedulerHandler( + 
parameter_setter=net.set_value, + value_calculator="multistep", + vc_kwargs={"initial_value": 10, "gamma": 0.99, "milestones": [3, 6]}, + epoch_level=True, + event=Events.EPOCH_COMPLETED, + ).attach(engine) + engine.run([0] * 8, max_epochs=10) + torch.testing.assert_allclose(net.get_value(), 10 * 0.99 * 0.99) + + def test_custom_scheduler(self): + def custom_logic(initial_value, gamma, current_step): + return initial_value * gamma ** (current_step % 9) + + net = ToyNet(value=-1) + engine = Engine(lambda e, b: None) + ParamSchedulerHandler( + parameter_setter=net.set_value, + value_calculator=custom_logic, + vc_kwargs={"initial_value": 10, "gamma": 0.99}, + epoch_level=True, + event=Events.EPOCH_COMPLETED, + ).attach(engine) + engine.run([0] * 8, max_epochs=2) + torch.testing.assert_allclose(net.get_value(), 10 * 0.99 * 0.99) + + +if __name__ == "__main__": + unittest.main() From baca6e0223c296ac97a678607f18ceefd64bb6f9 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Wed, 7 Apr 2021 22:59:07 +0800 Subject: [PATCH 160/457] [DLMED] remove warning (#1962) Signed-off-by: Nic Ma --- monai/utils/module.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/monai/utils/module.py b/monai/utils/module.py index 448046b9e6..b51b2820a8 100644 --- a/monai/utils/module.py +++ b/monai/utils/module.py @@ -96,11 +96,9 @@ def min_version(the_module, min_version_str: str = "") -> bool: Returns True if the module's version is greater or equal to the 'min_version'. When min_version_str is not provided, it always returns True. """ - if not min_version_str: + if not min_version_str or not hasattr(the_module, "__version__"): return True # always valid version - if not hasattr(the_module, "__version__"): - warnings.warn(f"{the_module} has no attribute __version__ in min_version check.") - return True # min_version is the default, shouldn't be noisy + mod_version = tuple(int(x) for x in the_module.__version__.split(".")[:2]) required = tuple(int(x) for x in min_version_str.split(".")[:2]) return mod_version >= required From 99bca69f26f9f6a76e60f2ce7848e83470547833 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Thu, 8 Apr 2021 00:36:04 +0800 Subject: [PATCH 161/457] 1916 Add support for custom events in workflows (#1961) * [DLMED] add support for addtional events Signed-off-by: Nic Ma * [DLMED] add unit tests Signed-off-by: Nic Ma * [DLMED] fix typehints Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * fixes typos Signed-off-by: Wenqi Li Co-authored-by: monai-bot Co-authored-by: Wenqi Li --- monai/engines/evaluator.py | 39 ++++++++++++++++++------- monai/engines/trainer.py | 19 +++++++----- monai/engines/workflow.py | 39 +++++++++++++++---------- tests/test_ensemble_evaluator.py | 23 ++++++++++++++- tests/test_handler_garbage_collector.py | 3 +- 5 files changed, 89 insertions(+), 34 deletions(-) diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py index c1fe79c848..bfa69c0bdd 100644 --- a/monai/engines/evaluator.py +++ b/monai/engines/evaluator.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
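For reference, the scheduler added in PATCH 159 is used end to end as in the minimal sketch below; the `loss_weights` dict and its lambda setter are illustrative stand-ins, not part of the patch:

    from ignite.engine import Engine, Events
    from monai.handlers import ParamSchedulerHandler

    trainer = Engine(lambda engine, batch: None)  # stand-in for a real training loop
    loss_weights = {"lambda": 0.0}  # hypothetical parameter to be scheduled

    ParamSchedulerHandler(
        parameter_setter=lambda v: loss_weights.update({"lambda": v}),
        value_calculator="linear",
        vc_kwargs={"initial_value": 0.0, "step_constant": 2, "step_max_value": 5, "max_value": 1.0},
        epoch_level=True,
        event=Events.EPOCH_COMPLETED,
    ).attach(trainer)
    trainer.run([0] * 4, max_epochs=8)  # "lambda" stays 0.0 for 2 epochs, then ramps to 1.0 by epoch 5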
-from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union +from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from torch.utils.data import DataLoader @@ -23,11 +23,12 @@ from monai.utils.enums import CommonKeys as Keys if TYPE_CHECKING: - from ignite.engine import Engine + from ignite.engine import Engine, EventEnum from ignite.metrics import Metric else: Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric") + EventEnum, _ = optional_import("ignite.engine", "0.4.4", exact_version, "EventEnum") __all__ = ["Evaluator", "SupervisedEvaluator", "EnsembleEvaluator"] @@ -56,6 +57,10 @@ class Evaluator(Workflow): amp: whether to enable auto-mixed-precision evaluation, default is False. mode: model forward mode during evaluation, should be 'eval' or 'train', which maps to `model.eval()` or `model.train()`, default to 'eval'. + event_names: additional custom ignite events that will register to the engine. + new events can be a list of str or `ignite.engine.events.EventEnum`. + event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`. + for more details, check: https://github.com/pytorch/ignite/blob/v0.4.4.post1/ignite/engine/engine.py#L160 """ @@ -73,6 +78,8 @@ def __init__( val_handlers: Optional[Sequence] = None, amp: bool = False, mode: Union[ForwardMode, str] = ForwardMode.EVAL, + event_names: Optional[List[Union[str, EventEnum]]] = None, + event_to_attr: Optional[dict] = None, ) -> None: super().__init__( device=device, @@ -87,6 +94,8 @@ def __init__( additional_metrics=additional_metrics, handlers=val_handlers, amp=amp, + event_names=event_names, + event_to_attr=event_to_attr, ) mode = ForwardMode(mode) if mode == ForwardMode.EVAL: @@ -140,6 +149,10 @@ class SupervisedEvaluator(Evaluator): amp: whether to enable auto-mixed-precision evaluation, default is False. mode: model forward mode during evaluation, should be 'eval' or 'train', which maps to `model.eval()` or `model.train()`, default to 'eval'. + event_names: additional custom ignite events that will register to the engine. + new events can be a list of str or `ignite.engine.events.EventEnum`. + event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`. + for more details, check: https://github.com/pytorch/ignite/blob/v0.4.4.post1/ignite/engine/engine.py#L160 """ @@ -159,6 +172,8 @@ def __init__( val_handlers: Optional[Sequence] = None, amp: bool = False, mode: Union[ForwardMode, str] = ForwardMode.EVAL, + event_names: Optional[List[Union[str, EventEnum]]] = None, + event_to_attr: Optional[dict] = None, ) -> None: super().__init__( device=device, @@ -173,15 +188,14 @@ def __init__( val_handlers=val_handlers, amp=amp, mode=mode, + # add the iteration events + event_names=[IterationEvents] if event_names is None else event_names + [IterationEvents], + event_to_attr=event_to_attr, ) self.network = network self.inferer = SimpleInferer() if inferer is None else inferer - def _register_additional_events(self): - super()._register_additional_events() - self.register_events(*IterationEvents) - def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """ callback function for the Supervised Evaluation processing logic of 1 iteration in Ignite Engine. 
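The new `event_names`/`event_to_attr` arguments enable usage like the sketch below; the custom event enum, the Identity network and the one-batch in-memory loader are illustrative only:

    import torch
    from ignite.engine import EventEnum, Events
    from monai.engines import SupervisedEvaluator

    class PruneEvents(EventEnum):  # hypothetical custom events
        PRUNE_COMPLETED = "prune_completed"

    evaluator = SupervisedEvaluator(
        device=torch.device("cpu"),
        val_data_loader=[{"image": torch.rand(2, 1, 8, 8), "label": torch.rand(2, 1, 8, 8)}],
        network=torch.nn.Identity(),
        event_names=[PruneEvents],  # IterationEvents are appended internally
        event_to_attr={PruneEvents.PRUNE_COMPLETED: "prune"},  # exposes engine.state.prune
    )

    @evaluator.on(Events.ITERATION_COMPLETED)
    def _trigger(engine):
        engine.fire_event(PruneEvents.PRUNE_COMPLETED)

    @evaluator.on(PruneEvents.PRUNE_COMPLETED)
    def _handle(engine):
        print("custom event fired; engine.state.prune =", engine.state.prune)

    evaluator.run()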
@@ -251,6 +265,10 @@ class EnsembleEvaluator(Evaluator): amp: whether to enable auto-mixed-precision evaluation, default is False. mode: model forward mode during evaluation, should be 'eval' or 'train', which maps to `model.eval()` or `model.train()`, default to 'eval'. + event_names: additional custom ignite events that will register to the engine. + new events can be a list of str or `ignite.engine.events.EventEnum`. + event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`. + for more details, check: https://github.com/pytorch/ignite/blob/v0.4.4.post1/ignite/engine/engine.py#L160 """ @@ -271,6 +289,8 @@ def __init__( val_handlers: Optional[Sequence] = None, amp: bool = False, mode: Union[ForwardMode, str] = ForwardMode.EVAL, + event_names: Optional[List[Union[str, EventEnum]]] = None, + event_to_attr: Optional[dict] = None, ) -> None: super().__init__( device=device, @@ -285,16 +305,15 @@ def __init__( val_handlers=val_handlers, amp=amp, mode=mode, + # add the iteration events + event_names=[IterationEvents] if event_names is None else event_names + [IterationEvents], + event_to_attr=event_to_attr, ) self.networks = ensure_tuple(networks) self.pred_keys = ensure_tuple(pred_keys) self.inferer = SimpleInferer() if inferer is None else inferer - def _register_additional_events(self): - super()._register_additional_events() - self.register_events(*IterationEvents) - def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """ callback function for the Supervised Evaluation processing logic of 1 iteration in Ignite Engine. diff --git a/monai/engines/trainer.py b/monai/engines/trainer.py index a7b1943211..f14ee7e91f 100644 --- a/monai/engines/trainer.py +++ b/monai/engines/trainer.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union +from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from torch.optim.optimizer import Optimizer @@ -23,11 +23,12 @@ from monai.utils.enums import CommonKeys as Keys if TYPE_CHECKING: - from ignite.engine import Engine + from ignite.engine import Engine, EventEnum from ignite.metrics import Metric else: Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric") + EventEnum, _ = optional_import("ignite.engine", "0.4.4", exact_version, "EventEnum") __all__ = ["Trainer", "SupervisedTrainer", "GanTrainer"] @@ -78,6 +79,10 @@ class SupervisedTrainer(Trainer): train_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like: CheckpointHandler, StatsHandler, SegmentationSaver, etc. amp: whether to enable auto-mixed-precision training, default is False. + event_names: additional custom ignite events that will register to the engine. + new events can be a list of str or `ignite.engine.events.EventEnum`. + event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`. 
+ for more details, check: https://github.com/pytorch/ignite/blob/v0.4.4.post1/ignite/engine/engine.py#L160 """ @@ -99,8 +104,9 @@ def __init__( additional_metrics: Optional[Dict[str, Metric]] = None, train_handlers: Optional[Sequence] = None, amp: bool = False, + event_names: Optional[List[Union[str, EventEnum]]] = None, + event_to_attr: Optional[dict] = None, ) -> None: - # set up Ignite engine and environments super().__init__( device=device, max_epochs=max_epochs, @@ -114,6 +120,9 @@ def __init__( additional_metrics=additional_metrics, handlers=train_handlers, amp=amp, + # add the iteration events + event_names=[IterationEvents] if event_names is None else event_names + [IterationEvents], + event_to_attr=event_to_attr, ) self.network = network @@ -121,10 +130,6 @@ def __init__( self.loss_function = loss_function self.inferer = SimpleInferer() if inferer is None else inferer - def _register_additional_events(self): - super()._register_additional_events() - self.register_events(*IterationEvents) - def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]): """ Callback function for the Supervised Training processing logic of 1 iteration in Ignite Engine. diff --git a/monai/engines/workflow.py b/monai/engines/workflow.py index 61b92ac5dd..50a9f41368 100644 --- a/monai/engines/workflow.py +++ b/monai/engines/workflow.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional, Sequence, Union +from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Sequence, Union import torch import torch.distributed as dist @@ -23,12 +23,14 @@ IgniteEngine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") State, _ = optional_import("ignite.engine", "0.4.4", exact_version, "State") Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events") + if TYPE_CHECKING: - from ignite.engine import Engine + from ignite.engine import Engine, EventEnum from ignite.metrics import Metric else: Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine") Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric") + EventEnum, _ = optional_import("ignite.engine", "0.4.4", exact_version, "EventEnum") class Workflow(IgniteEngine): # type: ignore[valid-type, misc] # due to optional_import @@ -60,6 +62,10 @@ class Workflow(IgniteEngine): # type: ignore[valid-type, misc] # due to optiona handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like: CheckpointHandler, StatsHandler, SegmentationSaver, etc. amp: whether to enable auto-mixed-precision training or inference, default is False. + event_names: additional custom ignite events that will register to the engine. + new events can be a list of str or `ignite.engine.events.EventEnum`. + event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`. + for more details, check: https://github.com/pytorch/ignite/blob/v0.4.4.post1/ignite/engine/engine.py#L160 Raises: TypeError: When ``device`` is not a ``torch.Device``. 
@@ -83,6 +89,8 @@ def __init__( additional_metrics: Optional[Dict[str, Metric]] = None, handlers: Optional[Sequence] = None, amp: bool = False, + event_names: Optional[List[Union[str, EventEnum]]] = None, + event_to_attr: Optional[dict] = None, ) -> None: if iteration_update is not None: super().__init__(iteration_update) @@ -128,7 +136,17 @@ def set_sampler_epoch(engine: Engine): self.prepare_batch = prepare_batch self.amp = amp - self._register_additional_events() + if event_names is not None: + if not isinstance(event_names, list): + raise ValueError("event_names must be a list or string or EventEnum.") + for name in event_names: + if isinstance(name, str): + self.register_events(name, event_to_attr=event_to_attr) + elif issubclass(name, EventEnum): + self.register_events(*name, event_to_attr=event_to_attr) + else: + raise ValueError("event_names must be a list or string or EventEnum.") + if post_transform is not None: self._register_post_transforms(post_transform) if key_metric is not None: @@ -136,14 +154,7 @@ def set_sampler_epoch(engine: Engine): if handlers is not None: self._register_handlers(handlers) - def _register_additional_events(self): - """ - Register more ignite Events to the engine. - - """ - pass - - def _register_post_transforms(self, posttrans): + def _register_post_transforms(self, posttrans: Callable): """ Register the post transforms to the engine, will execute them as a chain when iteration completed. @@ -151,11 +162,9 @@ def _register_post_transforms(self, posttrans): @self.on(Events.ITERATION_COMPLETED) def run_post_transform(engine: Engine) -> None: - if posttrans is None: - raise AssertionError engine.state.output = apply_transform(posttrans, engine.state.output) - def _register_metrics(self, k_metric, add_metrics): + def _register_metrics(self, k_metric: Dict, add_metrics: Optional[Dict] = None): """ Register the key metric and additional metrics to the engine, supports ignite Metrics. @@ -180,7 +189,7 @@ def _compare_metrics(engine: Engine) -> None: engine.state.best_metric = current_val_metric engine.state.best_metric_epoch = engine.state.epoch - def _register_handlers(self, handlers): + def _register_handlers(self, handlers: Sequence): """ Register the handlers to the engine, supports ignite Handlers with `attach` API. 
diff --git a/tests/test_ensemble_evaluator.py b/tests/test_ensemble_evaluator.py index 9cc977d876..28a2d4f941 100644 --- a/tests/test_ensemble_evaluator.py +++ b/tests/test_ensemble_evaluator.py @@ -12,7 +12,7 @@ import unittest import torch -from ignite.engine import Events +from ignite.engine import EventEnum, Events from monai.engines import EnsembleEvaluator @@ -44,11 +44,17 @@ def forward(self, x): net3 = TestNet(lambda x: x + 4) net4 = TestNet(lambda x: x + 5) + class CustomEvents(EventEnum): + FOO_EVENT = "foo_event" + BAR_EVENT = "bar_event" + val_engine = EnsembleEvaluator( device=device, val_data_loader=val_loader, networks=[net0, net1, net2, net3, net4], pred_keys=["pred0", "pred1", "pred2", "pred3", "pred4"], + event_names=["bwd_event", "opt_event", CustomEvents], + event_to_attr={CustomEvents.FOO_EVENT: "foo", "opt_event": "opt"}, ) @val_engine.on(Events.ITERATION_COMPLETED) @@ -57,6 +63,21 @@ def run_post_transform(engine): expected_value = engine.state.iteration + i torch.testing.assert_allclose(engine.state.output[f"pred{i}"], torch.tensor([[expected_value]])) + @val_engine.on(Events.EPOCH_COMPLETED) + def trigger_custom_event(): + val_engine.fire_event(CustomEvents.FOO_EVENT) + val_engine.fire_event(CustomEvents.BAR_EVENT) + val_engine.fire_event("bwd_event") + val_engine.fire_event("opt_event") + + @val_engine.on(CustomEvents.FOO_EVENT) + def do_foo_op(): + self.assertEqual(val_engine.state.foo, 0) + + @val_engine.on("opt_event") + def do_bar_op(): + self.assertEqual(val_engine.state.opt, 0) + val_engine.run() diff --git a/tests/test_handler_garbage_collector.py b/tests/test_handler_garbage_collector.py index 9f63211a13..c2c5dcbfd6 100644 --- a/tests/test_handler_garbage_collector.py +++ b/tests/test_handler_garbage_collector.py @@ -67,7 +67,8 @@ def _train_func(engine, batch): self.assertGreater(gb_count[0], 0) if iter > 1: # Since we are collecting all objects from all generations manually at each call, - # starting from the second call, there shouldn't be any 1st and 2nd generation objects available to collect. + # starting from the second call, there shouldn't be any 1st and 2nd + # generation objects available to collect. self.assertEqual(gb_count[1], first_count) self.assertEqual(gb_count[2], first_count) From 8cbd59ac083d42a7a5a2b2c6ffbe920c1e1599e7 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Thu, 8 Apr 2021 00:54:24 +0100 Subject: [PATCH 162/457] 1965 - register_backward_hook (#1966) * fixes #1965 Signed-off-by: Wenqi Li * adds docstring Signed-off-by: Wenqi Li --- monai/handlers/parameter_scheduler.py | 11 +++++++++++ monai/networks/blocks/crf.py | 2 +- monai/optimizers/lr_scheduler.py | 11 +++++++++++ monai/visualize/class_activation_maps.py | 10 ++++++++-- 4 files changed, 31 insertions(+), 3 deletions(-) diff --git a/monai/handlers/parameter_scheduler.py b/monai/handlers/parameter_scheduler.py index 2aa0224a5a..35ba044586 100644 --- a/monai/handlers/parameter_scheduler.py +++ b/monai/handlers/parameter_scheduler.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + import logging from bisect import bisect_right from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union diff --git a/monai/networks/blocks/crf.py b/monai/networks/blocks/crf.py index 635c750ba9..29d4ef4216 100644 --- a/monai/networks/blocks/crf.py +++ b/monai/networks/blocks/crf.py @@ -1,4 +1,4 @@ -# Copyright 2020 MONAI Consortium +# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/monai/optimizers/lr_scheduler.py b/monai/optimizers/lr_scheduler.py index aa9bf2a89b..c4488f6e07 100644 --- a/monai/optimizers/lr_scheduler.py +++ b/monai/optimizers/lr_scheduler.py @@ -1,3 +1,14 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler diff --git a/monai/visualize/class_activation_maps.py b/monai/visualize/class_activation_maps.py index b310ec0834..c63e8e51d9 100644 --- a/monai/visualize/class_activation_maps.py +++ b/monai/visualize/class_activation_maps.py @@ -18,7 +18,7 @@ import torch.nn.functional as F from monai.transforms import ScaleIntensity -from monai.utils import ensure_tuple +from monai.utils import ensure_tuple, get_torch_version_tuple from monai.visualize.visualizer import default_upsampler __all__ = ["CAM", "GradCAM", "GradCAMpp", "ModelWithHooks", "default_normalizer"] @@ -73,7 +73,13 @@ def __init__( continue _registered.append(name) if self.register_backward: - mod.register_backward_hook(self.backward_hook(name)) + if get_torch_version_tuple() < (1, 8): + mod.register_backward_hook(self.backward_hook(name)) + else: + if "inplace" in mod.__dict__ and mod.__dict__["inplace"]: + # inplace=True causes errors for register_full_backward_hook + mod.__dict__["inplace"] = False + mod.register_full_backward_hook(self.backward_hook(name)) if self.register_forward: mod.register_forward_hook(self.forward_hook(name)) if len(_registered) != len(self.target_layers): From d56c002c70bfccc60a655cc160bd7293e54e7eff Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Thu, 8 Apr 2021 13:52:20 +0800 Subject: [PATCH 163/457] 1968 Enhance meta data doc-string in SegmentationSaver (#1969) * [DLMED] enhance doc-strings Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot --- monai/handlers/segmentation_saver.py | 4 ++++ monai/transforms/io/dictionary.py | 7 ++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py index 9ee7ca67f9..6a98abf3ca 100644 --- a/monai/handlers/segmentation_saver.py +++ b/monai/handlers/segmentation_saver.py @@ -28,6 +28,9 @@ class SegmentationSaver: """ Event handler triggered on completing every iteration to save the segmentation predictions 
into files.
+    It can extract the input image meta data (filename, affine, original_shape, etc.) and resample the predictions
+    based on the meta data.
+
     """

     def __init__(
@@ -96,6 +99,7 @@ def __init__(
             output will be: /output/test1/image/image_seg.nii.gz
         batch_transform: a callable that is used to transform the
             ignite.engine.batch into expected format to extract the meta_data dictionary.
+            it can be used to extract the input image meta data: filename, affine, original_shape, etc.
         output_transform: a callable that is used to transform the
             ignite.engine.output into the form expected image data.
             The first dimension of this transform's output will be treated as the
diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py
index 58d6431c74..413f83b62d 100644
--- a/monai/transforms/io/dictionary.py
+++ b/monai/transforms/io/dictionary.py
@@ -132,9 +132,10 @@ class SaveImaged(MapTransform):
         keys: keys of the corresponding items to be transformed.
             See also: :py:class:`monai.transforms.compose.MapTransform`
         meta_key_postfix: `key_{postfix}` was used to store the metadata in `LoadImaged`.
-            So need the key to extract metadata to save images, default is `meta_dict`.
-            The meta data is a dictionary object, if no corresponding metadata, set to `None`.
-            For example, for data with key `image`, the metadata by default is in `image_meta_dict`.
+            so need the key to extract metadata to save images, default is `meta_dict`.
+            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
+            the meta data is a dictionary object which contains: filename, affine, original_shape, etc.
+            if no corresponding metadata, set to `None`.
         output_dir: output image directory.
         output_postfix: a string appended to all output file names, default to `trans`.
         output_ext: output file extension name, available extensions: `.nii.gz`, `.nii`, `.png`.
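The `meta_key_postfix` convention documented above pairs the two transforms as in this small sketch; the input path and output directory are placeholders:

    from monai.transforms import LoadImaged, SaveImaged

    load = LoadImaged(keys=["image"])  # produces data["image"] plus data["image_meta_dict"]
    save = SaveImaged(keys=["image"], meta_key_postfix="meta_dict", output_dir="./out")

    data = load({"image": "subject0.nii.gz"})  # hypothetical input file
    print(data["image_meta_dict"]["filename_or_obj"])  # filename, affine, shape, etc. live here
    save(data)  # writes the prediction using the matching meta dict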
From e7a74223dc0d2cbda86ae9a2040ba0a7ed624d31 Mon Sep 17 00:00:00 2001 From: masadcv Date: Fri, 9 Apr 2021 09:23:35 +0100 Subject: [PATCH 164/457] Adding EfficientNetsB0-B7 support (#1938) * adding init efficientnet support Signed-off-by: masadcv * fixing flake8 and further refactoring Signed-off-by: masadcv * adding unittests for efficiennet Signed-off-by: masadcv * making unittests backwards compatible python<3.8 Signed-off-by: masadcv * fixed kitty unittests file path Signed-off-by: masadcv * adding docstrings and minor refactoring Signed-off-by: masadcv * fix flake8-py3 failing test Signed-off-by: masadcv * generalize drop_connect for n-dim, fix/add unittests, remove assert Signed-off-by: masadcv * fix failing unittest, CC0-license image for test Signed-off-by: masadcv * refactoring code for review Signed-off-by: masadcv * WIP fix mypy type hint errors Signed-off-by: masadcv * fix cuda test error Signed-off-by: masadcv * WIP fix test errors Signed-off-by: masadcv * adding non-default shape tests Signed-off-by: masadcv * remove 3d case from non-default shape test Signed-off-by: masadcv * refactoring and updating docs Signed-off-by: masadcv Co-authored-by: Yiheng Wang <68361391+yiheng-wang-nv@users.noreply.github.com> Co-authored-by: Wenqi Li --- docs/source/networks.rst | 10 + monai/networks/blocks/__init__.py | 2 +- monai/networks/blocks/activation.py | 53 +- monai/networks/layers/factories.py | 7 + monai/networks/nets/__init__.py | 1 + monai/networks/nets/efficientnet.py | 849 ++++++++++++++++++++++++++++ tests/min_tests.py | 1 + tests/test_activations.py | 11 +- tests/test_efficientnet.py | 308 ++++++++++ tests/testing_data/kitty_test.jpg | Bin 0 -> 61168 bytes 10 files changed, 1239 insertions(+), 3 deletions(-) create mode 100644 monai/networks/nets/efficientnet.py create mode 100644 tests/test_efficientnet.py create mode 100644 tests/testing_data/kitty_test.jpg diff --git a/docs/source/networks.rst b/docs/source/networks.rst index abf75bda1d..baee107620 100644 --- a/docs/source/networks.rst +++ b/docs/source/networks.rst @@ -35,6 +35,11 @@ Blocks .. autoclass:: Swish :members: +`MemoryEfficientSwish` +~~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: MemoryEfficientSwish + :members: + `Mish` ~~~~~~ .. autoclass:: Mish @@ -292,6 +297,11 @@ Nets .. autoclass:: DenseNet :members: +`EfficientNet` +~~~~~~~~~~~~~~ +.. autoclass:: EfficientNet + :members: + `SegResNet` ~~~~~~~~~~~ .. autoclass:: SegResNet diff --git a/monai/networks/blocks/__init__.py b/monai/networks/blocks/__init__.py index cdf7bc3f6d..ed6ac12430 100644 --- a/monai/networks/blocks/__init__.py +++ b/monai/networks/blocks/__init__.py @@ -10,7 +10,7 @@ # limitations under the License. from .acti_norm import ADN -from .activation import Mish, Swish +from .activation import MemoryEfficientSwish, Mish, Swish from .aspp import SimpleASPP from .convolutions import Convolution, ResidualUnit from .crf import CRF diff --git a/monai/networks/blocks/activation.py b/monai/networks/blocks/activation.py index ef6c74f282..f6a04e830e 100644 --- a/monai/networks/blocks/activation.py +++ b/monai/networks/blocks/activation.py @@ -17,7 +17,7 @@ class Swish(nn.Module): r"""Applies the element-wise function: .. math:: - \text{Swish}(x) = x * \text{Sigmoid}(\alpha * x) for constant value alpha. + \text{Swish}(x) = x * \text{Sigmoid}(\alpha * x) ~~~~\text{for constant value}~ \alpha. Citation: Searching for Activation Functions, Ramachandran et al., 2017, https://arxiv.org/abs/1710.05941. 
@@ -43,6 +43,57 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: return input * torch.sigmoid(self.alpha * input) +class SwishImplementation(torch.autograd.Function): + r"""Memory efficient implementation for training + Follows recommendation from: + https://github.com/lukemelas/EfficientNet-PyTorch/issues/18#issuecomment-511677853 + + Results in ~ 30% memory saving during training as compared to Swish() + """ + + @staticmethod + def forward(ctx, input): + result = input * torch.sigmoid(input) + ctx.save_for_backward(input) + return result + + @staticmethod + def backward(ctx, grad_output): + input = ctx.saved_tensors[0] + sigmoid_input = torch.sigmoid(input) + return grad_output * (sigmoid_input * (1 + input * (1 - sigmoid_input))) + + +class MemoryEfficientSwish(nn.Module): + r"""Applies the element-wise function: + + .. math:: + \text{Swish}(x) = x * \text{Sigmoid}(\alpha * x) ~~~~\text{for constant value}~ \alpha=1. + + Memory efficient implementation for training following recommendation from: + https://github.com/lukemelas/EfficientNet-PyTorch/issues/18#issuecomment-511677853 + + Results in ~ 30% memory saving during training as compared to Swish() + + Citation: Searching for Activation Functions, Ramachandran et al., 2017, https://arxiv.org/abs/1710.05941. + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + + Examples:: + + >>> m = Act['memswish']() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def forward(self, input: torch.Tensor): + return SwishImplementation.apply(input) + + class Mish(nn.Module): r"""Applies the element-wise function: diff --git a/monai/networks/layers/factories.py b/monai/networks/layers/factories.py index ec36b2ed95..9165a8ebe4 100644 --- a/monai/networks/layers/factories.py +++ b/monai/networks/layers/factories.py @@ -256,6 +256,13 @@ def swish_factory(): return Swish +@Act.factory_function("memswish") +def memswish_factory(): + from monai.networks.blocks.activation import MemoryEfficientSwish + + return MemoryEfficientSwish + + @Act.factory_function("mish") def mish_factory(): from monai.networks.blocks.activation import Mish diff --git a/monai/networks/nets/__init__.py b/monai/networks/nets/__init__.py index 6876293bdb..91f46debf6 100644 --- a/monai/networks/nets/__init__.py +++ b/monai/networks/nets/__init__.py @@ -15,6 +15,7 @@ from .classifier import Classifier, Critic, Discriminator from .densenet import DenseNet, DenseNet121, DenseNet169, DenseNet201, DenseNet264 from .dynunet import DynUNet, DynUnet, Dynunet +from .efficientnet import EfficientNet, EfficientNetBN, drop_connect, get_efficientnet_image_size from .fullyconnectednet import FullyConnectedNet, VarFullyConnectedNet from .generator import Generator from .highresnet import HighResBlock, HighResNet diff --git a/monai/networks/nets/efficientnet.py b/monai/networks/nets/efficientnet.py new file mode 100644 index 0000000000..d8754e3f78 --- /dev/null +++ b/monai/networks/nets/efficientnet.py @@ -0,0 +1,849 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import operator
+import re
+from functools import reduce
+from typing import List, NamedTuple, Optional, Tuple, Type, Union
+
+import torch
+from torch import nn
+from torch.utils import model_zoo
+
+from monai.networks.layers.factories import Act, Conv, Norm, Pad, Pool
+
+__all__ = ["EfficientNetBN", "get_efficientnet_image_size", "drop_connect"]
+
+efficientnet_params = {
+    # model_name: (width_mult, depth_mult, image_size, dropout_rate, dropconnect_rate)
+    "efficientnet-b0": (1.0, 1.0, 224, 0.2, 0.2),
+    "efficientnet-b1": (1.0, 1.1, 240, 0.2, 0.2),
+    "efficientnet-b2": (1.1, 1.2, 260, 0.3, 0.2),
+    "efficientnet-b3": (1.2, 1.4, 300, 0.3, 0.2),
+    "efficientnet-b4": (1.4, 1.8, 380, 0.4, 0.2),
+    "efficientnet-b5": (1.6, 2.2, 456, 0.4, 0.2),
+    "efficientnet-b6": (1.8, 2.6, 528, 0.5, 0.2),
+    "efficientnet-b7": (2.0, 3.1, 600, 0.5, 0.2),
+}
+
+
+class MBConvBlock(nn.Module):
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        out_channels: int,
+        kernel_size: int,
+        stride: int,
+        image_size: List[int],
+        expand_ratio: int,
+        se_ratio: Optional[float],
+        id_skip: Optional[bool] = True,
+        batch_norm_momentum: float = 0.99,
+        batch_norm_epsilon: float = 1e-3,
+        drop_connect_rate: Optional[float] = 0.2,
+    ) -> None:
+        """
+        Mobile Inverted Residual Bottleneck Block.
+
+        Args:
+            spatial_dims: number of spatial dimensions.
+            in_channels: number of input channels.
+            out_channels: number of output channels.
+            kernel_size: size of the kernel for conv ops.
+            stride: stride to use for conv ops.
+            image_size: input image resolution.
+            expand_ratio: expansion ratio for inverted bottleneck.
+            se_ratio: squeeze-excitation ratio for se layers.
+            id_skip: whether to use skip connection.
+            batch_norm_momentum: momentum for batch norm.
+            batch_norm_epsilon: epsilon for batch norm.
+            drop_connect_rate: dropconnect rate for drop connection (individual weights) layers.
+
+        References:
+            [1] https://arxiv.org/abs/1704.04861 (MobileNet v1)
+            [2] https://arxiv.org/abs/1801.04381 (MobileNet v2)
+            [3] https://arxiv.org/abs/1905.02244 (MobileNet v3)
+        """
+        super().__init__()
+
+        # select the type of N-Dimensional layers to use
+        # these are based on spatial dims and selected from MONAI factories
+        conv_type = Conv["conv", spatial_dims]
+        batchnorm_type = Norm["batch", spatial_dims]
+        adaptivepool_type = Pool["adaptiveavg", spatial_dims]
+
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.id_skip = id_skip
+        self.stride = stride
+        self.expand_ratio = expand_ratio
+        self.drop_connect_rate = drop_connect_rate
+
+        if (se_ratio is not None) and (0.0 < se_ratio <= 1.0):
+            self.has_se = True
+            self.se_ratio = se_ratio
+        else:
+            self.has_se = False
+
+        bn_mom = 1.0 - batch_norm_momentum  # pytorch's difference from tensorflow
+        bn_eps = batch_norm_epsilon
+
+        # Expansion phase (Inverted Bottleneck)
+        inp = in_channels  # number of input channels
+        oup = in_channels * expand_ratio  # number of output channels
+        if self.expand_ratio != 1:
+            self._expand_conv = conv_type(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
+            self._expand_conv_padding = _make_same_padder(self._expand_conv, image_size)
+
+            self._bn0 = batchnorm_type(num_features=oup, momentum=bn_mom, eps=bn_eps)
+        else:
+            # need to have the following to fix JIT error:
+            # "Module 'MBConvBlock' has no attribute '_expand_conv'"
+
+            # FIXME: find a better way to bypass JIT error
+            self._expand_conv = nn.Identity()
+            self._expand_conv_padding = nn.Identity()
+            self._bn0 = nn.Identity()
+
+        # Depthwise convolution phase
+        self._depthwise_conv = conv_type(
+            in_channels=oup,
+            out_channels=oup,
+            groups=oup,  # groups makes it depthwise
+            kernel_size=kernel_size,
+            stride=self.stride,
+            bias=False,
+        )
+        self._depthwise_conv_padding = _make_same_padder(self._depthwise_conv, image_size)
+        self._bn1 = batchnorm_type(num_features=oup, momentum=bn_mom, eps=bn_eps)
+        image_size = _calculate_output_image_size(image_size, self.stride)
+
+        # Squeeze and Excitation layer, if desired
+        if self.has_se:
+            self._se_adaptpool = adaptivepool_type(1)
+            num_squeezed_channels = max(1, int(in_channels * self.se_ratio))
+            self._se_reduce = conv_type(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
+            self._se_reduce_padding = _make_same_padder(self._se_reduce, [1, 1])
+            self._se_expand = conv_type(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
+            self._se_expand_padding = _make_same_padder(self._se_expand, [1, 1])
+
+        # Pointwise convolution phase
+        final_oup = out_channels
+        self._project_conv = conv_type(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
+        self._project_conv_padding = _make_same_padder(self._project_conv, image_size)
+        self._bn2 = batchnorm_type(num_features=final_oup, momentum=bn_mom, eps=bn_eps)
+
+        # swish activation to use - using memory efficient swish by default
+        # can be switched to normal swish using self.set_swish() function call
+        self._swish = Act["memswish"]()
+
+    def forward(self, inputs: torch.Tensor):
+        """MBConvBlock's forward function.
+
+        Args:
+            inputs: Input tensor.
+
+        Returns:
+            Output of this block after processing.
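+
+        Example (an illustrative sketch; a stride-2 block halves each spatial dim)::
+
+            block = MBConvBlock(
+                spatial_dims=2, in_channels=16, out_channels=24, kernel_size=3,
+                stride=2, image_size=[64, 64], expand_ratio=6, se_ratio=0.25,
+            )
+            y = block(torch.rand(1, 16, 64, 64))  # y.shape == (1, 24, 32, 32)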
+ """ + # Expansion and Depthwise Convolution + x = inputs + if self.expand_ratio != 1: + x = self._expand_conv(self._expand_conv_padding(x)) + x = self._bn0(x) + x = self._swish(x) + + x = self._depthwise_conv(self._depthwise_conv_padding(x)) + x = self._bn1(x) + x = self._swish(x) + + # Squeeze and Excitation + if self.has_se: + x_squeezed = self._se_adaptpool(x) + x_squeezed = self._se_reduce(self._se_reduce_padding(x_squeezed)) + x_squeezed = self._swish(x_squeezed) + x_squeezed = self._se_expand(self._se_expand_padding(x_squeezed)) + x = torch.sigmoid(x_squeezed) * x + + # Pointwise Convolution + x = self._project_conv(self._project_conv_padding(x)) + x = self._bn2(x) + + # Skip connection and drop connect + if self.id_skip and self.stride == 1 and self.in_channels == self.out_channels: + # the combination of skip connection and drop connect brings about stochastic depth. + if self.drop_connect_rate: + x = drop_connect(x, p=self.drop_connect_rate, training=self.training) + x = x + inputs # skip connection + return x + + def set_swish(self, memory_efficient: bool = True) -> None: + """Sets swish function as memory efficient (for training) or standard (for export). + + Args: + memory_efficient (bool): Whether to use memory-efficient version of swish. + """ + self._swish = Act["memswish"]() if memory_efficient else Act["swish"](alpha=1.0) + + +class EfficientNet(nn.Module): + def __init__( + self, + blocks_args_str: List[str], + spatial_dims: int = 2, + in_channels: int = 3, + num_classes: int = 1000, + width_coefficient: float = 1.0, + depth_coefficient: float = 1.0, + dropout_rate: float = 0.2, + image_size: int = 224, + batch_norm_momentum: float = 0.99, + batch_norm_epsilon: float = 1e-3, + drop_connect_rate: float = 0.2, + depth_divisor: int = 8, + ) -> None: + """ + EfficientNet based on `Rethinking Model Scaling for Convolutional Neural Networks `_. + Adapted from `EfficientNet-PyTorch + `_. + + Args: + blocks_args_str: block definitions. + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + num_classes: number of output classes. + width_coefficient: width multiplier coefficient (w in paper). + depth_coefficient: depth multiplier coefficient (d in paper). + dropout_rate: dropout rate for dropout layers. + image_size: input image resolution. + batch_norm_momentum: momentum for batch norm. + batch_norm_epsilon: epsilon for batch norm. + drop_connect_rate: dropconnect rate for drop connection (individual weights) layers. + depth_divisor: depth divisor for channel rounding. 
+ + Examples:: + + # for pretrained spatial 2D ImageNet + >>> image_size = get_efficientnet_image_size("efficientnet-b0") + >>> inputs = torch.rand(1, 3, image_size, image_size) + >>> model = EfficientNetBN("efficientnet-b0", pretrained=True) + >>> model.eval() + >>> outputs = model(inputs) + + # create spatial 2D + >>> model = EfficientNetBN("efficientnet-b0", spatial_dims=2) + + # create spatial 3D + >>> model = EfficientNetBN("efficientnet-b0", spatial_dims=3) + + # create EfficientNetB7 for spatial 2D + >>> model = EfficientNetBN("efficientnet-b7", spatial_dims=2) + + """ + super().__init__() + + if spatial_dims not in (1, 2, 3): + raise ValueError("spatial_dims can only be 1, 2 or 3.") + + # select the type of N-Dimensional layers to use + # these are based on spatial dims and selected from MONAI factories + conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv["conv", spatial_dims] + batchnorm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm["batch", spatial_dims] + adaptivepool_type: Type[Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d]] = Pool[ + "adaptiveavg", spatial_dims + ] + + # decode blocks args into arguments for MBConvBlock + blocks_args = _decode_block_list(blocks_args_str) + + # checks for successful decoding of blocks_args_str + if not isinstance(blocks_args, list): + raise ValueError("blocks_args must be a list") + + if blocks_args == []: + raise ValueError("block_args must be non-empty") + + self._blocks_args = blocks_args + self.num_classes = num_classes + self.in_channels = in_channels + self.drop_connect_rate = drop_connect_rate + + # expand input image dimensions to list + current_image_size = [image_size] * spatial_dims + + # parameters for batch norm + bn_mom = 1 - batch_norm_momentum # 1 - bn_m to convert tensorflow's arg to pytorch bn compatible + bn_eps = batch_norm_epsilon + + # Stem + stride = 2 + out_channels = _round_filters(32, width_coefficient, depth_divisor) # number of output channels + self._conv_stem = conv_type(self.in_channels, out_channels, kernel_size=3, stride=stride, bias=False) + self._conv_stem_padding = _make_same_padder(self._conv_stem, current_image_size) + self._bn0 = batchnorm_type(num_features=out_channels, momentum=bn_mom, eps=bn_eps) + current_image_size = _calculate_output_image_size(current_image_size, stride) + + # build MBConv blocks + num_blocks = 0 + self._blocks = nn.Sequential() + + # update baseline blocks to input/output filters and number of repeats based on width and depth multipliers. + for idx, block_args in enumerate(self._blocks_args): + block_args = block_args._replace( + input_filters=_round_filters(block_args.input_filters, width_coefficient, depth_divisor), + output_filters=_round_filters(block_args.output_filters, width_coefficient, depth_divisor), + num_repeat=_round_repeats(block_args.num_repeat, depth_coefficient), + ) + self._blocks_args[idx] = block_args + + # calculate the total number of blocks - needed for drop_connect estimation + num_blocks += block_args.num_repeat + + # create and add MBConvBlocks to self._blocks + idx = 0 # block index counter + for block_args in self._blocks_args: + blk_drop_connect_rate = self.drop_connect_rate + + # scale drop connect_rate + if blk_drop_connect_rate: + blk_drop_connect_rate *= float(idx) / num_blocks + + # the first block needs to take care of stride and filter size increase. 
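+            # note: the scaling above makes drop_connect depth-dependent, e.g. with
+            # drop_connect_rate=0.2 and num_blocks=16, block 0 gets rate 0.0, block 8
+            # gets 0.1 and block 15 gets 0.1875, so deeper blocks are dropped more
+            # often, following the stochastic depth paper.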
+ self._blocks.add_module( + str(idx), + MBConvBlock( + spatial_dims=spatial_dims, + in_channels=block_args.input_filters, + out_channels=block_args.output_filters, + kernel_size=block_args.kernel_size, + stride=block_args.stride, + image_size=current_image_size, + expand_ratio=block_args.expand_ratio, + se_ratio=block_args.se_ratio, + id_skip=block_args.id_skip, + batch_norm_momentum=batch_norm_momentum, + batch_norm_epsilon=batch_norm_epsilon, + drop_connect_rate=blk_drop_connect_rate, + ), + ) + idx += 1 # increment blocks index counter + + current_image_size = _calculate_output_image_size(current_image_size, block_args.stride) + if block_args.num_repeat > 1: # modify block_args to keep same output size + block_args = block_args._replace(input_filters=block_args.output_filters, stride=1) + + # add remaining block repeated num_repeat times + for _ in range(block_args.num_repeat - 1): + blk_drop_connect_rate = self.drop_connect_rate + + # scale drop connect_rate + if blk_drop_connect_rate: + blk_drop_connect_rate *= float(idx) / num_blocks + + # add blocks + self._blocks.add_module( + str(idx), + MBConvBlock( + spatial_dims=spatial_dims, + in_channels=block_args.input_filters, + out_channels=block_args.output_filters, + kernel_size=block_args.kernel_size, + stride=block_args.stride, + image_size=current_image_size, + expand_ratio=block_args.expand_ratio, + se_ratio=block_args.se_ratio, + id_skip=block_args.id_skip, + batch_norm_momentum=batch_norm_momentum, + batch_norm_epsilon=batch_norm_epsilon, + drop_connect_rate=blk_drop_connect_rate, + ), + ) + idx += 1 # increment blocks index counter + + # sanity check to see if len(self._blocks) equal expected num_blocks + if len(self._blocks) != num_blocks: + raise ValueError("number of blocks created != num_blocks") + + # Head + head_in_channels = block_args.output_filters + out_channels = _round_filters(1280, width_coefficient, depth_divisor) + self._conv_head = conv_type(head_in_channels, out_channels, kernel_size=1, bias=False) + self._conv_head_padding = _make_same_padder(self._conv_head, current_image_size) + self._bn1 = batchnorm_type(num_features=out_channels, momentum=bn_mom, eps=bn_eps) + + # final linear layer + self._avg_pooling = adaptivepool_type(1) + self._dropout = nn.Dropout(dropout_rate) + self._fc = nn.Linear(out_channels, self.num_classes) + + # swish activation to use - using memory efficient swish by default + # can be switched to normal swish using self.set_swish() function call + self._swish = Act["memswish"]() + + # initialize weights using Tensorflow's init method from official impl. + self._initialize_weights() + + def set_swish(self, memory_efficient: bool = True) -> None: + """ + Sets swish function as memory efficient (for training) or standard (for JIT export). + + Args: + memory_efficient: whether to use memory-efficient version of swish. + + """ + self._swish = Act["memswish"]() if memory_efficient else Act["swish"](alpha=1.0) + for block in self._blocks: + block.set_swish(memory_efficient) + + def forward(self, inputs: torch.Tensor): + """ + Args: + inputs: input should have spatially N dimensions + ``(Batch, in_channels, dim_0[, dim_1, ..., dim_N])``, N is defined by `dimensions`. + + Returns: + A torch Tensor of classification prediction in shape + ``(Batch, num_classes)``. 
+ """ + # Stem + x = self._conv_stem(self._conv_stem_padding(inputs)) + x = self._swish(self._bn0(x)) + # Blocks + x = self._blocks(x) + # Head + x = self._conv_head(self._conv_head_padding(x)) + x = self._swish(self._bn1(x)) + + # Pooling and final linear layer + x = self._avg_pooling(x) + + x = x.flatten(start_dim=1) + x = self._dropout(x) + x = self._fc(x) + return x + + def _initialize_weights(self) -> None: + """ + Args: + None, initializes weights for conv/linear/batchnorm layers + following weight init methods from + `official Tensorflow EfficientNet implementation + `_. + Adapted from `EfficientNet-PyTorch's init method + `_. + """ + for _, m in self.named_modules(): + if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): + fan_out = reduce(operator.mul, m.kernel_size, 1) * m.out_channels + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) + fan_in = 0 + init_range = 1.0 / math.sqrt(fan_in + fan_out) + m.weight.data.uniform_(-init_range, init_range) + m.bias.data.zero_() + + +class EfficientNetBN(EfficientNet): + def __init__( + self, + model_name: str, + pretrained: bool = True, + progress: bool = True, + spatial_dims: int = 2, + in_channels: int = 3, + num_classes: int = 1000, + ) -> None: + """ + Generic wrapper around EfficientNet, used to initialize EfficientNet-B0 to EfficientNet-B7 models + model_name is mandatory argument as there is no EfficientNetBN itself, + it needs the N in [0, 1, 2, 3, 4, 5, 6, 7] to be a model + + Args: + model_name: name of model to initialize, can be from [efficientnet-b0, ..., efficientnet-b7]. + pretrained: whether to initialize pretrained ImageNet weights, only available for spatial_dims=2. + progress: whether to show download progress for pretrained weights download. + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + num_classes: number of output classes. 
+ + """ + # block args for EfficientNet-B0 to EfficientNet-B7 + blocks_args_str = [ + "r1_k3_s11_e1_i32_o16_se0.25", + "r2_k3_s22_e6_i16_o24_se0.25", + "r2_k5_s22_e6_i24_o40_se0.25", + "r3_k3_s22_e6_i40_o80_se0.25", + "r3_k5_s11_e6_i80_o112_se0.25", + "r4_k5_s22_e6_i112_o192_se0.25", + "r1_k3_s11_e6_i192_o320_se0.25", + ] + + # check if model_name is valid model + if model_name not in efficientnet_params.keys(): + raise ValueError( + "invalid model_name {} found, must be one of {} ".format( + model_name, ", ".join(efficientnet_params.keys()) + ) + ) + + # get network parameters + weight_coeff, depth_coeff, image_size, drpout_rate, drpconnect_rate = efficientnet_params[model_name] + + # create model and initialize random weights + model = super(EfficientNetBN, self).__init__( + blocks_args_str=blocks_args_str, + spatial_dims=spatial_dims, + in_channels=in_channels, + num_classes=num_classes, + width_coefficient=weight_coeff, + depth_coefficient=depth_coeff, + dropout_rate=drpout_rate, + image_size=image_size, + drop_connect_rate=drpconnect_rate, + ) + + # attempt to load pretrained + is_default_model = (spatial_dims == 2) and (in_channels == 3) + loadable_from_file = pretrained and is_default_model + + if loadable_from_file: + # skip loading fc layers for transfer learning applications + load_fc = num_classes == 1000 + + # only pretrained for when `spatial_dims` is 2 + _load_state_dict(self, model_name, progress, load_fc) + else: + print( + "Skipping loading pretrained weights for non-default {}, pretrained={}, is_default_model={}".format( + model_name, pretrained, is_default_model + ) + ) + + +def get_efficientnet_image_size(model_name: str) -> int: + """ + Get the input image size for a given efficientnet model. + + Args: + model_name: name of model to initialize, can be from [efficientnet-b0, ..., efficientnet-b7]. + + Returns: + Image size for single spatial dimension as integer. + + """ + # check if model_name is valid model + if model_name not in efficientnet_params.keys(): + raise ValueError( + "invalid model_name {} found, must be one of {} ".format(model_name, ", ".join(efficientnet_params.keys())) + ) + + # return input image size (all dims equal so only need to return for one dim) + _, _, res, _, _ = efficientnet_params[model_name] + return res + + +def drop_connect(inputs: torch.Tensor, p: float, training: bool) -> torch.Tensor: + """ + Drop connect layer that drops individual connections. + Differs from dropout as dropconnect drops connections instead of whole neurons as in dropout. + + Based on `Deep Networks with Stochastic Depth `_. + Adapted from `Official Tensorflow EfficientNet utils + `_. + + This function is generalized for MONAI's N-Dimensional spatial activations + e.g. 1D activations [B, C, H], 2D activations [B, C, H, W] and 3D activations [B, C, H, W, D] + + Args: + input: input tensor with [B, C, dim_1, dim_2, ..., dim_N] where N=spatial_dims. + p: probability to use for dropping connections. + training: whether in training or evaluation mode. + + Returns: + output: output tensor after applying drop connection. 
+ """ + if p < 0.0 or p > 1.0: + raise ValueError("p must be in range of [0, 1], found {}".format(p)) + + # eval mode: drop_connect is switched off - so return input without modifying + if not training: + return inputs + + # train mode: calculate and apply drop_connect + batch_size: int = inputs.shape[0] + keep_prob: float = 1 - p + num_dims: int = len(inputs.shape) - 2 + + # build dimensions for random tensor, use num_dims to populate appropriate spatial dims + random_tensor_shape: List[int] = [batch_size, 1] + [1] * num_dims + + # generate binary_tensor mask according to probability (p for 0, 1-p for 1) + random_tensor: torch.Tensor = torch.rand(random_tensor_shape, dtype=inputs.dtype, device=inputs.device) + random_tensor += keep_prob + + # round to form binary tensor + binary_tensor: torch.Tensor = torch.floor(random_tensor) + + # drop connect using binary tensor + output: torch.Tensor = inputs / keep_prob * binary_tensor + return output + + +def _load_state_dict(model: nn.Module, model_name: str, progress: bool, load_fc: bool) -> None: + url_map = { + "efficientnet-b0": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth", + "efficientnet-b1": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth", + "efficientnet-b2": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth", + "efficientnet-b3": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth", + "efficientnet-b4": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth", + "efficientnet-b5": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth", + "efficientnet-b6": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth", + "efficientnet-b7": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth", + } + # load state dict from url + model_url = url_map[model_name] + state_dict = model_zoo.load_url(model_url, progress=progress) + + # load state dict into model parameters + if load_fc: # load everything + ret = model.load_state_dict(state_dict, strict=False) + if ret.missing_keys: + raise ValueError("Found missing keys when loading pretrained weights: {}".format(ret.missing_keys)) + else: # skip final FC layers, for transfer learning cases + state_dict.pop("_fc.weight") + state_dict.pop("_fc.bias") + ret = model.load_state_dict(state_dict, strict=False) + + # check if no other keys missing except FC layer parameters + if set(ret.missing_keys) != {"_fc.weight", "_fc.bias"}: + raise ValueError("Found missing keys when loading pretrained weights: {}".format(ret.missing_keys)) + + # check for any unexpected keys + if ret.unexpected_keys: + raise ValueError("Missing keys when loading pretrained weights: {}".format(ret.unexpected_keys)) + + +def _get_same_padding_conv_nd( + image_size: List[int], kernel_size: Tuple[int, ...], dilation: Tuple[int, ...], stride: Tuple[int, ...] +) -> List[int]: + """ + Helper for getting padding (nn.ConstantPadNd) to be used to get SAME padding + conv operations similar to Tensorflow's SAME padding. + + This function is generalized for MONAI's N-Dimensional spatial operations (e.g. Conv1D, Conv2D, Conv3D) + + Args: + image_size: input image/feature spatial size. + kernel_size: conv kernel's spatial size. 
+        dilation: conv dilation rate for Atrous conv.
+        stride: stride for conv operation.
+
+    Returns:
+        paddings for ConstantPadNd padder to be used on input tensor to conv op.
+    """
+    # get number of spatial dimensions, corresponds to kernel size length
+    num_dims = len(kernel_size)
+
+    # additional checks to populate dilation and stride (in case they are single entry tuples)
+    if len(dilation) == 1:
+        dilation = dilation * num_dims
+
+    if len(stride) == 1:
+        stride = stride * num_dims
+
+    # equation to calculate (pad^+ + pad^-) size
+    _pad_size: List[int] = [
+        max((math.ceil(_i_s / _s) - 1) * _s + (_k_s - 1) * _d + 1 - _i_s, 0)
+        for _i_s, _k_s, _d, _s in zip(image_size, kernel_size, dilation, stride)
+    ]
+    # distribute paddings into pad^+ and pad^- following Tensorflow's same padding strategy
+    _paddings: List[Tuple[int, int]] = [(_p // 2, _p - _p // 2) for _p in _pad_size]
+
+    # unroll list of tuples to tuples, and then to list
+    # reversed as nn.ConstantPadNd expects paddings starting with last dimension
+    _paddings_ret: List[int] = [outer for inner in reversed(_paddings) for outer in inner]
+    return _paddings_ret
+
+
+def _make_same_padder(conv_op: Union[nn.Conv1d, nn.Conv2d, nn.Conv3d], image_size: List[int]):
+    """
+    Helper for initializing ConstantPadNd with SAME padding similar to Tensorflow.
+    Uses output of _get_same_padding_conv_nd() to get the padding size.
+
+    This function is generalized for MONAI's N-Dimensional spatial operations (e.g. Conv1D, Conv2D, Conv3D)
+
+    Args:
+        conv_op: nn.ConvNd operation to extract the padding parameters from.
+        image_size: input image/feature spatial size.
+
+    Returns:
+        If padding is required, an nn.ConstantPadNd padder initialized to the paddings, otherwise nn.Identity().
+    """
+    # calculate padding required
+    padding: List[int] = _get_same_padding_conv_nd(image_size, conv_op.kernel_size, conv_op.dilation, conv_op.stride)
+
+    # initialize and return padder
+    padder = Pad["constantpad", len(padding) // 2]
+    if sum(padding) > 0:
+        return padder(padding=padding, value=0.0)
+    else:
+        return nn.Identity()
+
+
+def _round_filters(filters: int, width_coefficient: Optional[float], depth_divisor: float) -> int:
+    """
+    Calculate and round number of filters based on width coefficient multiplier and depth divisor.
+
+    Args:
+        filters: number of input filters.
+        width_coefficient: width coefficient for model.
+        depth_divisor: depth divisor to use.
+
+    Returns:
+        new_filters: new number of filters after calculation.
+    """
+
+    if not width_coefficient:
+        return filters
+
+    multiplier: float = width_coefficient
+    divisor: float = depth_divisor
+    filters_float: float = filters * multiplier
+
+    # follow the formula transferred from official TensorFlow implementation
+    new_filters: float = max(divisor, int(filters_float + divisor / 2) // divisor * divisor)
+    if new_filters < 0.9 * filters_float:  # prevent rounding by more than 10%
+        new_filters += divisor
+    return int(new_filters)
+
+
+def _round_repeats(repeats: int, depth_coefficient: Optional[float]) -> int:
+    """
+    Re-calculate a block's number of repeats based on the depth coefficient multiplier.
+
+    Args:
+        repeats: number of original repeats.
+        depth_coefficient: depth coefficient for model.
+
+    Returns:
+        new number of repeats after calculation.
+    """
+    if not depth_coefficient:
+        return repeats
+
+    # follow the formula transferred from official TensorFlow impl.
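+    # illustrative values: depth_coefficient=1.8 and repeats=2 give int(math.ceil(1.8 * 2)) = 4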
+    return int(math.ceil(depth_coefficient * repeats))
+
+
+def _calculate_output_image_size(input_image_size: List[int], stride: Union[int, Tuple[int, ...]]):
+    """
+    Calculates the output image size when using _make_same_padder with a stride.
+    Required for static padding.
+
+    Args:
+        input_image_size: input image/feature spatial size.
+        stride: stride of the conv operation.
+
+    Returns:
+        output_image_size: output image/feature spatial size.
+    """
+    # get number of spatial dimensions, corresponds to image spatial size length
+    num_dims = len(input_image_size)
+
+    # checks to extract integer stride in case tuple was received
+    if isinstance(stride, tuple):
+        all_strides_equal = all(stride[0] == s for s in stride)
+        if not all_strides_equal:
+            raise ValueError("unequal strides are not possible, got {}".format(stride))
+
+        stride = stride[0]
+
+    # return output image size
+    return [int(math.ceil(im_sz / stride)) for im_sz in input_image_size]
+
+
+def _decode_block_list(string_list: List[str]):
+    """
+    Decode a list of string notations to specify blocks inside the network.
+
+    Args:
+        string_list: a list of strings, each string is a notation of a block.
+
+    Returns:
+        blocks_args: a list of BlockArgs namedtuples of block args.
+    """
+    # Parameters for an individual model block
+    # namedtuple with defaults for mypy help from:
+    # https://stackoverflow.com/a/53255358
+    class BlockArgs(NamedTuple):
+        num_repeat: int
+        kernel_size: int
+        stride: int
+        expand_ratio: int
+        input_filters: int
+        output_filters: int
+        id_skip: bool
+        se_ratio: Optional[float] = None
+
+    def _decode_block_string(block_string: str):
+        """
+        Get a block through a string notation of arguments.
+
+        Args:
+            block_string (str): A string notation of arguments.
+                Examples: "r1_k3_s11_e1_i32_o16_se0.25".
+
+        Returns:
+            BlockArgs: namedtuple defined at the top of this function.
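+
+        Example (illustrative, decoding one of the notations listed above)::
+
+            _decode_block_string("r2_k5_s22_e6_i24_o40_se0.25")
+            # -> BlockArgs(num_repeat=2, kernel_size=5, stride=2, expand_ratio=6,
+            #              input_filters=24, output_filters=40, id_skip=True, se_ratio=0.25)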
+ """ + ops = block_string.split("_") + options = {} + for op in ops: + splits = re.split(r"(\d.*)", op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # check stride + stride_check = ( + ("s" in options and len(options["s"]) == 1) + or (len(options["s"]) == 2 and options["s"][0] == options["s"][1]) + or (len(options["s"]) == 3 and options["s"][0] == options["s"][1] and options["s"][0] == options["s"][2]) + ) + if not stride_check: + raise ValueError("invalid stride option recieved") + + return BlockArgs( + num_repeat=int(options["r"]), + kernel_size=int(options["k"]), + stride=int(options["s"][0]), + expand_ratio=int(options["e"]), + input_filters=int(options["i"]), + output_filters=int(options["o"]), + id_skip=("noskip" not in block_string), + se_ratio=float(options["se"]) if "se" in options else None, + ) + + # convert block strings into BlockArgs for each entry in string_list list + blocks_args: List[BlockArgs] = [] + for current_string in string_list: + blocks_args.append(_decode_block_string(current_string)) + + # return blocks_args list, to be used for arguments of MBConv layers in EfficientNet + return blocks_args diff --git a/tests/min_tests.py b/tests/min_tests.py index 98f6d822a7..586956eec0 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -33,6 +33,7 @@ def run_testsuit(): "test_cachedataset_parallel", "test_dataset", "test_detect_envelope", + "test_efficientnet", "test_iterable_dataset", "test_ensemble_evaluator", "test_handler_checkpoint_loader", diff --git a/tests/test_activations.py b/tests/test_activations.py index 1614642d6d..5ed9ec2046 100644 --- a/tests/test_activations.py +++ b/tests/test_activations.py @@ -48,6 +48,15 @@ ] TEST_CASE_5 = [ + "memswish", + torch.tensor([[[[-10, -8, -6, -4, -2], [0, 2, 4, 6, 8]]]], dtype=torch.float32), + torch.tensor( + [[[[-4.54e-04, -2.68e-03, -1.48e-02, -7.19e-02, -2.38e-01], [0.00e00, 1.76e00, 3.93e00, 5.99e00, 8.00e00]]]] + ), + (1, 1, 2, 5), +] + +TEST_CASE_6 = [ "mish", torch.tensor([[[[-10, -8, -6, -4, -2], [0, 2, 4, 6, 8]]]], dtype=torch.float32), torch.tensor( @@ -64,7 +73,7 @@ def test_value_shape(self, input_param, img, out, expected_shape): torch.testing.assert_allclose(result, out) self.assertTupleEqual(result.shape, expected_shape) - @parameterized.expand([TEST_CASE_4, TEST_CASE_5]) + @parameterized.expand([TEST_CASE_4, TEST_CASE_5, TEST_CASE_6]) def test_monai_activations_value_shape(self, input_param, img, out, expected_shape): act = Act[input_param]() result = act(img) diff --git a/tests/test_efficientnet.py b/tests/test_efficientnet.py new file mode 100644 index 0000000000..7ef56c52a9 --- /dev/null +++ b/tests/test_efficientnet.py @@ -0,0 +1,308 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+import unittest
+from typing import TYPE_CHECKING
+from unittest import skipUnless
+
+import torch
+from parameterized import parameterized
+
+from monai.networks import eval_mode
+from monai.networks.nets import EfficientNetBN, drop_connect, get_efficientnet_image_size
+from monai.utils import optional_import
+from tests.utils import skip_if_quick, test_pretrained_networks, test_script_save
+
+if TYPE_CHECKING:
+    import torchvision
+
+    has_torchvision = True
+else:
+    torchvision, has_torchvision = optional_import("torchvision")
+
+if TYPE_CHECKING:
+    import PIL
+
+    has_pil = True
+else:
+    PIL, has_pil = optional_import("PIL")
+
+
+def get_model_names():
+    return ["efficientnet-b{}".format(d) for d in range(8)]
+
+
+def get_expected_model_shape(model_name):
+    model_input_shapes = {
+        "efficientnet-b0": 224,
+        "efficientnet-b1": 240,
+        "efficientnet-b2": 260,
+        "efficientnet-b3": 300,
+        "efficientnet-b4": 380,
+        "efficientnet-b5": 456,
+        "efficientnet-b6": 528,
+        "efficientnet-b7": 600,
+    }
+    return model_input_shapes[model_name]
+
+
+def make_shape_cases(models, spatial_dims, batches, pretrained, in_channels=3, num_classes=1000):
+    ret_tests = []
+    for spatial_dim in spatial_dims:  # selected spatial_dims
+        for batch in batches:  # check single batch as well as multiple batch input
+            for model in models:  # selected models
+                for is_pretrained in pretrained:  # pretrained or not pretrained
+                    kwargs = {
+                        "model_name": model,
+                        "pretrained": is_pretrained,
+                        "progress": False,
+                        "spatial_dims": spatial_dim,
+                        "in_channels": in_channels,
+                        "num_classes": num_classes,
+                    }
+                    ret_tests.append(
+                        [
+                            kwargs,
+                            (
+                                batch,
+                                in_channels,
+                            )
+                            + (get_expected_model_shape(model),) * spatial_dim,
+                            (batch, num_classes),
+                        ]
+                    )
+    return ret_tests
+
+
+# create a list of selected models to reduce redundant testing
+# only test the models B0, B3, B7
+SEL_MODELS = [get_model_names()[i] for i in [0, 3, 7]]
+
+# pretrained=False cases
+# 1D models are cheap, so test all models in 1D
+CASES_1D = make_shape_cases(
+    models=get_model_names(), spatial_dims=[1], batches=[1, 4], pretrained=[False], in_channels=3, num_classes=1000
+)
+
+# 2D and 3D models are expensive so use selected models
+CASES_2D = make_shape_cases(
+    models=SEL_MODELS, spatial_dims=[2], batches=[1, 4], pretrained=[False], in_channels=3, num_classes=1000
+)
+CASES_3D = make_shape_cases(
+    models=[SEL_MODELS[0]], spatial_dims=[3], batches=[1], pretrained=[False], in_channels=3, num_classes=1000
+)
+
+# pretrained=True cases
+# tabby kitty test with pretrained model
+# needs 'testing_data/kitty_test.jpg'
+# image from: https://commons.wikimedia.org/wiki/File:Tabby_cat_with_blue_eyes-3336579.jpg
+CASES_KITTY_TRAINED = [
+    (
+        {
+            "model_name": "efficientnet-b0",
+            "pretrained": True,
+            "progress": False,
+            "spatial_dims": 2,
+            "in_channels": 3,
+            "num_classes": 1000,
+        },
+        os.path.join(os.path.dirname(__file__), "testing_data", "kitty_test.jpg"),
+        282,  # ~ tiger cat
+    ),
+    (
+        {
+            "model_name": "efficientnet-b3",
+            "pretrained": True,
+            "progress": False,
+            "spatial_dims": 2,
+            "in_channels": 3,
+            "num_classes": 1000,
+        },
+        os.path.join(os.path.dirname(__file__), "testing_data", "kitty_test.jpg"),
+        282,  # ~ tiger cat
+    ),
+    (
+        {
+            "model_name": "efficientnet-b7",
+            "pretrained": True,
+            "progress": False,
+            "spatial_dims": 2,
+            "in_channels": 3,
+            "num_classes": 1000,
+        },
+        os.path.join(os.path.dirname(__file__), "testing_data", "kitty_test.jpg"),
+        282,  # ~ tiger cat
+    ),
+]
+
+# varying num_classes and in_channels
+CASES_VARIATIONS = []
+
+# change num_classes test
+# 10 classes
+# 2D
+CASES_VARIATIONS.extend(
+    make_shape_cases(
+        models=SEL_MODELS, spatial_dims=[2], batches=[1], pretrained=[False, True], in_channels=3, num_classes=10
+    )
+)
+# 3D
+CASES_VARIATIONS.extend(
+    make_shape_cases(
+        models=[SEL_MODELS[0]], spatial_dims=[3], batches=[1], pretrained=[False], in_channels=3, num_classes=10
+    )
+)
+
+# change in_channels test
+# 1 channel
+# 2D
+CASES_VARIATIONS.extend(
+    make_shape_cases(
+        models=SEL_MODELS, spatial_dims=[2], batches=[1], pretrained=[False, True], in_channels=1, num_classes=1000
+    )
+)
+# 8 channel
+# 2D
+CASES_VARIATIONS.extend(
+    make_shape_cases(
+        models=SEL_MODELS, spatial_dims=[2], batches=[1], pretrained=[False, True], in_channels=8, num_classes=1000
+    )
+)
+# 3D
+CASES_VARIATIONS.extend(
+    make_shape_cases(
+        models=[SEL_MODELS[0]], spatial_dims=[3], batches=[1], pretrained=[False], in_channels=1, num_classes=1000
+    )
+)
+
+
+class TestEFFICIENTNET(unittest.TestCase):
+    @parameterized.expand(CASES_1D + CASES_2D + CASES_3D + CASES_VARIATIONS)
+    def test_shape(self, input_param, input_shape, expected_shape):
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(input_param)
+
+        # initialize model
+        net = EfficientNetBN(**input_param).to(device)
+
+        # run inference with random tensor
+        with eval_mode(net):
+            result = net(torch.randn(input_shape).to(device))
+
+        # check output shape
+        self.assertEqual(result.shape, expected_shape)
+
+    @parameterized.expand(CASES_1D + CASES_2D)
+    def test_non_default_shapes(self, input_param, input_shape, expected_shape):
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(input_param)
+
+        # initialize model
+        net = EfficientNetBN(**input_param).to(device)
+
+        # override input shape with different variations
+        num_dims = len(input_shape) - 2
+        non_default_sizes = [128, 256, 512]
+        for candidate_size in non_default_sizes:
+            input_shape = input_shape[0:2] + (candidate_size,) * num_dims
+            print(input_shape)
+            # run inference with random tensor
+            with eval_mode(net):
+                result = net(torch.randn(input_shape).to(device))
+
+            # check output shape
+            self.assertEqual(result.shape, expected_shape)
+
+    @parameterized.expand(CASES_KITTY_TRAINED)
+    @skip_if_quick
+    @skipUnless(has_torchvision, "Requires `torchvision` package.")
+    @skipUnless(has_pil, "Requires `pillow` package.")
+    def test_kitty_pretrained(self, input_param, image_path, expected_label):
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+
+        # open image
+        image_size = get_efficientnet_image_size(input_param["model_name"])
+        img = PIL.Image.open(image_path)
+
+        # define ImageNet transform
+        tfms = torchvision.transforms.Compose(
+            [
+                torchvision.transforms.Resize(image_size),
+                torchvision.transforms.CenterCrop(image_size),
+                torchvision.transforms.ToTensor(),
+                torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+            ]
+        )
+
+        # preprocess and prepare image tensor
+        img = tfms(img).unsqueeze(0).to(device)
+
+        # initialize a pretrained model
+        net = test_pretrained_networks(EfficientNetBN, input_param, device)
+
+        # run inference
+        with eval_mode(net):
+            result = net(img)
+            pred_label = torch.argmax(result, dim=-1)
+
+        # check output
+        self.assertEqual(pred_label, expected_label)
+
+    def test_drop_connect_layer(self):
+        p_list = [float(d + 1) / 10.0 for d in range(9)]
+        # testing 1D, 2D and 3D shape
+        for rand_tensor_shape in [(512, 16, 4), (384, 16, 4, 4), (256, 16, 4, 4, 4)]:
+
+            # test validation mode, out tensor == in tensor
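+            # (in eval mode drop_connect must be an exact identity: no mask, no rescaling)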
+            training = False
+            for p in p_list:
+                in_tensor = torch.rand(rand_tensor_shape) + 0.1
+                out_tensor = drop_connect(in_tensor, p, training=training)
+                self.assertTrue(torch.equal(out_tensor, in_tensor))
+
+            # test training mode: the fraction of entries where out * (1 - p) != in should approximate p
+            # use tolerance of 0.175 to account for sampling error with finitely many entries
+            tol = 0.175
+            training = True
+            for p in p_list:
+                in_tensor = torch.rand(rand_tensor_shape) + 0.1
+                out_tensor = drop_connect(in_tensor, p, training=training)
+
+                p_calculated = 1.0 - torch.sum(torch.isclose(in_tensor, out_tensor * (1.0 - p))) / float(
+                    in_tensor.numel()
+                )
+                p_calculated = p_calculated.cpu().numpy()
+
+                self.assertTrue(abs(p_calculated - p) < tol)
+
+    def test_ill_arg(self):
+        # wrong spatial_dims
+        with self.assertRaises(ValueError):
+            EfficientNetBN(model_name="efficientnet-b0", spatial_dims=4)
+        # wrong model_name (in a separate context manager so it is actually executed)
+        with self.assertRaises(ValueError):
+            EfficientNetBN(model_name="efficientnet-b10", spatial_dims=3)
+
+    def test_func_get_efficientnet_input_shape(self):
+        for model in get_model_names():
+            result_shape = get_efficientnet_image_size(model_name=model)
+            expected_shape = get_expected_model_shape(model)
+            self.assertEqual(result_shape, expected_shape)
+
+    def test_script(self):
+        net = EfficientNetBN(model_name="efficientnet-b0", spatial_dims=2, in_channels=3, num_classes=1000)
+        net.set_swish(memory_efficient=False)  # at the moment custom memory efficient swish is not exportable with jit
+        test_data = torch.randn(1, 3, 224, 224)
+        test_script_save(net, test_data)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/testing_data/kitty_test.jpg b/tests/testing_data/kitty_test.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f103760de515b22696bdd5a27e6a7b59d244eec8
GIT binary patch
literal 61168
[base85-encoded binary JPEG data (61168 bytes) omitted]
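As an aside on the `_get_same_padding_conv_nd` helper in this patch: a minimal, self-contained sketch of the TF-style SAME padding arithmetic it implements, using assumed example sizes (not values from the patch):

    import math

    def same_pad_1d(i, k, d, s):
        # total padding so that the conv output size equals ceil(i / s), as in TF "SAME"
        pad = max((math.ceil(i / s) - 1) * s + (k - 1) * d + 1 - i, 0)
        return (pad // 2, pad - pad // 2)  # (pad_before, pad_after); any odd remainder goes after

    # e.g. image size 7, kernel 3, dilation 1, stride 2 -> total pad 2, split (1, 1);
    # the padded conv then yields ceil(7 / 2) = 4 output positions
    print(same_pad_1d(7, 3, 1, 2))  # (1, 1)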
zNCKk9`K~5LM&wo-2DWZ_tq)otWG}fn=R>;VJ5eWm{JNZf7bDi8K10rllNchOtB`*5&Q%+qK9n4h@18^Y z)Y=-)M#USJeLU+Eo&cd(_bu|Cd?&4B9IiQepGwq_10(Z|||*G!k1PKl|fidgOb9$)i9!4LjZ5&D|Nnu+wA$WG=H z71I68Z~8L*sLehfiqN$yM)|Ggj_EVe9CuQGIM33BSjm*@)&0DH4UXe)IwMB9xzcqQ zFKk&-cu62S@hhnqWMTQ0l$9f>Ca1WzC82C?QwQj4FTz7u^gr+`#5V0Dwif}O(pg3vs5~tj`AChG zVdeqkMQ>qs9kHI;{Un!5y$ZKc$Md0G(rEmoLUKI8Dt2nO>MI)a-hSH;cWPps?fGlqm+PEj@9c{Oq*+#;kS3%9po1f{F0}b{zM(Y*wZcxU0~&k zSl^y!7>ZNtoGZXW{}%5L zebRUK5AhcLe|bH}^{Mm}YK_7{U#(6hz~69aROjhL1S~cc!yb6S-973<2)b62{xBJk ze(?S8@AvbsMHy{}I`nMt$_kzbQ;}T4h#t^^%OZrhv}n+@e`>LK9|Z;#w+qjQ%zxiX z7UnMOdQ{p)5mAp=Yf1PgILW4y%$HCJ6MeVAr5&Ks{wYgCvkFaNe>Iovr)`18^`1|Q zr_p$V_JU=%2aItk^yyUliK3YajQF}$tF)3wb#DAT&f`5Ro2-uv#T#!UK_ZQYM=yBX z4tR!k^Au_)bzFz4iIm8bzUxnHF>)NV@> zGmLW0C5-XQf_c>zgl^BSPyhivYXlB^RAu@ORkF5Cc;=FI@Q+hj;u#z4DhVGObH!*R zBdr#Nu)ZP6w&LrW2faD1#MvXtiinMc=X&vNyr^#X*1g_unra1-J7$J`_o+*A!OcW| z{RKQwq$Fm&3_?ja`4@Kft((XSl5iroNVDUUclPqFF^z&xR1I@*ZY44|EMW(bsE*?m zvjNX-swO!io~_##qr`1eOgp$zG0nV9{VNf8qysy3ttOYek(;6A?N8Ds_Cyzb_M>fF zD{*yDPQru$6GTHHT&j%bo*-jOZZ;e9rm&5MX;rH`V~FidZGqw6CbUG)BX`?19y!$P zxyM@6$A#Y%=$DcVbFCVtVS!M;BS*-jO|YmNdLfkm0BVelgK za6w%=$k{(Sf>ZlZTDx)TT9yV#xR4)T5vW!o@+ypi%mDhFQCfy1zPp(K%TDEo{Kt9~ zptBkf{{=!SrqU7Nl*3 zLVDC`7?n>L@STp-&*0?CG*=h1WhdY}o&owP4#WIwY4j6eFlGa%*--;(HX@%9d8Y0Hs^CWzB@?&|S zAC&(95^uMVBcP?=abSZ=!ZApozsZaUZhEgE-rlu{<1x8c7(GGhLm(iAU6@l#?+4bc;}3`10-YKoNJ7iF>$Ul&Zv5Y@wHaQ z=z027W%uZ$Y+re)m7BYE+dCRrd2imgc^KQB19@@rGI1&KXJhB@Pc&uth_NQes5C3U z>Di4Wg(@F!BTaQE34<$~VuQN1HqDgfo^>xm0DyswLz`!o>UYt=4dQ($gkAt1BtRbppnMT>DqJ{?ro5 z_N~mgUVGD|6vCk)rOSM)xu|8B%Ixj=xq4Q-MIh(%oP8=F+1j+FN`$*)hnD?nGt|~u zx^k~)r%H5=HbcQ2jh)EAs4=*$_F=ay)>KzehM`Ygs%VHED*;9;b=#FZQNt@%<(;X# z1eFw)*C&?r?%i+(YUz-WNEykmING;PmAfu!(G~?FR7H2AWJ-jt54CWB%rVxWlLItN zf`*W7L|`cfbG;1PW{Y9ghmmqT@%mD+kS|*C+=|XQ)?GU^kP%ZPs=Toe_0-xH*1 zmjfii^4dA}J>q_j-)det?ar86FL|Wua2|1(u8?_fw*!8U`cMozwco9EP+9B-@Se3P z#AFJE4&jd~_ZJi9hzFV;r4^>3-Cn7Z!tyh3be?G4N2oX;`HJ*Wzh-rti!1f`yJk}j z!i|N+yKWtPk0u^WJt;!388v3O#eU!|g(r%kR#oPGY#?!KYpa3uz>s&RjjbzxY!0KF5*z9+kb^1y6dZ-Elct zBgQ+q2Q~KUJvQk`l;Ov{TOg5dZFuGYrHuVxh z_VYS80hHj6T9ostwlWyoGP-a`aylO>g7P+q9P;RV#cK-zj+H-n9ZYhZspM!_=@mUr zaszL-l~wMwJ}^+7j{e8atorSv!psjYVDzqUt9Oykcn&goc^@i_w97?U+u~K?DDAk~ zsM_?RDN47<$IgzP`av0BaC+yhCcBgTN+wo6v|hD7^gUNXe}(aJry^eJ(+`d;bMvb7w?^Jy56Vp&aW{J#0CF+*s`Jidmx1|? 
zUbO3LV#seRrQAEQsTACG#-Zz^=o9NS2#w%bz} zu0}l1rB@=8)|p#AalJZZS|7tF7p)SfIG}CMhk7whSrKMhpl)Q;$0ng+%=y;{f#md| z-F7rvEk@vPTpzf(#yV6kxLvx@vPLp-P)fcWP-7u$4fm}M#9+}Zn^5vD^)ZYt`LLaSx!&A5D<}?(bL0>Q8C0x^sUT1Oc#+S89gY`!uDPWkw18u5Aq+^?@&lO4zzUq zC7t;L$axNwAj3JXpqivP3+hErFa~+wJ8_&=qHIqX9F*}KdT&CRO9D?b&by!?j);5P z`620ZZ!|x2Bz~Y$sU+_>Po61;jQ!T9Wc%$pey1PSfq1*QWJ9q>W&0dZ@Hr)o3BmD4{{Sm({!+emKB*jHNbfAyg3|R&h=Y`FKi)_5 zZeD~|ZXpjf_MI5WWAT=M=HbgI{{S*)>Wf-w2jgw-bi=z#`+HpUnMW`Amm}X4Q0M`V zQO=}EIIw)G5_TR{G%21`7*Jv^K%zChxLJ_FHV#LGn&!Iv4GgcuFjsy0io0L9 zbs!ut{8eT{4w8Nuee@V2)qXs=`*Zz43gc;+|Y_+FYJ| zEO0@fbxUC?_p$!~&FN7}%SqBbd}DY+rZb98bxs_k^5lJLaUlzbIL6e2QydayIgVnU zn2uiq*sg$Gh(7eL>Ob4@roi&0v0Xatk`(eLn8!GgM;YFlDwwWGsyFzCRNw1RrQy8L)}+<}rxg-3l|8#*S-Q|9?^B22hhxt*Xo5_@ zIVQH^H3G?jCF+e(>O5>-v8>@~&}Nkd8HtH%Rse z%=I)(F!*wOtC9h%C%T#j<-;PM)K>S&jaKysA#o@Eqftt6%8I;P>eqyFGZRP1-VFjA zf@%&RWI3W_V2gv#l>qO;#M3~O$!6?wi!!9Pl` zUPLBRvaU@mx*j#;L8@ty-o!5L#zTDL$kkHzC$^IeG1GoW(y#VO@UGCw{^mi~r773+ z!7OdUxhucYrjvW}QN2;l?`_+FygRQVJgEzYcEq6hZ&w9FmP~lLZ`69w7rJIaD!8i= zu{<^^>OkpHO%KDKRMs0-LBzS|P6EF8QLBgQRLs^}L6{k0s3y-gv&XnIC4~J{qbfV82S#IidT) zYdzrW(t*KYv&hzX=wT*nVJ3ReLRl$n)|va(rL*r|4NQ)*KzY=Ndgh@AsHu~DVxGZa z5soYmI(w$u<$=cZrqMD*S~SS82GsFDvCX7d1FbOtVugWl1rx=Jy%Ccz-6}V&i8-t{ zBbqc!frUWhb3`%v(DH6ga6w<44#y^lezZ9o(y@`OyC&40UF6%AX|(5KN@UrMB!)qj z`zZ9RPbUZ`)}fA2tGNTphPb)oYz|#$-6N}HGqI9_c*#DVw8dk{%TUsca-m-#17X8i z;qK6qJZv@|L(Zlt!{crIhn+&%a!KC0f@yCT6Ye&nB#7*IOyPPe;m|M`Fl>Y$K7KDkr zySPu2io~Uo<-!~?xgh$3wGSi2F3xtfxrX2m6it=~%c%Zhh`P5HmvTg>F@u=TnjOdJ zYAezO#8*YwExP$*_2#JH_P_fib(Ibx%ROdsi4 zm4o}xdl)U8GVfAZKZ4>Oc>&klf2L__N>{BDX>h4^5hrdu9Qhro7)COl@(AW$X^7j~ z@}IUx&{l##agpy{vT(D#yo3dO$CJ?K!hZO#Ngobzu^l|AXnP})F(*E>y5mj-PYpIu zczn%o!Wnn2pr%tUqgZ@04!r!033#GdLyf={X4NegRBgAaxy7MiavFAd9nbgu;U}mOC zI)X7tl5l9K?f^XNIzbzxl`nhYrSVOj>PDNED~<|jY8I1yvqQAs`_yJ_^IR}XNwLQ| zh2+7g8hrCPtqg+{P_j3YL+8Ir(8^g(`PhmbCdQRc=X#SJ1KnK|*d6OSd`&BHH&eX`=w_FGLY-;x1VKp^%2pOk(RfBE zIV0icP7HZm((9%1ZB=Mg#m;z%&3J%|n%u980-?EX6~O0R;3Q2qMcRXX-=6uR6XhiN zR3LFkNTqv640P1vy0-*u924v}{*{xKW^dml{mn~arZFU#`6D>@I}hhth474dQ=pn~ zcr#J=d|UpMNyh-qYH%Z_06b_i1u|6%nKTjr2tKr0w-WQ95(YSxTllLtV}kZ$%f*_G zcLN@L>up9rE?dZee@dGf1zfgZv6G%+F`sl}{RpBo*Y5?zvEROrDgJfbe^EejhtlK! 
z0O{PY_6&gk05z{z+!$w$PO-2x#X>}&9D~lb5(0fHEBO8%>QrBpK3CZ8KSNNfsTXW!fy7By=I=OV{{Y2YpYsiy_227D zR$%s)?8D_P8UADW(J-)*2TrKL!5&9r{U{u79Jp*6DI3!whFF!pQYv7jd73E%#Qofg zLD>4$Z&SAf@h&nvsodyWJNp{0>WOg(E{BPHs=hWWEBeF-0F=n{^{SQKxP1QrD!Cm- zIpb*O94@`j0wsd0@&;!yZ(r?b;^E1Pt<_ zKn#sTN8#t?LtI-21mg@pTFh5K3=jRQcf3XgT=VNnJ&qE58ZH_5j`WEp&#fJ9{{Z_| zc5wbVurphkPC3$~xB~;FN%0v2&bb1^np)B20B&e8HA5`tJpJpaq6;(DrsFNj(m2NF*CaNM(6sm3>`__)CzQCH@Pau_44eIeiF`~8wWWEQ|h7*p4ykBjp)`>ln z_lE;|*N-P^i#hYHl!{<7Zfm%xWo!-W&p1&+%OPrD8avSL;2PZd(6Tin9T80-J5W-N zE$EUsj`fb}r&cf|aNy?@#?aw_sLXsKx3$^D6&e-09I9qIS$2RN(_~@YjQLWl_X9Yp zGz7(N!AakG0%z|Ux44sfhCdT+QF|y#%Y?wF;*ZCJMR1NDRp?VJICo-j1vLs9phs!{ z0NiiTf2pW{D5)--2H;d3c;7okGeZU=@=BDB}p-- z-7Yp+p>O?T`qrrVsN+BEceyA23I71hYB**CAkiqcJi3#;hSp=h9DFVAyMHWDV`G&* z)rFKknJ1hR1|G~Y{KX019)`H&GD#Hdc0FoBoLr43S=bzAjkmZwnt(=8s~b6o?mD>o z>}ZP|_n1GJ6(ioXvtcNiPv(sNo79mIG56YwiOK9bEtj21-X6Qu0f65ldLHKGRZe!G zFhsK}d(!)s1RUUsS#y5CkO{!+N~gP+5<{MTba?>Y{?6J?J;IaaRXKah8A_y_Ky;_l zUB@8(qX2#Csjc^lOpu-`dQV_8b$rBShCS(PU8KGjUHvI&)WnB^a8&iLG<-*j2Mjsl zyYv?9l1P~k6hxIjaYIVnTb+-c5+sleFh1hE>%^xZ0C^uOM!p72DkkJVtsfMeBhr?{ zv|0uxv5v+uzD9FW@%?C6!N-+MsD6~xD(sNC0*01d`P9t08P4W~X!1=IhHS#gQzl5r zs8uj?)X`|53|Dq?WLQ0OiqVIkIxwbr3WcLP^q@H$dkrMB}bfxQ;D3V9l`B+yqL2Gyx^{{UvA4aHI@139K!12rm639TG~&uZF7#?&N| zQ)vs?w32`g0?o$Nyo^}lqX&eNO(WRLM%yT?p%nv3#BeG|ydV=mB$DAl^Qi4fG78by zzG_H+W&!J6+ZVvqr3#qON>6(xNx`We?oUgJB4Wp#|O*)V@e~Fu6O7w*Eg7DjZVqA^=`k~n#HY#Pj%27 z(B^Lm-hnfhYLQ%pG`C}=M$+72a1LljknO!kG>3|#GG}3(sYal&DJz5LSvcURp9Y5t<`e$_76!+-+`%oJV`c#V`10$iP=vypWeZrZMdJsq4REaSibBax8 za8}|Hrwzt2ovS9{xY!MdCY6kbrsa5dBR*N6t|eoTr#_jWBzYWshv;c##Mdaqc$H}x zK&zR|IWD06DMi)lus8&LYZF~g>O_pV#VVHCSK={9zS*u!Gzoif**qs6M>Lk>zhH3W zk1Xbky0p0soQ!#jSv*sy`G`LCNXZD*gR~-6@tykSqIjf)j_r#9%CwqEVL!DWP-;kK zNrn}^^lrsO@m?_5#}}Sxfhb^AcAzDV({Z?{)^<*(ZRu=c!x~YrN0JB?98r^sVFXy% z0a+3mn@1nL60rDosUe3Wf8|98_RcBj2w5yd{XHrZ4r#MO4QS90DtgeDv1&o$+K5Pn z5+K0kn%pKguA!45DS_osI&YqIVHF!D{Hk(A6VNAhk^Yq`#z#7xW8Svl#s^vn41w8p zH4$I0olfH%sxm3U&^ro^g%IR)sAT%rF&G@15=pEHf_1M-C}td~MQAy7HH?oxI%jFG zl>la{t}=EQqU|7Q2;T$Fxk-r8d$QIZVgl!Sve>&xf_AE=i)5>VxixaKj3SKIh$j0Y zG>ZUM%_TqyHTPR<-YW#V1?-8@i?Ljof`DYcVlZQ0cazxE0bCXRjC+^w; z4ltsUWH=^|zI;g<{{R5zN^==Jf=I`nD@Ekg3={Zq%ux|c3za6BGnZyb;UuZtZ;EYx z1ebC!{{V=p{{V0)G~_9w?(TN7bNTIn`w}UZfc9mZyEtxDvdbAi(t(mt=W;=zC20`l z8)_`%iw&DAT|fc<0Ej=GLvbU|kGtesU=n>@IW^QB`h#t~|B zBW&MsMm_nGJhu*U$kGvg47{CDsC$FPFtYuPPh+6#tBYnQzKefDNp9|~kRIoa8iBly z?jw|t$UKb(x6BU(#3W;=-nl=KnK8QKY;o3(2pP%A>IG+l2}VR`*J|AsATS(^bZT@X z64&@M4yro5|++3%`>sL4I=e5>h%vIvfrlg!~xuq zPhh#&W6Hf=tr2=4GPgW@>5AYA^?Iy@eUMD}2c;^WUM4uNSEw)`HU){P5ja$lUawMD zk&hSQ1ey|AGS%w!bMB6#F-Nv*Td9OY0=-_Q>Nah*Kb$pOfCho?#$N5=m3>h(-M0|p{qThuQxAat)+sQrebHu*=W zp&h(+>h&uLp>fNtVV@w8(!E}yajbKAu-$P?@6K)%e+aKvs8PlvkwT5VE8Ksy1lOz7 zPF<2bxgw{za%^Ndw79wJEk z8WVs@3EQc!SEw=26yivm!i(owitNdOBIDFotJN2xm9t4%5q9DO(9jYgkd{}-{cF|g z;i%L~>NrQ Date: Fri, 9 Apr 2021 17:59:57 +0800 Subject: [PATCH 165/457] 1866 Add TransformInverter handler (#1970) * [DLMED] add TransformInverter handler Signed-off-by: Nic Ma * [DLMED] fix typo Signed-off-by: Nic Ma * [DLMED] add support in SegmentationSaver handler Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] fix CI test Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot --- docs/source/handlers.rst | 5 ++ monai/data/inverse_batch_transform.py | 10 +-- monai/data/utils.py | 8 ++ monai/handlers/__init__.py | 1 + monai/handlers/segmentation_saver.py | 11 ++- monai/handlers/transform_inverter.py | 94 
++++++++++++++++++++++++
 monai/transforms/utility/dictionary.py | 17 ++++-
 tests/min_tests.py | 1 +
 tests/test_handler_transform_inverter.py | 81 ++++++++++++++++++++
 tests/test_inverse_collation.py | 3 +-
 10 files changed, 222 insertions(+), 9 deletions(-)
 create mode 100644 monai/handlers/transform_inverter.py
 create mode 100644 tests/test_handler_transform_inverter.py

diff --git a/docs/source/handlers.rst b/docs/source/handlers.rst
index 9030fa3ced..7c8498e37a 100644
--- a/docs/source/handlers.rst
+++ b/docs/source/handlers.rst
@@ -125,3 +125,8 @@ GarbageCollector handler
 ------------------------
 .. autoclass:: GarbageCollector
     :members:
+
+Transform inverter
+------------------
+.. autoclass:: TransformInverter
+    :members:
diff --git a/monai/data/inverse_batch_transform.py b/monai/data/inverse_batch_transform.py
index fa88114c84..edfaee3758 100644
--- a/monai/data/inverse_batch_transform.py
+++ b/monai/data/inverse_batch_transform.py
@@ -9,6 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import warnings
 from typing import Any, Callable, Dict, Hashable, Optional, Sequence

 import numpy as np
@@ -16,7 +17,7 @@
 from monai.data.dataloader import DataLoader
 from monai.data.dataset import Dataset
-from monai.data.utils import decollate_batch, pad_list_data_collate
+from monai.data.utils import decollate_batch, no_collation, pad_list_data_collate
 from monai.transforms.croppad.batch import PadListDataCollate
 from monai.transforms.inverse import InvertibleTransform
 from monai.transforms.transform import Transform
@@ -42,13 +43,12 @@ def _transform(self, index: int) -> Dict[Hashable, np.ndarray]:
         if self.pad_collation_used:
             data = PadListDataCollate.inverse(data)

+        if not isinstance(self.invertible_transform, InvertibleTransform):
+            warnings.warn("transform is not invertible, can't invert transform for the input data.")
+            return data
         return self.invertible_transform.inverse(data)

-def no_collation(x):
-    return x
-
-
 class BatchInverseTransform(Transform):
     """Perform inverse on a batch of data. This is useful if you have inferred a batch of images and want to invert them all."""
diff --git a/monai/data/utils.py b/monai/data/utils.py
index 938365460b..d39f2702ff 100644
--- a/monai/data/utils.py
+++ b/monai/data/utils.py
@@ -65,6 +65,7 @@
     "sorted_dict",
     "decollate_batch",
     "pad_list_data_collate",
+    "no_collation",
 ]

@@ -379,6 +380,13 @@ def pad_list_data_collate(
     return PadListDataCollate(method, mode)(batch)

+def no_collation(x):
+    """
+    No collation operation; returns the input data as-is.
+    """
+    return x
+
+
 def worker_init_fn(worker_id: int) -> None:
     """
     Callback function for PyTorch DataLoader `worker_init_fn`.
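A quick illustration of why the newly exported `no_collation` helper is useful (a minimal sketch, not part of the patch; `my_dataset` is a hypothetical dataset): after inversion, the samples of a batch can have different spatial shapes, so the default stacking collate would fail, whereas a pass-through collate simply hands the list of samples back:

    from torch.utils.data import DataLoader
    from monai.data.utils import no_collation

    # each fetched "batch" is returned as a plain list of per-sample items,
    # because no_collation performs no stacking of (possibly unequal) shapes
    loader = DataLoader(my_dataset, batch_size=4, collate_fn=no_collation)
    for batch in loader:
        assert isinstance(batch, list)  # 4 samples whose shapes may differ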
diff --git a/monai/handlers/__init__.py b/monai/handlers/__init__.py
index f88531ea8e..b0dbb82127 100644
--- a/monai/handlers/__init__.py
+++ b/monai/handlers/__init__.py
@@ -28,6 +28,7 @@
 from .stats_handler import StatsHandler
 from .surface_distance import SurfaceDistance
 from .tensorboard_handlers import TensorBoardHandler, TensorBoardImageHandler, TensorBoardStatsHandler
+from .transform_inverter import TransformInverter
 from .utils import (
     evenly_divisible_all_gather,
     stopping_fn_from_loss,
diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py
index 6a98abf3ca..279b514bd7 100644
--- a/monai/handlers/segmentation_saver.py
+++ b/monai/handlers/segmentation_saver.py
@@ -119,7 +119,6 @@ def __init__(
             output_dtype=output_dtype,
             squeeze_end_dims=squeeze_end_dims,
             data_root_dir=data_root_dir,
-            save_batch=True,
         )
         self.batch_transform = batch_transform
         self.output_transform = output_transform
@@ -147,5 +146,13 @@ def __call__(self, engine: Engine) -> None:
         """
         meta_data = self.batch_transform(engine.state.batch)
         engine_output = self.output_transform(engine.state.output)
-        self._saver(engine_output, meta_data)
+        if isinstance(engine_output, (tuple, list)):
+            # if a list of data in shape: [channel, H, W, [D]], save every item separately
+            self._saver.save_batch = False
+            for i, d in enumerate(engine_output):
+                self._saver(d, {k: meta_data[k][i] for k in meta_data} if meta_data is not None else None)
+        else:
+            # if the data is in shape: [batch, channel, H, W, [D]]
+            self._saver.save_batch = True
+            self._saver(engine_output, meta_data)
         self.logger.info("saved all the model outputs into files.")
diff --git a/monai/handlers/transform_inverter.py b/monai/handlers/transform_inverter.py
new file mode 100644
index 0000000000..42c5bdcf92
--- /dev/null
+++ b/monai/handlers/transform_inverter.py
@@ -0,0 +1,94 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+from typing import TYPE_CHECKING, Callable, Optional
+
+from torch.utils.data import DataLoader as TorchDataLoader
+
+from monai.data import BatchInverseTransform
+from monai.data.utils import no_collation
+from monai.engines.utils import CommonKeys
+from monai.transforms import InvertibleTransform, allow_missing_keys_mode
+from monai.utils import InverseKeys, exact_version, optional_import
+
+Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
+if TYPE_CHECKING:
+    from ignite.engine import Engine
+else:
+    Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
+
+
+class TransformInverter:
+    """
+    Ignite handler to automatically invert all the pre-transforms that support `inverse`.
+    It takes `engine.state.output` as the input data and uses the transforms information from `engine.state.batch`.
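+
+    A minimal usage sketch (added for illustration only; ``evaluator`` and
+    ``val_loader`` are hypothetical names, assuming an Ignite engine whose
+    output dict contains ``CommonKeys.PRED``)::
+
+        inverter = TransformInverter(transform=pre_transforms, loader=val_loader)
+        inverter.attach(evaluator)
+        # after each iteration, engine.state.output["pred_inverted"] holds a
+        # list of per-sample arrays with the pre-transforms undone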
+
+    Note:
+        This handler is an experimental API in v0.5: the interpolation mode in the transforms
+        and inverse transforms is the same, which may not be correct, as we may want to use
+        `bilinear` for the input image but `nearest` when inverting transforms for the model output.
+        In this case, a solution is to set `batch_key` to the label field if we have labels.
+
+    """
+
+    def __init__(
+        self,
+        transform: InvertibleTransform,
+        loader: TorchDataLoader,
+        collate_fn: Optional[Callable] = no_collation,
+        batch_key: str = CommonKeys.IMAGE,
+        output_key: str = CommonKeys.PRED,
+        postfix: str = "inverted",
+    ) -> None:
+        """
+        Args:
+            transform: a callable data transform on input data.
+            loader: data loader used to generate the batch of data.
+            collate_fn: how to collate data after inverse transformations.
+                default won't do any collation, so the output will be a list whose length equals the batch size.
+            batch_key: the key of input data in `ignite.engine.batch`. will get the applied transforms
+                for this input data, then invert them for the model output, default to "image".
+            output_key: the key of model output in `ignite.engine.output`, invert transforms on it.
+            postfix: will save the inverted result into `ignite.engine.output` with key `{output_key}_{postfix}`.
+
+        """
+        self.transform = transform
+        self.inverter = BatchInverseTransform(transform=transform, loader=loader, collate_fn=collate_fn)
+        self.batch_key = batch_key
+        self.output_key = output_key
+        self.postfix = postfix
+
+    def attach(self, engine: Engine) -> None:
+        """
+        Args:
+            engine: Ignite Engine, it can be a trainer, validator or evaluator.
+        """
+        engine.add_event_handler(Events.ITERATION_COMPLETED, self)
+
+    def __call__(self, engine: Engine) -> None:
+        """
+        Args:
+            engine: Ignite Engine, it can be a trainer, validator or evaluator.
+        """
+        transform_key = self.batch_key + InverseKeys.KEY_SUFFIX
+        if transform_key not in engine.state.batch:
+            warnings.warn("all the pre-transforms are not InvertibleTransform or there is no need to invert.")
+            return
+
+        segs_dict = {
+            self.batch_key: engine.state.output[self.output_key].detach().cpu(),
+            transform_key: engine.state.batch[transform_key],
+        }
+
+        with allow_missing_keys_mode(self.transform):  # type: ignore
+            inverted_key = f"{self.output_key}_{self.postfix}"
+            engine.state.output[inverted_key] = [i[self.batch_key] for i in self.inverter(segs_dict)]
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index 9464faa503..7c4ea398f6 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -17,12 +17,14 @@
 import copy
 import logging
+from copy import deepcopy
 from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union

 import numpy as np
 import torch

 from monai.config import DtypeLike, KeysCollection, NdarrayTensor
+from monai.transforms.inverse import InvertibleTransform
 from monai.transforms.transform import MapTransform, Randomizable
 from monai.transforms.utility.array import (
     AddChannel,
@@ -379,7 +381,7 @@ def __call__(
         return d

-class ToTensord(MapTransform):
+class ToTensord(MapTransform, InvertibleTransform):
     """
     Dictionary-based wrapper of :py:class:`monai.transforms.ToTensor`.
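+
+    With this change the conversion records itself in the per-key transform stack,
+    so ``inverse`` can undo it via ``ToNumpy``. A minimal sketch for illustration
+    (hypothetical data, not part of the patch)::
+
+        t = ToTensord(keys="img")
+        d = t({"img": np.zeros((1, 8, 8))})  # d["img"] becomes a torch.Tensor
+        d = t.inverse(d)                     # d["img"] is a numpy array again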
""" @@ -397,9 +399,22 @@ def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> No def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: d = dict(data) for key in self.key_iterator(d): + self.push_transform(d, key) d[key] = self.converter(d[key]) return d + def inverse(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: + d = deepcopy(dict(data)) + for key in self.key_iterator(d): + transform = self.get_most_recent_transform(d, key) + # Create inverse transform + inverse_transform = ToNumpy() + # Apply inverse + d[key] = inverse_transform(d[key]) + # Remove the applied transform + self.pop_transform(d, key) + return d + class ToNumpyd(MapTransform): """ diff --git a/tests/min_tests.py b/tests/min_tests.py index 586956eec0..47892a143e 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -117,6 +117,7 @@ def run_testsuit(): "test_ensure_channel_first", "test_ensure_channel_firstd", "test_handler_early_stop", + "test_handler_transform_inverter", ] assert sorted(exclude_cases) == sorted(set(exclude_cases)), f"Duplicated items in {exclude_cases}" diff --git a/tests/test_handler_transform_inverter.py b/tests/test_handler_transform_inverter.py new file mode 100644 index 0000000000..48efd5df53 --- /dev/null +++ b/tests/test_handler_transform_inverter.py @@ -0,0 +1,81 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +import numpy as np +import torch +from ignite.engine import Engine + +from monai.data import CacheDataset, DataLoader, create_test_image_3d +from monai.handlers import TransformInverter +from monai.transforms import ( + AddChanneld, + Compose, + LoadImaged, + RandAffined, + RandAxisFlipd, + RandFlipd, + RandRotate90d, + RandRotated, + RandZoomd, + ResizeWithPadOrCropd, + ToTensord, +) +from tests.utils import make_nifti_image + +KEYS = ["image", "label"] + + +class TestTransformInverter(unittest.TestCase): + def test_invert(self): + im_fname, seg_fname = [make_nifti_image(i) for i in create_test_image_3d(101, 100, 107)] + transform = Compose( + [ + LoadImaged(KEYS), + AddChanneld(KEYS), + RandFlipd(KEYS, prob=0.5, spatial_axis=[1, 2]), + RandAxisFlipd(KEYS, prob=0.5), + RandRotate90d(KEYS, spatial_axes=(1, 2)), + RandZoomd(KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True), + RandRotated(KEYS, prob=0.5, range_x=np.pi), + RandAffined(KEYS, prob=0.5, rotate_range=np.pi), + ResizeWithPadOrCropd(KEYS, 100), + ToTensord(KEYS), + ] + ) + data = [{"image": im_fname, "label": seg_fname} for _ in range(12)] + + # num workers = 0 for mac or gpu transforms + num_workers = 0 if sys.platform == "darwin" or torch.cuda.is_available() else 2 + + dataset = CacheDataset(data, transform=transform, progress=False) + loader = DataLoader(dataset, num_workers=num_workers, batch_size=5) + + # set up engine + def _train_func(engine, batch): + self.assertTupleEqual(batch["image"].shape[1:], (1, 100, 100, 100)) + return batch + + engine = Engine(_train_func) + + # set up testing handler + TransformInverter(transform=transform, loader=loader, output_key="image").attach(engine) + + engine.run(loader, max_epochs=1) + self.assertTupleEqual(engine.state.output["image"].shape, (2, 1, 100, 100, 100)) + for i in engine.state.output["image_inverted"]: + self.assertTupleEqual(i.shape, (1, 100, 101, 107)) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_inverse_collation.py b/tests/test_inverse_collation.py index 3e07a8f0e2..c302e04017 100644 --- a/tests/test_inverse_collation.py +++ b/tests/test_inverse_collation.py @@ -29,6 +29,7 @@ RandRotated, RandZoomd, ResizeWithPadOrCropd, + ToTensord, ) from monai.utils import optional_import, set_determinism from tests.utils import make_nifti_image @@ -113,7 +114,7 @@ def test_collation(self, _, transform, collate_fn, ndim): if collate_fn: modified_transform = transform else: - modified_transform = Compose([transform, ResizeWithPadOrCropd(KEYS, 100)]) + modified_transform = Compose([transform, ResizeWithPadOrCropd(KEYS, 100), ToTensord(KEYS)]) # num workers = 0 for mac or gpu transforms num_workers = 0 if sys.platform == "darwin" or torch.cuda.is_available() else 2 From 3a7cd4666f618c5abc5502fbfccec20cc2dfc465 Mon Sep 17 00:00:00 2001 From: Wenqi Li Date: Fri, 9 Apr 2021 14:23:29 +0100 Subject: [PATCH 166/457] 1889 - changelog for v0.5.0 (#1923) * changelog for 0.5.0 Signed-off-by: Wenqi Li * update changelog Signed-off-by: Wenqi Li --- CHANGELOG.md | 84 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 83 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56e65a7d92..7e1c785aa6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,87 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
## [Unreleased]
+## [0.5.0] - 2021-04-09
+### Added
+* Overview document for [feature highlights in v0.5.0](https://github.com/Project-MONAI/MONAI/blob/master/docs/source/highlights.md)
+* Invertible spatial transforms
+  * `InvertibleTransform` base APIs
+  * Batch inverse and decollating APIs
+  * Inverse of `Compose`
+  * Batch inverse event handling
+  * Test-time augmentation as an application
+* Initial support of learning-based image registration:
+  * Bending energy, LNCC, and global mutual information loss
+  * Fully convolutional architectures
+  * Dense displacement field, dense velocity field computation
+  * Warping with high-order interpolation with C++/CUDA implementations
+* Deepgrow modules for interactive segmentation:
+  * Workflows with simulations of clicks
+  * Distance-based transforms for guidance signals
+* Digital pathology support:
+  * Efficient whole slide imaging IO and sampling with Nvidia cuCIM and SmartCache
+  * FROC measurements for lesion detection
+  * Probabilistic post-processing for lesion detection
+  * TorchVision classification model adaptor for fully convolutional analysis
+* 12 new transforms, grid patch dataset, `ThreadDataLoader`, EfficientNets B0-B7
+* 4 iteration events for the engine for finer control of workflows
+* New C++/CUDA extensions:
+  * Conditional random field
+  * Fast bilateral filtering using the permutohedral lattice
+* Metrics summary reporting and saving APIs
+* DiceCELoss, DiceFocalLoss, a multi-scale wrapper for segmentation loss computation
+* Data loading utilities:
+  * `decollate_batch`
+  * `PadListDataCollate` with inverse support
+* Support of slicing syntax for `Dataset`
+* Initial Torchscript support for the loss modules
+* Learning rate finder
+* Allow for missing keys in the dictionary-based transforms
+* Support of checkpoint loading for transfer learning
+* Various summary and plotting utilities for Jupyter notebooks
+* Contributor Covenant Code of Conduct
+* Major CI/CD enhancements covering the tutorial repository
+* Fully compatible with PyTorch 1.8
+* Initial nightly CI/CD pipelines using Nvidia Blossom Infrastructure
+
+### Changed
+* Enhanced `list_data_collate` error handling
+* Unified iteration metric APIs
+* `densenet*` extensions are renamed to `DenseNet*`
+* `se_res*` network extensions are renamed to `SERes*`
+* Transform base APIs are rearranged into `compose`, `inverse`, and `transform`
+* `_do_transform` flag for the random augmentations is unified via `RandomizableTransform`
+* Decoupled post-processing steps, e.g. `softmax`, `to_onehot_y`, from the metrics computations
+* Moved the distributed samplers to `monai.data.samplers` from `monai.data.utils`
+* Engine's data loaders now accept generic iterables as input
+* Workflows now accept additional custom events and state properties
+* Various type hints according to Numpy 1.20
+* Refactored testing utility `runtests.sh` to have `--unittest` and `--net` (integration tests) options
+* Base Docker image upgraded to `nvcr.io/nvidia/pytorch:21.03-py3` from `nvcr.io/nvidia/pytorch:20.10-py3`
+* Docker images are now built with self-hosted environments
+* Primary contact email updated to `monai.contact@gmail.com`
+* Now using GitHub Discussions as the primary communication forum
+
+### Removed
+* Compatibility tests for PyTorch 1.5.x
+* Format specific loaders, e.g.
`LoadNifti`, `NiftiDataset` +* Assert statements from non-test files +* `from module import *` statements, addressed flake8 F403 + +### Fixed +* Uses American English spelling for code, as per PyTorch +* Code coverage now takes multiprocessing runs into account +* SmartCache with initial shuffling +* `ConvertToMultiChannelBasedOnBratsClasses` now supports channel-first inputs +* Checkpoint handler to save with non-root permissions +* Fixed an issue for exiting the distributed unit tests +* Unified `DynUNet` to have single tensor output w/o deep supervision +* `SegmentationSaver` now supports user-specified data types and a `squeeze_end_dims` flag +* Fixed `*Saver` event handlers output filenames with a `data_root_dir` option +* Load image functions now ensure little-endian +* Fixed the test runner to support regex-based test case matching +* Usability issues in the event handlers + ## [0.4.0] - 2020-12-15 ### Added * Overview document for [feature highlights in v0.4.0](https://github.com/Project-MONAI/MONAI/blob/master/docs/source/highlights.md) @@ -173,7 +254,8 @@ the postprocessing steps should be used before calling the metrics methods [highlights]: https://github.com/Project-MONAI/MONAI/blob/master/docs/source/highlights.md -[Unreleased]: https://github.com/Project-MONAI/MONAI/compare/0.4.0...HEAD +[Unreleased]: https://github.com/Project-MONAI/MONAI/compare/0.5.0...HEAD +[0.5.0]: https://github.com/Project-MONAI/MONAI/compare/0.4.0...0.5.0 [0.4.0]: https://github.com/Project-MONAI/MONAI/compare/0.3.0...0.4.0 [0.3.0]: https://github.com/Project-MONAI/MONAI/compare/0.2.0...0.3.0 [0.2.0]: https://github.com/Project-MONAI/MONAI/compare/0.1.0...0.2.0 From d1787060d3b49760412753ab505d106e4c776e9e Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sat, 10 Apr 2021 03:36:49 +0800 Subject: [PATCH 167/457] 1977 Support different inverse interpolation mode (#1978) * [DLMED] add TransformInverter handler Signed-off-by: Nic Ma * [DLMED] fix typo Signed-off-by: Nic Ma * [DLMED] add support in SegmentationSaver handler Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] fix flake8 issue Signed-off-by: Nic Ma * [DLMED] fix CI test Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot * [DLMED] save mode into inverse dict Signed-off-by: Nic Ma * [DLMED] add unit tests Signed-off-by: Nic Ma * [DLMED] fix ToTensor inverse issue Signed-off-by: Nic Ma * [DLMED] change the replacement logic into util function Signed-off-by: Nic Ma * [DLMED] add more tests Signed-off-by: Nic Ma * [DLMED] fix flake8 Signed-off-by: Nic Ma * [MONAI] python code formatting Signed-off-by: monai-bot Co-authored-by: monai-bot --- monai/handlers/transform_inverter.py | 18 +-- monai/transforms/__init__.py | 1 + monai/transforms/croppad/dictionary.py | 26 ++-- monai/transforms/inverse.py | 2 +- monai/transforms/spatial/dictionary.py | 168 +++++++++++++++++------ monai/transforms/utility/dictionary.py | 1 - monai/transforms/utils.py | 46 ++++++- tests/test_handler_transform_inverter.py | 14 +- tests/test_inverse.py | 5 +- 9 files changed, 214 insertions(+), 67 deletions(-) diff --git a/monai/handlers/transform_inverter.py b/monai/handlers/transform_inverter.py index 42c5bdcf92..68201e44be 100644 --- a/monai/handlers/transform_inverter.py +++ b/monai/handlers/transform_inverter.py @@ -17,7 +17,7 @@ from monai.data import BatchInverseTransform from monai.data.utils import no_collation from monai.engines.utils import CommonKeys -from monai.transforms import InvertibleTransform, 
allow_missing_keys_mode
+from monai.transforms import InvertibleTransform, allow_missing_keys_mode, convert_inverse_interp_mode
 from monai.utils import InverseKeys, exact_version, optional_import

 Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
@@ -32,12 +32,6 @@ class TransformInverter:
     Ignite handler to automatically invert all the pre-transforms that support `inverse`.
     It takes `engine.state.output` as the input data and uses the transforms information from `engine.state.batch`.

-    Note:
-        This handler is an experimental API in v0.5: the interpolation mode in the transforms
-        and inverse transforms is the same, which may not be correct, as we may want to use
-        `bilinear` for the input image but `nearest` when inverting transforms for the model output.
-        In this case, a solution is to set `batch_key` to the label field if we have labels.
-
     """

     def __init__(
@@ -48,6 +42,7 @@ def __init__(
         batch_key: str = CommonKeys.IMAGE,
         output_key: str = CommonKeys.PRED,
         postfix: str = "inverted",
+        nearest_interp: bool = True,
     ) -> None:
         """
         Args:
@@ -59,6 +54,8 @@ def __init__(
             for this input data, then invert them for the model output, default to "image".
             output_key: the key of model output in `ignite.engine.output`, invert transforms on it.
             postfix: will save the inverted result into `ignite.engine.output` with key `{output_key}_{postfix}`.
+            nearest_interp: whether to use `nearest` interpolation mode when inverting spatial transforms,
+                default to `True`. If `False`, use the same interpolation mode as the original transform.

         """
         self.transform = transform
@@ -66,6 +63,7 @@ def __init__(
         self.batch_key = batch_key
         self.output_key = output_key
         self.postfix = postfix
+        self.nearest_interp = nearest_interp

     def attach(self, engine: Engine) -> None:
         """
@@ -84,9 +82,13 @@ def __call__(self, engine: Engine) -> None:
             warnings.warn("all the pre-transforms are not InvertibleTransform or there is no need to invert.")
             return

+        transform_info = engine.state.batch[transform_key]
+        if self.nearest_interp:
+            convert_inverse_interp_mode(trans_info=transform_info, mode="nearest", align_corners=None)
+
         segs_dict = {
             self.batch_key: engine.state.output[self.output_key].detach().cpu(),
-            transform_key: engine.state.batch[transform_key],
+            transform_key: transform_info,
         }

         with allow_missing_keys_mode(self.transform):  # type: ignore
diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py
index b66567e71a..f96194c262 100644
--- a/monai/transforms/__init__.py
+++ b/monai/transforms/__init__.py
@@ -371,6 +371,7 @@
 )
 from .utils import (
     allow_missing_keys_mode,
+    convert_inverse_interp_mode,
     copypaste_arrays,
     create_control_grid,
     create_grid,
diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py
index c8d5ceea40..c4ef659c69 100644
--- a/monai/transforms/croppad/dictionary.py
+++ b/monai/transforms/croppad/dictionary.py
@@ -16,6 +16,7 @@
 """
 from copy import deepcopy
+from enum import Enum
 from itertools import chain
 from math import floor
 from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
@@ -125,7 +126,7 @@ def __init__(
     def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
         d = dict(data)
         for key, m in self.key_iterator(d, self.mode):
-            self.push_transform(d, key)
+            self.push_transform(d, key, extra_info={"mode": m.value if isinstance(m, Enum) else m})
             d[key] = self.padder(d[key], mode=m)
         return d
@@ -193,7 +194,7 @@ def __init__(
     def __call__(self, data: Mapping[Hashable,
np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key, m in self.key_iterator(d, self.mode): - self.push_transform(d, key) + self.push_transform(d, key, extra_info={"mode": m.value if isinstance(m, Enum) else m}) d[key] = self.padder(d[key], mode=m) return d @@ -259,7 +260,7 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key, m in self.key_iterator(d, self.mode): - self.push_transform(d, key) + self.push_transform(d, key, extra_info={"mode": m.value if isinstance(m, Enum) else m}) d[key] = self.padder(d[key], mode=m) return d @@ -826,6 +827,7 @@ class ResizeWithPadOrCropd(MapTransform, InvertibleTransform): ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``} One of the listed string values or a user supplied function for padding. Defaults to ``"constant"``. See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html + It also can be a sequence of string, each element corresponds to a key in ``keys``. allow_missing_keys: don't raise exception if key is missing. """ @@ -834,18 +836,26 @@ def __init__( self, keys: KeysCollection, spatial_size: Union[Sequence[int], int], - mode: Union[NumpyPadMode, str] = NumpyPadMode.CONSTANT, + mode: NumpyPadModeSequence = NumpyPadMode.CONSTANT, allow_missing_keys: bool = False, ) -> None: super().__init__(keys, allow_missing_keys) - self.padcropper = ResizeWithPadOrCrop(spatial_size=spatial_size, mode=mode) + self.mode = ensure_tuple_rep(mode, len(self.keys)) + self.padcropper = ResizeWithPadOrCrop(spatial_size=spatial_size) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - for key in self.key_iterator(d): + for key, m in self.key_iterator(d, self.mode): orig_size = d[key].shape[1:] - d[key] = self.padcropper(d[key]) - self.push_transform(d, key, orig_size=orig_size) + d[key] = self.padcropper(d[key], mode=m) + self.push_transform( + d, + key, + orig_size=orig_size, + extra_info={ + "mode": m.value if isinstance(m, Enum) else m, + }, + ) return d def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: diff --git a/monai/transforms/inverse.py b/monai/transforms/inverse.py index 3e5b68e8e4..3baef91717 100644 --- a/monai/transforms/inverse.py +++ b/monai/transforms/inverse.py @@ -76,7 +76,7 @@ def push_transform( info = { InverseKeys.CLASS_NAME: self.__class__.__name__, InverseKeys.ID: id(self), - InverseKeys.ORIG_SIZE: orig_size or data[key].shape[1:], + InverseKeys.ORIG_SIZE: orig_size or (data[key].shape[1:] if hasattr(data[key], "shape") else None), } if extra_info is not None: info[InverseKeys.EXTRA_INFO] = extra_info diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 86c94302a1..9f782bf8fc 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -16,6 +16,7 @@ """ from copy import deepcopy +from enum import Enum from typing import Any, Dict, Hashable, Mapping, Optional, Sequence, Tuple, Union import numpy as np @@ -208,16 +209,24 @@ def __call__( align_corners=align_corners, dtype=dtype, ) - self.push_transform(d, key, extra_info={"meta_data_key": meta_data_key, "old_affine": old_affine}) + self.push_transform( + d, + key, + extra_info={ + "meta_data_key": meta_data_key, + "old_affine": old_affine, + "mode": mode.value if isinstance(mode, Enum) else mode, + "padding_mode": padding_mode.value if isinstance(padding_mode, Enum) else padding_mode, + 
"align_corners": align_corners if align_corners is not None else "none", + }, + ) # set the 'affine' key meta_data["affine"] = new_affine return d def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = deepcopy(dict(data)) - for key, mode, padding_mode, align_corners, dtype in self.key_iterator( - d, self.mode, self.padding_mode, self.align_corners, self.dtype - ): + for key, dtype in self.key_iterator(d, self.dtype): transform = self.get_most_recent_transform(d, key) if self.spacing_transform.diagonal: raise RuntimeError( @@ -227,6 +236,9 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar # Create inverse transform meta_data = d[transform[InverseKeys.EXTRA_INFO]["meta_data_key"]] old_affine = np.array(transform[InverseKeys.EXTRA_INFO]["old_affine"]) + mode = transform[InverseKeys.EXTRA_INFO]["mode"] + padding_mode = transform[InverseKeys.EXTRA_INFO]["padding_mode"] + align_corners = transform[InverseKeys.EXTRA_INFO]["align_corners"] orig_pixdim = np.sqrt(np.sum(np.square(old_affine), 0))[:-1] inverse_transform = Spacing(orig_pixdim, diagonal=self.spacing_transform.diagonal) # Apply inverse @@ -235,7 +247,7 @@ def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndar affine=meta_data["affine"], mode=mode, padding_mode=padding_mode, - align_corners=align_corners, + align_corners=False if align_corners == "none" else align_corners, dtype=dtype, ) meta_data["affine"] = new_affine @@ -483,17 +495,26 @@ def __init__( def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) for key, mode, align_corners in self.key_iterator(d, self.mode, self.align_corners): - self.push_transform(d, key) + self.push_transform( + d, + key, + extra_info={ + "mode": mode.value if isinstance(mode, Enum) else mode, + "align_corners": align_corners if align_corners is not None else "none", + }, + ) d[key] = self.resizer(d[key], mode=mode, align_corners=align_corners) return d def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = deepcopy(dict(data)) - for key, mode, align_corners in self.key_iterator(d, self.mode, self.align_corners): + for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) orig_size = transform[InverseKeys.ORIG_SIZE] + mode = transform[InverseKeys.EXTRA_INFO]["mode"] + align_corners = transform[InverseKeys.EXTRA_INFO]["align_corners"] # Create inverse transform - inverse_transform = Resize(orig_size, mode, align_corners) + inverse_transform = Resize(orig_size, mode, None if align_corners == "none" else align_corners) # Apply inverse transform d[key] = inverse_transform(d[key]) # Remove the applied transform @@ -573,17 +594,28 @@ def __call__( for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): orig_size = d[key].shape[1:] d[key], affine = self.affine(d[key], mode=mode, padding_mode=padding_mode) - self.push_transform(d, key, orig_size=orig_size, extra_info={"affine": affine}) + self.push_transform( + d, + key, + orig_size=orig_size, + extra_info={ + "affine": affine, + "mode": mode.value if isinstance(mode, Enum) else mode, + "padding_mode": padding_mode.value if isinstance(padding_mode, Enum) else padding_mode, + }, + ) return d def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = deepcopy(dict(data)) - for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + for key in self.key_iterator(d): transform = 
self.get_most_recent_transform(d, key) orig_size = transform[InverseKeys.ORIG_SIZE] # Create inverse transform fwd_affine = transform[InverseKeys.EXTRA_INFO]["affine"] + mode = transform[InverseKeys.EXTRA_INFO]["mode"] + padding_mode = transform[InverseKeys.EXTRA_INFO]["padding_mode"] inv_affine = np.linalg.inv(fwd_affine) affine_grid = AffineGrid(affine=inv_affine) @@ -701,18 +733,28 @@ def __call__( affine = torch.as_tensor(np.eye(len(sp_size) + 1), device=self.rand_affine.rand_affine_grid.device) for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): - self.push_transform(d, key, extra_info={"affine": affine}) + self.push_transform( + d, + key, + extra_info={ + "affine": affine, + "mode": mode.value if isinstance(mode, Enum) else mode, + "padding_mode": padding_mode.value if isinstance(padding_mode, Enum) else padding_mode, + }, + ) d[key] = self.rand_affine.resampler(d[key], grid, mode=mode, padding_mode=padding_mode) return d def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = deepcopy(dict(data)) - for key, mode, padding_mode in self.key_iterator(d, self.mode, self.padding_mode): + for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) orig_size = transform[InverseKeys.ORIG_SIZE] # Create inverse transform fwd_affine = transform[InverseKeys.EXTRA_INFO]["affine"] + mode = transform[InverseKeys.EXTRA_INFO]["mode"] + padding_mode = transform[InverseKeys.EXTRA_INFO]["padding_mode"] inv_affine = np.linalg.inv(fwd_affine) affine_grid = AffineGrid(affine=inv_affine) @@ -1171,24 +1213,35 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda dtype=dtype, ) rot_mat = self.rotator.get_rotation_matrix() - self.push_transform(d, key, orig_size=orig_size, extra_info={"rot_mat": rot_mat}) + self.push_transform( + d, + key, + orig_size=orig_size, + extra_info={ + "rot_mat": rot_mat, + "mode": mode.value if isinstance(mode, Enum) else mode, + "padding_mode": padding_mode.value if isinstance(padding_mode, Enum) else padding_mode, + "align_corners": align_corners if align_corners is not None else "none", + }, + ) return d def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = deepcopy(dict(data)) - for key, mode, padding_mode, align_corners, dtype in self.key_iterator( - d, self.mode, self.padding_mode, self.align_corners, self.dtype - ): + for key, dtype in self.key_iterator(d, self.dtype): transform = self.get_most_recent_transform(d, key) # Create inverse transform fwd_rot_mat = transform[InverseKeys.EXTRA_INFO]["rot_mat"] + mode = transform[InverseKeys.EXTRA_INFO]["mode"] + padding_mode = transform[InverseKeys.EXTRA_INFO]["padding_mode"] + align_corners = transform[InverseKeys.EXTRA_INFO]["align_corners"] inv_rot_mat = np.linalg.inv(fwd_rot_mat) xform = AffineTransform( normalized=False, mode=mode, padding_mode=padding_mode, - align_corners=align_corners, + align_corners=False if align_corners == "none" else align_corners, reverse_indexing=True, ) output = xform( @@ -1283,10 +1336,6 @@ def randomize(self, data: Optional[Any] = None) -> None: def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: self.randomize() d = dict(data) - if not self._do_transform: - for key in self.keys: - self.push_transform(d, key, extra_info={"rot_mat": np.eye(d[key].ndim)}) - return d angle: Union[Sequence[float], float] = self.x if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z) rotator = Rotate( angle=angle, @@ -1296,34 
+1345,48 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda d, self.mode, self.padding_mode, self.align_corners, self.dtype ): orig_size = d[key].shape[1:] - d[key] = rotator( - d[key], - mode=mode, - padding_mode=padding_mode, - align_corners=align_corners, - dtype=dtype, + if self._do_transform: + d[key] = rotator( + d[key], + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + dtype=dtype, + ) + rot_mat = rotator.get_rotation_matrix() + else: + rot_mat = np.eye(d[key].ndim) + self.push_transform( + d, + key, + orig_size=orig_size, + extra_info={ + "rot_mat": rot_mat, + "mode": mode.value if isinstance(mode, Enum) else mode, + "padding_mode": padding_mode.value if isinstance(padding_mode, Enum) else padding_mode, + "align_corners": align_corners if align_corners is not None else "none", + }, ) - rot_mat = rotator.get_rotation_matrix() - self.push_transform(d, key, orig_size=orig_size, extra_info={"rot_mat": rot_mat}) return d def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = deepcopy(dict(data)) - for key, mode, padding_mode, align_corners, dtype in self.key_iterator( - d, self.mode, self.padding_mode, self.align_corners, self.dtype - ): + for key, dtype in self.key_iterator(d, self.dtype): transform = self.get_most_recent_transform(d, key) # Check if random transform was actually performed (based on `prob`) if transform[InverseKeys.DO_TRANSFORM]: # Create inverse transform fwd_rot_mat = transform[InverseKeys.EXTRA_INFO]["rot_mat"] + mode = transform[InverseKeys.EXTRA_INFO]["mode"] + padding_mode = transform[InverseKeys.EXTRA_INFO]["padding_mode"] + align_corners = transform[InverseKeys.EXTRA_INFO]["align_corners"] inv_rot_mat = np.linalg.inv(fwd_rot_mat) xform = AffineTransform( normalized=False, mode=mode, padding_mode=padding_mode, - align_corners=align_corners, + align_corners=False if align_corners == "none" else align_corners, reverse_indexing=True, ) output = xform( @@ -1384,7 +1447,15 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda for key, mode, padding_mode, align_corners in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners ): - self.push_transform(d, key) + self.push_transform( + d, + key, + extra_info={ + "mode": mode.value if isinstance(mode, Enum) else mode, + "padding_mode": padding_mode.value if isinstance(padding_mode, Enum) else padding_mode, + "align_corners": align_corners if align_corners is not None else "none", + }, + ) d[key] = self.zoomer( d[key], mode=mode, @@ -1395,19 +1466,20 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = deepcopy(dict(data)) - for key, mode, padding_mode, align_corners in self.key_iterator( - d, self.mode, self.padding_mode, self.align_corners - ): + for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Create inverse transform zoom = np.array(self.zoomer.zoom) inverse_transform = Zoom(zoom=1 / zoom, keep_size=self.zoomer.keep_size) + mode = transform[InverseKeys.EXTRA_INFO]["mode"] + padding_mode = transform[InverseKeys.EXTRA_INFO]["padding_mode"] + align_corners = transform[InverseKeys.EXTRA_INFO]["align_corners"] # Apply inverse d[key] = inverse_transform( d[key], mode=mode, padding_mode=padding_mode, - align_corners=align_corners, + align_corners=None if align_corners == "none" else align_corners, ) # Size might be out by 1 voxel so 
pad d[key] = SpatialPad(transform[InverseKeys.ORIG_SIZE])(d[key]) @@ -1496,7 +1568,16 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda for key, mode, padding_mode, align_corners in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners ): - self.push_transform(d, key, extra_info={"zoom": self._zoom}) + self.push_transform( + d, + key, + extra_info={ + "zoom": self._zoom, + "mode": mode.value if isinstance(mode, Enum) else mode, + "padding_mode": padding_mode.value if isinstance(padding_mode, Enum) else padding_mode, + "align_corners": align_corners if align_corners is not None else "none", + }, + ) if self._do_transform: d[key] = zoomer( d[key], @@ -1508,21 +1589,22 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = deepcopy(dict(data)) - for key, mode, padding_mode, align_corners in self.key_iterator( - d, self.mode, self.padding_mode, self.align_corners - ): + for key in self.key_iterator(d): transform = self.get_most_recent_transform(d, key) # Check if random transform was actually performed (based on `prob`) if transform[InverseKeys.DO_TRANSFORM]: # Create inverse transform zoom = np.array(transform[InverseKeys.EXTRA_INFO]["zoom"]) + mode = transform[InverseKeys.EXTRA_INFO]["mode"] + padding_mode = transform[InverseKeys.EXTRA_INFO]["padding_mode"] + align_corners = transform[InverseKeys.EXTRA_INFO]["align_corners"] inverse_transform = Zoom(zoom=1 / zoom, keep_size=self.keep_size) # Apply inverse d[key] = inverse_transform( d[key], mode=mode, padding_mode=padding_mode, - align_corners=align_corners, + align_corners=None if align_corners == "none" else align_corners, ) # Size might be out by 1 voxel so pad d[key] = SpatialPad(transform[InverseKeys.ORIG_SIZE])(d[key]) diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index 7c4ea398f6..67da9ceb35 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -406,7 +406,6 @@ def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: def inverse(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]: d = deepcopy(dict(data)) for key in self.key_iterator(d): - transform = self.get_most_recent_transform(d, key) # Create inverse transform inverse_transform = ToNumpy() # Apply inverse diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index eb1b194c96..b73a899153 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -22,8 +22,18 @@ from monai.networks.layers import GaussianFilter from monai.transforms.compose import Compose from monai.transforms.transform import MapTransform -from monai.utils import ensure_tuple, ensure_tuple_rep, ensure_tuple_size, fall_back_tuple, min_version, optional_import -from monai.utils.misc import issequenceiterable +from monai.utils import ( + GridSampleMode, + InterpolateMode, + InverseKeys, + ensure_tuple, + ensure_tuple_rep, + ensure_tuple_size, + fall_back_tuple, + issequenceiterable, + min_version, + optional_import, +) measure, _ = optional_import("skimage.measure", "0.14.2", min_version) @@ -53,6 +63,7 @@ "extreme_points_to_image", "map_spatial_axes", "allow_missing_keys_mode", + "convert_inverse_interp_mode", ] @@ -756,3 +767,34 @@ def allow_missing_keys_mode(transform: Union[MapTransform, Compose, Tuple[MapTra # Revert for t, o_s in zip(transforms, orig_states): t.allow_missing_keys = o_s + + +def 
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index 7c4ea398f6..67da9ceb35 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -406,7 +406,6 @@ def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
     def inverse(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
         d = deepcopy(dict(data))
         for key in self.key_iterator(d):
-            transform = self.get_most_recent_transform(d, key)
             # Create inverse transform
             inverse_transform = ToNumpy()
             # Apply inverse
diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py
index eb1b194c96..b73a899153 100644
--- a/monai/transforms/utils.py
+++ b/monai/transforms/utils.py
@@ -22,8 +22,18 @@
 from monai.networks.layers import GaussianFilter
 from monai.transforms.compose import Compose
 from monai.transforms.transform import MapTransform
-from monai.utils import ensure_tuple, ensure_tuple_rep, ensure_tuple_size, fall_back_tuple, min_version, optional_import
-from monai.utils.misc import issequenceiterable
+from monai.utils import (
+    GridSampleMode,
+    InterpolateMode,
+    InverseKeys,
+    ensure_tuple,
+    ensure_tuple_rep,
+    ensure_tuple_size,
+    fall_back_tuple,
+    issequenceiterable,
+    min_version,
+    optional_import,
+)

 measure, _ = optional_import("skimage.measure", "0.14.2", min_version)

@@ -53,6 +63,7 @@
     "extreme_points_to_image",
     "map_spatial_axes",
     "allow_missing_keys_mode",
+    "convert_inverse_interp_mode",
 ]


@@ -756,3 +767,34 @@ def allow_missing_keys_mode(transform: Union[MapTransform, Compose, Tuple[MapTra
     # Revert
     for t, o_s in zip(transforms, orig_states):
         t.allow_missing_keys = o_s
+
+
+def convert_inverse_interp_mode(trans_info: List, mode: str = "nearest", align_corners: Optional[bool] = None):
+    """
+    Change the interpolation mode when inverting spatial transforms, defaulting to "nearest".
+    It supports both single data and batch data.
+
+    Args:
+        trans_info: transforms inverse information list, contains context of every invertible transform.
+        mode: target interpolation mode to convert to, default to "nearest" as it's usually used to save the model output.
+        align_corners: target align_corners value in the PyTorch interpolation API; it should be consistent with `mode`.
+
+    """
+    interp_modes = [i.value for i in InterpolateMode] + [i.value for i in GridSampleMode]
+
+    # set to string for DataLoader collation
+    align_corners_ = "none" if align_corners is None else align_corners
+
+    for item in ensure_tuple(trans_info):
+        if InverseKeys.EXTRA_INFO in item:
+            orig_mode = item[InverseKeys.EXTRA_INFO].get("mode", None)
+            if orig_mode is not None:
+                if orig_mode[0] in interp_modes:
+                    item[InverseKeys.EXTRA_INFO]["mode"] = [mode for _ in range(len(orig_mode))]
+                elif orig_mode in interp_modes:
+                    item[InverseKeys.EXTRA_INFO]["mode"] = mode
+            if "align_corners" in item[InverseKeys.EXTRA_INFO]:
+                if issequenceiterable(item[InverseKeys.EXTRA_INFO]["align_corners"]):
+                    item[InverseKeys.EXTRA_INFO]["align_corners"] = [align_corners_] * len(item[InverseKeys.EXTRA_INFO]["align_corners"])
+                else:
+                    item[InverseKeys.EXTRA_INFO]["align_corners"] = align_corners_
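
`convert_inverse_interp_mode` is meant to run over a decollated sample's transform trace just before `inverse` is called, rewriting every recorded interpolation mode to "nearest" so that discrete label or prediction values survive the inverse resampling. A small usage sketch against this patch (the one-entry trace below is hand-built for illustration; real traces come from `push_transform`):

from monai.transforms.utils import convert_inverse_interp_mode
from monai.utils.enums import InverseKeys

trace = [{InverseKeys.EXTRA_INFO: {"mode": "bilinear", "align_corners": True}}]
convert_inverse_interp_mode(trace, mode="nearest", align_corners=None)

# "bilinear" is a GridSampleMode value, so it is rewritten to "nearest", and
# align_corners=None is stored as the collation-safe string "none"
print(trace[0][InverseKeys.EXTRA_INFO])  # {'mode': 'nearest', 'align_corners': 'none'}
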
diff --git a/tests/test_handler_transform_inverter.py b/tests/test_handler_transform_inverter.py
index 48efd5df53..87414319cf 100644
--- a/tests/test_handler_transform_inverter.py
+++ b/tests/test_handler_transform_inverter.py
@@ -20,6 +20,7 @@
 from monai.handlers import TransformInverter
 from monai.transforms import (
     AddChanneld,
+    CastToTyped,
     Compose,
     LoadImaged,
     RandAffined,
@@ -29,8 +30,10 @@
     RandRotated,
     RandZoomd,
     ResizeWithPadOrCropd,
+    ScaleIntensityd,
     ToTensord,
 )
+from monai.utils.misc import set_determinism
 from tests.utils import make_nifti_image

 KEYS = ["image", "label"]
@@ -38,19 +41,22 @@ class TestTransformInverter(unittest.TestCase):
     def test_invert(self):
-        im_fname, seg_fname = [make_nifti_image(i) for i in create_test_image_3d(101, 100, 107)]
+        set_determinism(seed=0)
+        im_fname, seg_fname = [make_nifti_image(i) for i in create_test_image_3d(101, 100, 107, noise_max=100)]
         transform = Compose(
             [
                 LoadImaged(KEYS),
                 AddChanneld(KEYS),
+                ScaleIntensityd(KEYS, minv=1, maxv=10),
                 RandFlipd(KEYS, prob=0.5, spatial_axis=[1, 2]),
                 RandAxisFlipd(KEYS, prob=0.5),
                 RandRotate90d(KEYS, spatial_axes=(1, 2)),
                 RandZoomd(KEYS, prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
-                RandRotated(KEYS, prob=0.5, range_x=np.pi),
+                RandRotated(KEYS, prob=0.5, range_x=np.pi, mode="bilinear", align_corners=True),
                 RandAffined(KEYS, prob=0.5, rotate_range=np.pi),
                 ResizeWithPadOrCropd(KEYS, 100),
                 ToTensord(KEYS),
+                CastToTyped(KEYS, dtype=torch.uint8),
             ]
         )
         data = [{"image": im_fname, "label": seg_fname} for _ in range(12)]
@@ -69,11 +75,13 @@ def _train_func(engine, batch):
         engine = Engine(_train_func)

         # set up testing handler
-        TransformInverter(transform=transform, loader=loader, output_key="image").attach(engine)
+        TransformInverter(transform=transform, loader=loader, output_key="image", nearest_interp=True).attach(engine)

         engine.run(loader, max_epochs=1)
+        set_determinism(seed=None)
         self.assertTupleEqual(engine.state.output["image"].shape, (2, 1, 100, 100, 100))
         for i in engine.state.output["image_inverted"]:
+            np.testing.assert_allclose(i.astype(np.uint8).astype(np.float32), i, rtol=1e-4)
             self.assertTupleEqual(i.shape, (1, 100, 101, 107))
diff --git a/tests/test_inverse.py b/tests/test_inverse.py
index ccc4f366c2..358bf0176a 100644
--- a/tests/test_inverse.py
+++ b/tests/test_inverse.py
@@ -54,6 +54,7 @@
     SpatialPadd,
     Zoomd,
     allow_missing_keys_mode,
+    convert_inverse_interp_mode,
 )
 from monai.utils import first, get_seed, optional_import, set_determinism
 from monai.utils.enums import InverseKeys
@@ -572,9 +573,11 @@ def test_inverse_inferred_seg(self):
         segs_dict = {"label": segs, label_transform_key: data[label_transform_key]}

         segs_dict_decollated = decollate_batch(segs_dict)
-
         # inverse of individual segmentation
         seg_dict = first(segs_dict_decollated)
+        # test converting the interpolation mode for one sample of the model output batch
+        convert_inverse_interp_mode(seg_dict, mode="nearest", align_corners=None)
+
         with allow_missing_keys_mode(transforms):
             inv_seg = transforms.inverse(seg_dict)["label"]
         self.assertEqual(len(data["label_transforms"]), num_invertible_transforms)

From d4093c7b90a84faca7f74025fb9bb4f0d49cb94c Mon Sep 17 00:00:00 2001
From: Nic Ma
Date: Sat, 10 Apr 2021 07:15:50 +0800
Subject: [PATCH 168/457] Update technical highlights for v0.5 (#1981)

* [DLMED] update highlights for 0.5

Signed-off-by: Nic Ma

* [DLMED] add invert transforms

Signed-off-by: Nic Ma

* [DLMED] add more highlights

Signed-off-by: Nic Ma

* [DLMED] add checkpointloader

Signed-off-by: Nic Ma

* downscale image

Signed-off-by: Wenqi Li

* downscale image

Signed-off-by: Wenqi Li

* update docs

Signed-off-by: Wenqi Li

* update desc

Signed-off-by: Wenqi Li

* update

Signed-off-by: Wenqi Li

Co-authored-by: Wenqi Li
---
 docs/images/3d_paired.png         | Bin 0 -> 84825 bytes
 docs/images/deepgrow.png          | Bin 0 -> 112051 bytes
 docs/images/invert_transforms.png | Bin 0 -> 353100 bytes
 docs/images/lr_finder.png         | Bin 0 -> 265198 bytes
 docs/images/metrics_report.png    | Bin 0 -> 259387 bytes
 docs/images/pathology.png         | Bin 0 -> 291620 bytes
 docs/images/tta.png               | Bin 0 -> 264279 bytes
 docs/source/highlights.md         |  68 +++++++++++++++++++++++++++---
 monai/networks/blocks/warp.py     |   2 +-
 9 files changed, 64 insertions(+), 6 deletions(-)
 create mode 100644 docs/images/3d_paired.png
 create mode 100644 docs/images/deepgrow.png
 create mode 100644 docs/images/invert_transforms.png
 create mode 100644 docs/images/lr_finder.png
 create mode 100644 docs/images/metrics_report.png
 create mode 100644 docs/images/pathology.png
 create mode 100644 docs/images/tta.png

diff --git a/docs/images/3d_paired.png b/docs/images/3d_paired.png
new file mode 100644
index 0000000000000000000000000000000000000000..dd751c8e164be060a7c1534de7cca7a1e4b3802a
GIT binary patch
literal 84825
[84825 bytes of base85-encoded PNG data omitted]
zIJG}7JVBx#65{)aJMq*-~l42;0Jd3ZJ; z($2q~NA|*Cg>unTX1KWpU@62ia7-W01q7$GM;@!0g=z&^9l|Sru=;7=boo|BjRyI7 zL{vT=F0}8>0E7C@^npXKS;ID=wE@2AY>_gcG}wIpyho;)JQ@r?jxsg30tddb;0y=- z(brqaT+Fr0?ZEzAm!)L3WE3Q8)_19l<*i7dg5u!Br#(ya+m_B1O z((~Y|srT~o^S{cTyvY)m;Y|+)*wK4$GD1RdK?DG<(;o60*udt;i_twd7-~?rAlyKU z^7@HC%OLB1>fC*L;HS^+S&IoFGD)(_x((~%jRp-fgpLI!TtL~ZB6PydPa4fMfYf0; z%|Kzs85gq>a+*vO^5?*DZX7d#N&(7`)SN7r74&}p@uYh}%?62Olu}&z2u|p4Xy0sX zY{cY0Ac4~XoDo1H%;TUEN1gF#2rln2dM+^j(Kwh(lz<+9p?-tl>i7?K&qE$gp&7LA zz!s|7c~9jp7s#G51aBPV>TGwcG3b!yR!szNyxQqxNf?}(A`djs_h;8xiGO`nK-fCT zE`1kFZdkKV=G9tHfuf|dBG`x^^!JI{?)@UElBy{O-7J5xFPw=FWmZ-Mt%*M7>z{`3 zJ<9T86*{jcgieJa-V#s_2ZhN_a?+{gsFszbk3*1rC{PJNwCA1E-XatbJBy_y12C|4 zicjLjFb^glz4K8bt(;+?$iu1#*gv(<-(67APXY?y>BYrVln^axoZ;DzA5(q=Av2ju za;2iij}#!3v(l?v8KtMCsU1`Xwatt>rr$`JmqeH0G{pV|91<35I#w0TK#|ultymE7dWLz{+odgTKauzJ8EM} zYV>4{>40mJQvApzg1l#VvY)_HuaBk=)Z^{tdHr~mvb29)t0p5IXvajY=@T}hxayM+ zh|NNEv;>PQBl4JE1xpH_=Tuhe4P&i|-jw1*nR?)p|7F-F{l+1_zP|@Ya(U+koLfMp z>cCIRRf*X4CmIq50{FH7K>tqz4g%1j#4NSx2$XS7r$K_6CYYk2USpWS6YpAZ=F#Vg zQ9DIVCOA?r7~{wSV}$go1aQItXcz=YgBA#6CrShs4Tol={Ph|`e41zyy|OOLVokVa zq2g!M0spMm?|1@SE&*f7_gEeKIV_`UsS)Q>{CPE`>d}k}W$K#QVbUx1d=TUUboMp? zk-!+5QSN_7RtEs=S7+gAe8|&rxY{;lzqF@%{ZuWPGNE162I7|qG ze6HIORi6NGgg{1ZLT<R0QnJEVLxY3d(c-AvmrS)Ymr~5{ruAdZsS2GAr@E?q}8r+_Eg#;?XK_zF!k`S07&T2x@20*uGMU6K+Jjf^s0|6fyI8CF%) zZH7Pq&2jPvs`sBj#&~aG*sq{_Gg@cZ0#w zPfWOe-)7}UIZ3+ztdC%*8~Kx$9XHtp@EjDX0z>X9nZ|aId8aZe5{ws`*MNWjTEkg@ z)6WXBvt8#i^l+Zh{IQCQUwFMb1kOIG+cCsx+-o@VN8G*Hd)0A=%Xjz+fdz3c}x>g^yVE#UWxVD!1`mU zOVDGduX4WR;;+@@qi1q$xw?G!+}%f`)yGeG9!`{JL{VUq+1N=4#59ecX3`3722?|u zS3o61m?7Y$7i^B8q}4XEyk={p6xa{dI$}jmNY30i)f@&@UA8`peeS}tY33mrr}(e_ zwk$TYU$dDr@1h7!-jC#_l}C`<{rGlJKgznq!5sRd9L_m9I=ZT!8{(0$dO!t2evZn7 zzoju8pU@$bgEp30duWi?h`#&uJ1e*q5kLlePc|@ds-g5Qmr)y_N_GC#r z%X2POuh|$CSmtl{yde`~(@**x4|>cXQSywjo5yTZaT$9)Rz*cc$HF4&@(oY6m^+hG zXI_jv>3&cD%h;|OX2{rlEzAeMNB}+x2na~YR3YxD8QA z6bFMWiF@3vEo8gG!3O6T1?`t{?%q%|ER(5Vt~M47DszX<}h!9&1IM-SyFv zcT5x}gZo^XDuVNW6&C)?+mDMgvh*JC<|f`NFa+egm$7A_`_jQ@7$}~GXl5l)F80bC z0Z4TPcc8UpLnC8~3(- zK_~}63;$GfZ z$%piT<%rf;*rP!WDbK@kC|I(PxgNB6^kGQVdg+5z=HCFd>($z7K}cb@r=&wGovO)l ztlH! z26Ki;y>0@!`i=xyS_Gv&bi`l+1mFx9^lZzeKWd>upS0#M30PfNTFNVG1M9FGGsueX zwwHilzEE)9SB#(IOLDG1m*ZD_L2l-`BP}n+CEqVfWg_4TS}3MK&7H%hTq(akyx9~- z+t3iL_UuZ21+zXUn1sTJV?!Zk@#DO95WA;*OVHh-1%AW33;uOGBc_NMjd)UuIIBcz zOy<*!)t+k;Ed~yLmwP@*c>Z86UEP<1UQFB4(z1UvD{9g%!94y38=YcTGnd=GwRlWu zNB)b4;&B^MM0_9JB%MtLF{I(X`bul%t&<`Ar?jFAG`?$E+$h2v+}uyZ>ZPg@zf_Ns zFk&sjcqeK6u|tb{{f#^W`yYoM00q|5tyVuB%4^6^Z+G*f;h} z?7F!gOK?VsuAH<{m1)we3rYZ_4_YtqE*&eje*RJI-Fj$$-%h^CVGkEKEiL72*LL>% zf3#suiw+-#Gbo-ARcEoH3qE~ngl3C-%4kj)%tXM0iENAZx?0^2iE`&1eea>gOiHIL zun>mN&dAC-Kc?tAp~8ORPAv9$?%%(>n75{(_g`mTze36}bGdH9kGR>L0%(?pj~>OS zeH5iE_? z)|ovCbMta_j4TOFULSOuT&11^vQ4UmF|<&m^mr~JVY8L0RPcR611sV@**-I+tPA%# z-`M{2T}oIcF9!9LzXom!a79fpbV8ocC7Jb_?Fp{`sFM4|!$^77vUut4x6$#@{E^eQ zv*@|xESLqet=@h3fDZ)Br=p_10oVAdN!G2I##6kv?2;B6!yfuXchj`kEhe}>?xllV zDDVu41$Ak+mnMNk4LyUnhh&ky8;i>ysJNp;n5aU8$S5gdJB zt>=N;B76>SCY7${cW({VhL_o`BcX7z@X(8yZP7%mu=~p5#Mm*xUIeFwXa_eC-v|o- zDk<>}_P0^p1e=a>b)h_ah80gyK$r{bH;1uRPZlo#)-CWeaz9$5FBb)bilMc6LjjP3 z1Tcv4gl^gzx$O@h4z6VRUASG}!s*N-*|+tFWRPo3rNj&N@sFeldNHxPfDdq**$F4A zbAuif#sb->U$W&n+R{*+Gc`zUsZ@;TOT{Y8o+IKe?32dyhLHWbbIOBC*KtY}k>2+<_=9y9G2jUR?h z-)}-77yZTZE#u1jD2zQtN6-hg+G#CCxiE_kn!dQF9R8mZ9{fuVVOTO=k_=w(!4IY! 
zcSWIV-oCx~t+YEMQJ?8bVkjdDqfP1~r1k{0VW`}p__f96e3)&>1AGQM)wHicwqJfW zF*bvIj^S<>kJrOQ>-IdX1B;G11vxp##g~~yUnmTQ4L;@O0(EL<;hDcked5>AQF4c% z`%NR}zSMLd%J={h-l0edjQ-&8K}&k!;_NewsO`rCt))^@Qc@(^18zD-+4hdZpf}+L znS|3HZnn?;(5M23)YjI9cpPF;R%PSbP%|AK`T#QakbHj8I2FZBSZ9@vImZwfAB`5C zu=0~{&p0E!dh;mYUd60FQBlsid;bp90q}o7$tayFtXL4GtlbZ{F#^AZYtH>yshY(u z(`n#rn3kwIseY^`rse8yT@ALTM#2KVpoIIXdA~m3s@tciC9shlxC|ZO5}s&FW4Wu% zgs)z^*6bu@3%xNG6=GZ3+UZyyq*COme#()g{_x$W>Encb`M7)~*#W%}=C{{lxGumu z2!slpm2)=PXeKEJ8mb>g-wn2X>OMt(e-!=#N|TF3Z^jC`za+F2+RE!*s&jtJr$|jg zf*;tuXZDsVRrQle*(lF6Vaa4pwErh*YREliyFva7TUd`$-rO+a3mzoo04=w0>KUr~OE24pQ3Pg$y_LGGApFo1_Q_Q5NN# z$xY=c#|sQBQq?{LTgE1hX3U<9L^u8PfNe@;&K=NGvui ztw|tVp4$Am%kS+2%oDw$q^}A!|E_yyO0h?djQ`A;+NQwAJwN-k*xUBArCts#ECb+K zV0;sZ>tdHFC17sIJ*dm;w^PYo`<8cFwjKQIi?V;}9iV7;T>IUl2l}31uwkDrpkAP% zCz*Nx6J{mGk+I*kzqH(uCzYUn^?(|J0UPnwVKKq`naOS1OmQ!J=tB0;^(&4^QRG|3 z%KNBncEg+7%?}Q8X?z6v6+l>(0#&|vv~jy-_hXX0>)-(Pr>&Rs(%H8nAxS2ZJ4O@W6dqE)(E z0F1-x>V6xCqI({_-;!b`cO!lPm?vzrwrJRPpj+NJK{@27Em>N!rQ}#0SP*DY3CC&K zu(4j`v=me#xS%HAf{%5N3`(I~0L(1xUQRf19FwR(b0q$(|8*+e^YL64AO#muayq2L zv3E;DgA9P?gJ7q!ap@t$`YLj<2mN`a%XlOw=d;4`T({=muYD&}v*AF|5$dc&rl^8l zcHew+Od%%*#aa87zpGSi#(J!_boJ+#ts(|tGYo7%uxrDk6#kWy?n`a0S}Hh?+Q#3< zUq=zq)*XBkYcHwuX=MCLsZoNZ39! zvMxdY1}~rAS!^MikwN{MS7Ef|N1A&d>+*QjEIfM_?>t+0R5HU(%mzcn_v2FfA;Cil zvUA&>&k(76)iYKgljy#4^EBi>J`ROG4xH`()&Rd$1Z)veBk1Wp2YyF`2bKS7DVDP2 zke6llknIJ!7V}h(BACNLQ5C%(NUR-X_N5toY2XM4T>#9FZys_p&CSi-4ze1%U=3YA zEVGbSo8Y0vP@trjhVyu0(HE-CW%66kIk`?IKLb|zH4iZfX>8{L&E;2S`a!iEVTU&@ zSSeo>z5AdjXoVbPv8pT!5s`+|f{FE=d`fbtxmkEGTDw2V2ynBu=qvZp`5SY`HvM#N zAdAZ}JMjNF*&*7jN&DW$;(tBz0X4NQzvrzq&F|Lqu+Os_}cUBO}9b6BE(* z#XDH_U^X?C#9!kwwX2K9RoD7ktfA29LSQh?B(8;xJ1Iz1Y#lx zUZFUeavI?JBycI3Nt4Idajx$Q%4B0vA9G8I;Sh9matZ}rSe7Mh z+zH)dKlJG}jDC~dU~Nin`Jfb7ua_Wx@#Us4Ny!3sC3%)6wu1{c4WN&qyM$vN+M4Xk zJZD!9Svdq@rINT)oH>xG0EJCPp+F`kz%m^8y_B09fb5QovRLDd3u% zcl+YF_^uJM&s(SJHPO(Uj1W_3X7_O_F>HUHm_DV?(0A963svfCEn2U~q~3EKiO5Y4 z-HQTmZ0INbhUk@dkG?P=pLEe&=HQp79|` z1ZJCDq`jDRO-;LKNKcO85^sZxyEOA(j+Oz(Hw>Z|Y*a7Y6;JB!!4fb1)t3Njj7a|YQBn=>GCf!(kc$9k`{rplSjAP0uY3Rh>B}MTyyU@bn zx|ODxr3)$;1TLG0Jq&(iCRRP_oN__>Z{~oGSh6YYzk*^`qD>!?8osG)zB3}B@8IG! zVarg85sSvX7@~@$KkOx0Ja;RkSvSV+JOlHzuxsL*T|AC!iN7Mf)!=0=UQp{ znhWYgz>Qc$FvU+|ALtqn0}<0s$(cq}q+6=?L{q~K)AqDm-{y73y8Y~X#%t5(VT-*_ zE|cJwN@!EX9-$mKfo8Qkv(MA)kt~%YIiY3+qM{9 zD=|utotqawgI0$!xa4Nkrm6bP&=bLW^nTq!!!|DVwybqxOweozjM9UAN?u;R$oXxn z&D83^cqyDEkRo>7BOQFjd_*Hzyc#z|l%)jnjU)Npt-Rt3lUjY2#cNaLajh!cO&H_f zQ^_&jy11ysTIII+N{^NRtXcXT9*l$zcao4IEd{7cLhkF&b=#M|(zxDsJ#V%PE+IM#)NpG)`vA9s~XxB=v_v_EZf|K_)lJwI! 
z=$PiYo+!yoTpZEJahXeHW5$E%-zA!~*}c3|kqfHIB!{*ojDKsG-0rPU_!JY$Ii{5_ z1KPOKLPfBnj*u$I;L-6kLqoqiZFp61I?RToJp1Df(wQX5>=L^!`d{Ajsb%q@V@^{4 zvA_doTIdqOscY$)rJTls^5g^M*Q&P)P24`07}Ek?Lo9#Lao1t)EwliZhnorgd#uhq zKYYFw98GBE9}Q;ahjn•QjYn2gRG9p{kzn@pLR#SIf$fhQff~5><@a2d3%*PP zGBowsH?Lp9s>ImI=67Hz@3Mt~0RVIK#~wA1^J&+0+3T>^vZwP@ELmfJe57E%N7C}G z0sClguO?1nxrcMP$p-eWd{prKGrGC%cXbA&5$!V&FqLcSp zsznT$A>Hs_;Mrav^m^&#Cl6bw%4k$*ubzAG@L|Z%Vhgm~_K!2Z4-Z>!Ot+e8J4)r( z)-r?YA!V32sMTgb^jr^tn@sAb+FMZeEUk?7E?Z)(oiTPfj zhX(5P{)8R0U}c`>W#h8TzU`}?fjG;j{rMJUloehZHg6J)x|dFK3Uk{)R%9=EE+Z** z+-%RV>~U`@6Rk^cy+Z1lE6TuNIKSvfiB2J2*!rG7P;%tN)rluuL5^y~ zpMB-|oy16KoA@;e0nstI%a;lY3d(j72PG(Z96U<4cXl$evg{1cw2=w5!K}OUDO-uF zz95Q(U;=GZ(;JUQQ&uI<6bs<%AU^G6Jv)BCZt#4qPS4wW=;%lUVz)A%e`&nSfBt2+ zRf~Kkc&>;PJ{%8PsJ3{bc)f6K60Olpu&ey{JE>GU!PO4?G6?d)X?)=oZvBCj7zehFSo zt3?(Z62~M;1sUkva(Rj8N3u_tuqf%@SazQ1qpCw?itq($ZXbwRZD4dWtIa&_#AJ7>A zqwF%M$}@#nLKg+>2sr(&rV|npBDl;jm2aqn6Nu}C{~kL_v@Kju3amK5&CJ{flz|il6+k-}J`gWa0HbW-x1o%LyZ)~BF!mpOh*(*N zjynl2!AkW%JDO(M0NF>C68mjOHxE+X)*mtdpP)lw{f8h85*pywL zPk9cF6?u8pM%$%~T>jI=^wS*P$h*NMI2K-01Q2O3^Q)}4wY0)+e@(%U0j!T5m+SlO zTc~a`Tend0XfrnvTXo!j{k!_WPzpv&0e!Cl$5a!DJ!mb4f>GbVE>dh4+`&`i_9vgO zfGaP#z(?@o08`s98|z_c^w`PXL?5HA6WEHbj5eHl3>2RHYo;iRiXyM@-ujzqk=?($ zBv4e!ljap7);0!Cj_s%0*$Q-XGO!l2botsCZ7mNvjmALy(E|>4G9dg2KRhtZH*TU; zo|?w1BRhml)a&nOy18V-7#$0#5o zCDU9Wwgb6zxF~p1D_n~20}F}D%#j^*88l!nVd!T8x(Xk5ZhoP zl`Pch0aVd=sa=;b*ghJWxhfBg`m;ZFbiowdD-VaO2cF>TTIEbsrkRQkEq!!5lWnAj zyL&Mqi&xNZFsWDG$)0jaJb|S;Gr=nB^-2yIT@z~yfQMQ(Y8hDPI7)v0`AY3puPuCb zbMLu0W%Iok@;_fnf5mV{N>8_7If(}!se(Ta>q1zM{mg2vjVsfTBovV)Hm$_S%x=wT zEy<&i{HB>`1&gb~OJMj%5Dz zvLyjx1uAx*Uq(`U{9xP%*0qXpEq-Dr2}FdFpitR(?Guk?4|bSqr+y_6H13PZ*Gv~d9L!GR*Ss3yebb2 zg&__O^syVeKb;v?@BK%cNe1T?Vm=G+V+6p2ZhdkL^XY-|&gBe)a8@J!y(zA3Oz^6J zEpcP6K^`2*;WEaRAjI%nb4!0d?sWSqwa{U;$Oke~@_&+3o;5IvR}YUO-VWY1(YF3S zf1)J%WNDzkga`4OYwB>hNCkM}fa4if;8v_*VE(w9mwPnO!GUL_(3}MBm_EP3cMr=u z63G51I*p+bh~mA2tK_BkFpl0d)CEow$%uz{bi7hxKTzUE9Fm1!7X|I583Bf3(|k!Jek3r+1yo zx;DZN&>e-tfoK=-ly6@3v;{K&B(_T)err0R;n+^6unP8omEScC=?C2o|GsfXJN_tZ zo%Vn!2(PuO`G45xK-@>8`K@_H_4km^PzDMWN}=Q)`nsOHTz>F9wwxrf+{XnFo#miN z#RY~>%zRhNE05M{NCN6ege!3nrPY5Z>+SwqUZ)51aCDt}=cxE5lNS*OKdGELpV(uM zByA~M?!C~YeW1Y`uIpailX~OX^jv~v8@wbgLmJoUhSTHUfFiU5apmrl-6Bo%+f)sx zWZ3f4H=s*RO|6_dyab|J7BDvsuU;cb{mkSVN~OZ++Jc3`XST=$#zpiruM>c?KhD!; z+t#noj!pvnPL?g(p*u!erb1mlRo#KYB(<#le`%ma1n<$a)Aci4oYVQh(`-k)@-x3a zg_&6oo_TWmkDKcL!ecKOEHDsCfYS)tmjSN%1W^KnoxK^eLuE)^p+U12SAdL*Gfq{4&tj* zxl-B{?o$VEJ&vTLWaakYUAJE5$g?`)I&R0HXbS}(yC6OiA$(w>bpuu*!<GhKvy?eyN~dg1 zA^Q&puMw7fSN(pgNU867T|$9rO;mw#Nr)WCiUeajXpgG_T(`n8c5{mKTFTXu6ZF$MU*Fnz7il=j(nT1E@^|5_8IOnKTQHuIXKi@~o!`Fxo*nY3< z-{4%gLyy-8ir^$JRpbDYys_GxT>_zZrLe_`HcVLdFT*jj+lWU|JlB;_1{`JyfMR8! 
zzyoo h@t?oeT|8cTMiF0Km&mT6a0dszsVZxt3YDxv{s*LACF(|Z>F#dn?vPTtyE~*oK%~1%=|<$x|Lwi^{l4Gh_UNq@UXy{$vCWR5Quc&PgB=J-PD`h z#m(8u*1?k8!`H=<+|tL^3Iy_5tIgJNqvp$ydTWf;gKW`7Ez!F)$q+dl46{(z((zQ8 z;}}PV-P~0%xCUjt^u6&P1c5c&8fePY-6vLWXxmo&5993eMb2MO9lpM8iI{(Q^Shfr zEsI>v%$p9G-hGfhp0-~06$*InUl-Z{KX+BuUxFXIW=@Hm@sPWkml|FTsOz5&J@Jh3 z?4BMd+x3No25&)Ly<0w+H;+W$Z+)EcX5S|BS%<~bwD$}Lo=6ND?zh1;LxzpFL z>(A?q!anXIzn10U$I3@PJ~^D{Lp);tY_I>Ze6*~?)C;?Pzbr7Ase7Ok58fMf?)dF~ zvVob)2eQW!A^%Q*9C z7F^M`F6Wk!>vhF$=$=M(GnA zX?!me-LlaF^pjEGaS;tT4`sdu$HBn!Q)O-1G2d@8V;?&FZOT zLyTlevIGSWv&SbK^P)icngvxAijg#R<#$@C`X_upKWj{NT7(#|77l-sXDMBnldmuQ zmStC0y0m8J+zZ3T*192IUE8|VbG%XEUOV+9P>(D!7)_NR_`RYyL6|Z$2fqz_8jf?Do0j%qCzzI+dN zStNqf3yQHazRyPr552_fM6agS?KvIs?U?4<2TQ&Cv@%(I=@1q7YdKp|sO#bNEVp79 za-$*K`p#vd`PKTGOd+Vwx8CM=TT=SdmksA?_k6P(dTMj2)Qy~r)3Cn!i@P87_H*v{ zRU1FoHL|CX{Fm@pO|sNSHndR;^r_8TI|ZFnS?HXSIo0Zra7VL2*8NK|5*J6~Q@{_g3EC zLgl^6KLQfI<_^XdNl}kvkI#15f?lThmbfU$O1Qr&TS-(%k`C9?UL483er9Y}=4(&| z(Hr*PH1nc;@0O|`nJ{+3H3li@mZkFrj0>R`1%&3xHpe*A*J91ILhU3S$qKQUae-%d zb8N93@(9jkG(j8X+@v#DP~bcjqh-pQ{{M1TP?94=~jDST+sNaRH+_W)+6y+e#Q z=ns|~w|jn9bbw&{G7+5gjRd!qKN(H~vT5efN&7@3DVdSnoX+&BA`kMnqsl!CGTfu7 zB@0`@&aSx-2;m`;H3Th+fNv~0jD;R67&KwCwz>yX7a~ROJUbfbN1NJ~KU2YA1mm0h zt_MAV-GXm-Jwwh<>1X4GngmBM=8YKdV6)3{N1hLQ2470`DvPnIum_`R+DEN-8VtUN z3i`C_lB_@4?o83trAl1}B-S! zbE*ARG>FWI$1vT#R%pR8xSK`A&U7>P7sABj;R~PMoKaiL~8=n3O0Q zwqA+d@$bz}N5ui&k0)g$W!=nMJkhIv7&rq5k+d%fkDwz_(HXU)LxKbnV6z-%I0b@{ zm(4^kr?4J+F)C5MGB0fZ@nfn&i0QROxA2mKH)JmD*g*f9psApepQwRS4)wY-*5(O@ z;lDX*6@$Q$Qqt`)V&(MTsHI@^Ejn@YGo% zxS30I>FIndG5yCGe+YgHmYNE^NC%~!`1!zD|Vlk3(n@m(FjFFi& zGd#>xN`8k>O4c!E*FGg&*(sz-+9a^QU~6kohC)&VF?~T5YM8~mf$)SbU{&^;y|H4H zjEW8!<3{~u$7V8y-ceNWjvo~kYRN~_uP)Y{)2j%$i))7kskI+{I zj0mixvHHv{B%e>!LaBHhEHj{C=q=)j)hKmsqg`2JhZNBQw6n5JsvfMT@3_%JJ~bLTd!8H!b(>7Tv|=q znUvLnzP@_fy&I7zKI2_J`i6g`3n8?kn9`-J5nU>ZXnWR$p-G{IB~}ipcYWsB{wK*v z_~P~!&z7oBx%Yt3-(klJ@LXd@F?Y^9d_lr|=GVWGGbi6Znzos7H zTVs2|y-ie&g0tiCb_ie=jO!lR#h7rEVwo4kpv0nQAw-7Ud5rO!3TacW4kl30$CgS{ zgyMDc%$PDs7*V!p;`Kyg>!-lJtZ}eIDa7t#D~DF+)#)91LmBeEJ~u z1#%PVoTLz){+n<;Myn2-kF|hd=p(4bqXiXc*);SQajRFgzuC}a@p`#Uh_Wie^vSOE-t%#2Hr!{;~ zUxQcJf5v)T)k^&gHO!U?Re;UO94uI6TNyS(c_1>hl{^u#42lUR9oZ{-wO!lUF>}DF zJUH*?o#N)-sI5Rwj|Nvf!xXWiv1WWx#l$y=%~Ryp1QA3&rTAye1uYa$cc0UX@{h7a?7EI#XJ*q8SG#A6+t4I&c6ynQ&gD>nJssB zXIM;@Q#IsZl%T?Z7b2XhsCmDs5&XGNRAh(!H!q#+cPjI0jsr*gC&}UY0=RgcB7AyP zRW%K7yM@#cwaK4`1@fwfg8_065S=B}*+dGCw6cpe*%Nzznjxjo$GH26ts{HfnqlqY z2^R-Sm9io`#_rIaza!hwCT*%$g|}EBOJH1MGGJFBQE^AvVb9~6zJS7{F@H|#a!(^- zhGt|77i#cla4C}y@uN%5>nM37wv#sdtKtXC$XGk0>_I%zr1{2dN4bTT;LU|!VSc+H z9SVyT58JEvy;+~7E=QYFLAsxS5#-I0C{6j2@Uy(JNE+V+u1I(nDZ_vYO-n=|@s!7c zqJcOWvO%d8XS`ym1Q)>@{+Qqf-qgrZFxV_EfY%4@)BFHZ&ycDn9QUt~4QhRCoax3) z1Bz&7pHWnP)1u573XtQLA&<<}XjSrnE@7)|baZlSe~-W~qDQgcZZh2LfLhcHGbmo+p&G?Mv2}+#+A2MNNbk2#iH5 z^ma`-(Wlll+B8Fn=npQ5JA55$IcL)s)q}TKkl-q)_D%!|CVJEtekI)=i7;NN!-#ybv(X0SNgtS%iyDWggz)nP$O*4 z_{m&XFP^}PN*|Klo7iA7c!@*_VjVNI6Xj%8qxq9 zbMy(>?9k3)Zl${|NFyPm^NwF3&TwOk6Ocp<81&oEYUv!1;i zkG?p+sPVL`U6?{dQUx{IRBp5ZZ7od}-Gs>|2Z|hqR2Nnp#Bmszraal<(sZDNPnSXS zAt}j0P?0I)-2@xemwxN4;vIuBX{mxPmyo8)9jcOZwFRI2;NR;M$$Ea_{`{(#$;)&g zSy`!_4L_B|!hlMvAJij>J)zlT(o17fn|qaK`?M3Itr}jxy~$AV6RS?;yppLYcqE}= zlw<6KAKn)dvR9=QAN&xa_@zho(nUTp?+O_-KIDR!fg8-bn5;q_G2cPe4676j8L*ui z-{-~A%kFw_fYde=CBBW6{z*HMT&QBoT&!86?ZcQ0ZRYL{s)t30BJO!73uS#XVJcjk znmme99^MqZCkKjOlPsP|#Rdi;Ob)@P+)$lMiuPx_Y+^YrWlWT-%JJ56qPY3bNU6y@ zp#iG8L@tmt<0$>h?~rzY82QO-73H0kMQUtfbPYibM5O)x6cWmVPAW~5h!qwg+AF*i zOGr6#}KTi!9JPDrzI}SPAhj%Ix0Z zwMvobs`h8z%~ZvJLWhVc)RP5PaP8X9&ow5S5Ncx~>1BSi723EC$%GDgwLj2X2Rql4 
z1*{>%iO-P|UUL`caLq?FJ&PS;;7w|=QT5Y}z-}-xfU2?$NE&P_ZT;UXmYkU$pu%7G zit#`}rC+c-OC%w@cJFg>`QLIl5%x8wy=#fs<61jg(mmJSZy8Xs-IF2EGN9*OAXbi~ zQ8S-_P?*E8a*+C#eKlEmtNV3fn%q8yG=$$|0+xOERPJ+>z!kW^NoM2*qK$?2?(FmR{N|r8kDcMGUjsK`%vnfQHRryNdHkCzOE=FMC&5qrj=&*zP z8@)BsxIL|{d62J!dPcih*`xEkg{*>|0a_O$BbvbIbCnAA-Au9RV}>LuiCT8E3>?Lp zw77H?Kc_C?^ns^v3CyVv(z7t*1_Q`3f^Mw`V-bRfCzTBx&KeZLo}&i(p9>W= zYT-#5A_3KQQ31tVkL5G=5LNDgf~KhQ#v4mVNI%*UC2j%@$pC3FNxJAe(Uj1hvG5)u z6**c;>JqdTW>~8>U$!b#IY=B0M_DeMaXvThglRBgdDnRC+>&c+$mg=J8*836+ST7~ z__;BUp}td9!4|`gN9T>gyJ6gfQA@q%l$z^EVkNb{AIYVaFLMZ03MYe{=LVNHcr$i- zaHhbOek_IYD~$Ybg4$3fj|U;{05)i?wxF}l`e-BE?V$kiHi9wyTZK!3*GLHg`wpCmTy21x|F6w!l)e%H+qK=`USc&Qh#&ot zmuz)8n8#*pF}+D~b#;?ePoc}OB*v-|%T;jd8f62H0&uSv)eSJEM@wAjU#dNG(W#S$ z!zfdW2Fj?qxlm8Dl%~HmnL>2@;CwW*-i>T~=%(7h{;;7TM2Wwz#-XcNFFg1I{; zDXR4*EUb}(%%f*&#gZxh8 zF&d8;n~RFRARTn|PgHTAW6ui7A?sW?(amT*OKaOY>34kD9Z+Ra4&(Jv7G5}why1p- zOT<0CE^+IZBsSVb6}JS#?*glA3|Aq<9B#$gHotW-nDcf<+&#$(Ei^#6Bw8k9a5x|~ zis-=7P~r554!&a0IhyV_!g{ zIm}?Zm;51)30f<{^yl~**506#3Fs8Oi8fh%Kv!8o34lORO&Rdip^M2e%{ZpzOCdPf&AzG#xrpvAULDtyj@ z8)I~B!Ek7HhWAcFPCo&e?tM!GcBaCbzf`h|dn|hm1HBA}PMD<^qwQCHP--wz;>QZ; zcOLpa)^o zDZoGanP3uOj^+vBs{ZqwLs$-NhO)K$dMZ?}N;^yWj>kgLs)Hyh9n5W=N>gTGPE+ zd3y;xI&!iz>o?n2`e#VL0vG6&ZwdR7VqFodLcuG+9#~DVd>9eYU!7s9dvx)Sbbp6P zB)Ci}3u$P0#=~=(_UTzHiW+<{!n{;I;M`G^lb0?l_;TM$QP34@RpOXjNyqrO9)^3Q zMbeyVRq7wST(DVPtOuJk6<}zUqOsD>x$1rMh5wK+(e!*!7@h3AB*bNt|5KqJ3u`fR zOYAl`7d=`f|CuqH{fP-oV646t8W=WGPRt}Nkb6`$K09q0>HD+`S9|ua0EXgO-&+bQ!(6(EJ zhDNJbUlkW0Y!LB=^oNvX$~}@Xc9T7#v9IMga@-TP42vR|5a$MViAMrpV*0Z@$uDG; zH+)w8g+Z}QvE6IsC5Q?4rQ7|=!@%*GLMaB3Lpp%l5 zYL4aS6(PhvVXcwEDN{;QYhk@YvU={|>)CR?+fvVHQVOPJf%tGreNK9N$8@iEmg^d1 z#N^_YX+LG(+GH0<#p~Mcf*09y(R$p-9R6L4_9@vta7W?{*YZl_iaFRsGR?$3NPS<1 zL|`;&2LI|r=<1krCsSvX7#EnAv>U^#Ur5Mn@l%>yY7!MNVV+@X8g_5XfT=NNMcWR+dZBl$UK3u3zxiLnSw@~Q+$OEk_@J$F z`4t>wGA4cv3<&8$>&NJeq#OSlY-n%`m9t*9n-ODN`NW~%S(J~#zrZvo*qJa}s9l-4 z^s&ZjTaN*~a&x{zU8 zK`|sEqCf-B$1}Moj2s_{WDSyvN<`t8{FPLV=`EK!(hip^T6^MEnysYSl4v6@&Yaq) zJsxaKy&e;~OS@pvyfXwClN6!U2Dun90NE-DV>AuD8$< zm21<)u)v6Ycd^&=>9<~Tq;QhB{EIwDsqNvi^RuTX;aF=a7TY!zy}ctwOE2UyhzD?v z#pQLLap4-OC_Z%KwfwPX5b-Z8EQEVyqH(Fi4Z^FyZ^ISY_-d+bmgwEef?1d{?)%Si z%9^%At{+_xxCcl;JY)3)o(n7{Aq+T*GqDl2mgO@L{K<2imT7m?cSMF-r1B>8K2+(W zRj+&>jF+g+@LjP(EiuK{G1f1p?&_crPkj7E~prk zHRdpx(lksDgEYu@?qEyyU=w$PPe`h}AU9TcEG8&a4E>AO2KF%~`eT|yw>2ct*H8>- z_T)zOk9cL{N?3Evp9sGee@TJAaANDz{?0Hw*#Lj@i?-7|PftE`tV&@>DNW?(XU`mH z|3&9#5H){5*x;oh+$CfNP8U&ba>Xpfz9w=L!qW9_qcd-OG=eF~H0mF;dMFWZxZ&nfbY9O@FQ;!{ilC3AHBHC=6 zOdiX*lLD(58 z`V4m3SUGTF{YU5(KRk22*TYxh9=j>^;9ya~YWy;#uVR8dwnXvwF}|gAmBWX_<}`-M z;FRO0QRjFh7I&spp7}~&vpik8md1^5^0N!k<)EWAAr9|Er1R_@1YIT$n7;?!;FE9GV3p&^Ug z)^t0rP3AjLyyEsrli=1v37F1E8?L#VkcEvBquMqJ^UagK6-y7|a$}l6%fg3KSC^>U zR;pY6>K*Cr@lp876C}C$A!ThQ)~z$&{l-Gt%0luhUSBl0_o?^(V>&rJBz!3O-(wJ$ zmbzOM9a`+~vCH`1SxSGHe@;M)h1dUcStxn*70$mGL1Xm(duu59pw53c8j-AdZC>qL z%V!4?h4Q?>whbGtJfn%Jk-Kg*Ve@>CguXEt<6jY<90)gnzc18;Z&HqpCjb!xICcb0~x~53-J>5T}GUBO_ zuZb8|Vi@#>Dr7(m25o}ClVs}%hgz9I+y1N1`i$+_FAixj$ zlVESJ;=})UzZSda|3XRW-wyGOFR31~fbsi(F%7@ee)qL*kMZ9(gfA!FfL#aC?LQcPb#cgbs-9Cl}*HXL20ledNh99b$y-+ zm=NVT3ZREb#8Iq3YgJ#M{gW75r2w{D@bWUBQ_@{KL#RYpQ{kJuRUtYt&PfXzjYISd2pb!-3|9X z7th(^7UFqzlqFT`-)GxPNVhx6ww$*#mKE-yZM~1|d!8CZ$_rV^M{?eF>SLBghgSWW zAcNip9aoLQt|T0#xgp79HkODE1c`MjYO9lo?BN7Cab(H4iwG3-blv_vc4e6IzSB6g zVX**;Pf86PF`{iU#1I|YXBjUrIG*Nvd;SOE20R_&EeZi~bV+b8Zx`-hrf>(@+o56k zvQ|J6va|)4!ycN{_Wi6~t13e}xQX0BYPXsWQN0rT(>S%JmJ}WK8h8DzsC@6Bm0!rGInf%Xv1$bCRZT^tsz^3He0`ZN+s2j7dYx5@_Y zlU!^{SfI89Fpyt9wGI>B3Q8!uDnWLvWC;nhp4WezAa+lA6VH%MKkV?LI;=+?`kI|$ 
zgrmf2`b&YG=(d7Z>3WxMcEAu$FX`q@nB4SY=mqQNgPw*Rb&LX%E4V&+aMQOl=U!4^ z#6ehzc(1?N3QW^G@EXz}8HoyG$?~1UA0EPOJP2}!YAE4<%Cnx>QNa26!#&#Gj--+H z04_Sj+zgGw;TQNpiMCnr5q@oMhRbR!nac?OmR&dxrmYj!7P{c{$1UpCb*FBW zWkYY0i;jI%-o!A8DAe&p%ztVv{E}#@6QjNNQsMpLsciZDfKKlX&QVt)ZJv+W=C&|+ z0Bw)#?V8N> zlLluz_1IYlK5Rc~gZe#}4WqK1dkLQYoTuhCts>#ZhN{v5tqOeFX)PN{F6wfD9GfEq zKsWFGD}A>pFn^_H%iSqw;gI~NWqy_Uqy)^&kF{TlNWjK7oGBRDoF_E%K&fX_!4f4 z&xQ0ln6o=!>D!aB^Z!^V*Et;hhoZg?Hjy&$_y)0for^Jlu_j2&T zLECh~%R8hz#L!JVJ#iX!kteONRvmD$u&{uW!2g?@ zn=7=dDMSBuBaB|q)%Ilvp7C=#Xq3Y_kZ`@EaAz%>tpDSv!}Fif*Ud7KZ%<3@y%E** zy;-cR%*>`fKA&0Rjy84iBSXXsgLijLht1eM_(@vynSV@AHyefw(>-`r5-MtK0Ha@T zTVB=WR0=DiGng#xtFA)azVGZp`du|dop$Udi$YQIJDe+K=kc$V}^Kx;?Ct#L;o=m?$jEA=xN)^{GDs2PM~_16%I$_MnR<$7KzM+m z1BI!pXEdPiswCL}fW%eo3U=x`kX<+VPU|y8nH_XR;!mwXs|~M`%bu?Ho8A)hoYcT~ zS?(4zPG4W``~KKd`tjgj`w-(9dSE1y3L$D~HFkA1p4H=uS*Kwe%gND?XVeVCy=Q^}e*JS1L*<%mg zhSV#q8T(JgVeEiK;7baC8-Q#}OG`5j+$j@>E)Tyl_m5_aR~pUX;(W!>!%@PJ`!RQ) z?0iqlFNPbh2g_C~N7YS%u-~CPtOVZa1mP?jePjmh?8UKuGk&`>o~Skn`!sIha<;;5 z;5Powrd6LVa{>s7flwR_MlGH{ zt})g?MCTqlBPtsePdDo_6kqnl{TAuFWn5E9NW3h@OKjBcly9F>a891oce0Ergb?%uRgn2u+C0_p-)*tvQh-t4I@9ZVT7u~u0ocsA~`jyEz zFDK`xU2cO|lCT8D*=9E$6&01Pfq`VHy6F>y9`vIlEIdeQd4);=7qs(?4@9;JRkBzO zS`4)8nW9Le7d>YZCrLSgvuDHkeQ)(X05?-}v2-C?xg2WLb;gFXz_w>pLJoyGt1?uf z@6@Zi!I21n{w5}ytZ!PofrT5FZ|^9Sl46)JUXO1f_ZO7)fx@mCko3C^4#qee&Y<28O7Y{&YU1YZE?qjahKulaY;XY!H`~ms{>4gkDiSJmUevg-Lm`Mtpg1g~2^L5eL)fW`Uk~rTNga zTM59INh?6S zJ!;I|lk<}khPb7N2j3EP`k!umz$XwJ-~jg~8L+^C#;lr|TqFw72@_xs!Ao{rE$fcD z86%6%1&7{Ft@;~2)_S-)8Bau)k3xfYek6(eCN3`M5)u-;zBkl%tyxTx|1798i`b6& zrFCzVd2u6iYjHhzkCTC?Pk8XY6N;m1u}ag62O}&jjMrrcq^+X^ETnHJp;Qn|V1ttj zw3dDtxyeLqnJvIZ;G>4d4sXvQ&Xcg`IZ-k!2RM~h+9q)}>Xsug)m52r3#%A_VS-%5 z!%w>$Qpn>Vf1S)Z-s@OZ>MLwl@++hpbmjXCCjk_}hY59aMlV{XSS?XlfhH9IrgJZ% z2yqAiaX`SV0E|8I;1`-bg(4{*N;g!RFDN*Spm~DUqXak)kR7i6S36>96PALvKi+c? z>Y;}(4)rSvi%4W{YJ^^>C{e83uRRl8#CvnQsREe85zkm*n^& z$&fw>G#jMN)>baz5i+3c&b?OUMa8@3k4-EsVeIYgPs?gj`b&J?+m+k~knJ`gtb28+ zpZSskk3thXR?#mgiL&4rAIzm0SZzy?E{4$3GF|5e6|6cm@fsT$McQV4&dZag#*|{h zl>**IlPbo9YtF8m6c_GEnIg(bc>ds5i>iNa)O)?QMhMWPKIIC(sT7C9VV+f9KuR>P z0=dNN_nQuos4kDcZS^Vu&PVAVOJ=t-})EFXIrL8bK<3%-&dLCN%ZKa1RQ-*^G#pE!w%eHsT3tR>q9)?l2SxnLa z+t$^Po5GGnT0!GT4p}sSp2*0^XlQ5%27;;_cI4qV#A{_`rTcYWc3-^kFQm(t&e*kf zP?97_nSO?K>m-(zmIg?R8WS!cvLAXYDH36Q!M)R3UXSYlECW9W$WtsFnZGJ-4LN*0 zf-B&-1*|^bW%fY{VXw{!JkJ|H>ulWg8Qp(cez?i(Lk9ijoOL~a);x=JRt%6)P)skk zdH`l&e#rvDje7~BkyZR=#*U5HBNx$oNtG^7ff>`_Qyj@w<-gXJV8IciT|F{u{L++eg2hbgtj|RI_hqtn&>MPye ztjhVq38SPR<0D$Cs(^&g!3XA5XiIo-c!+_TpU;7pAe|yh-BDS;>HyG(pSQjM7cba! 
zGWYW%Y}I!(3@0qr|GiE>cA{3R4aDiH&btR``}u%plZLEgvec^R;)39$3xx%Phe20jh8l3@0IE?CD>4Vo#Z3 z5`vo`4QMFd45lxNZf(HV>#dq&fEv@T{#)T@U&3j$(#AC$0AN+zzvlgOu^AT=gFs3M zFw8iIbuZqBkU5GmZcq+UP|Yzhqx7O7W}4}HWRWY31SvhARB~@(xK0Dqzl;~?H?uaJ zGfwThPr^X5AgRZm6t_glQV)jm5+;~=cwjf`#-kBeWIbiHfBh?VfkLq|=J-XJ%InOX zeP#vwTbk$7^ z^5LGoE2*iiZLIJlhAS>E2B_oSxHGlYvFHMuM;^3H1`wesCHJ5CrRQ$MQDibe>w3Et zOagwt%@LpylwTCZ)v%G#(9He)i2>yUARkjGgv;HRtox%jpp2^*#et)FRgyb<1V9@= zS?iQLu1f=GAd66_UR$8cbTc5`z7Bx`87B8&n3 z#({gH&)pC&MHV_n5)wGZEikL9nsOmPA2lma) z_&~}4Pz!Vcro3Z;O&+B*`&RwWxw#|1GK~{211>*O2EvH>ct&4rCOdql_+$Kf@w@H% z+s147^{1e||rKRumO0%QG;Ls=u6K+;aLD8f8X9QfxQ2YD)e?<_0*Z{yf ztV^?GN&xEs_=A880??Jc>#d*SnB@u5sg5wzc5=aU;jOPU+>atn+`xiTktUg&n1BGh z{Gu2c&>3lZb?2~tVfovRUqzz$#0mX3tSun>LqZ@#A)c&+ro$j3=CSFtYqhJdH9I~y z_u#x;p}hq&)_=w9^hbR#Ano=#-?NCd;f$@Uq&M(8F75dFNr8=zFXq1Yz6TiNXczlb z-6>$i@+J=bNdmM&4!3|llu4$%4639nKF zfS(0;VY4>BnXn?Ka4bNABnhwv0NKkC^u_FZS?>c}J2qkA#PM-C{K$dV0|1Hv!UTK* z(j!g&U3`3eL(*r42B72&SoY6Wn*r`{_T*`Zr=XzV^LSi-?fmw~89jCg2Io0~{34f4p92pP8J*(2ck$KT3gLRe_XX>j#tSGG{gOBnslgCI-C4rJY+6$in;UJ7~YxCsDg zDYD_T$!g|!xl`rrJug`EOcQ1U1aTv}KctIijVzaL0Wf@g?F_z4snHh9Vt%$l)V73MiiPFXYx{g3b03-{aloS@EsG?G=%LKf)rmoJ+ z$_f@pXFz}f9vhS0be>S045=@4-Dwl}ov{l*N~keScNIuad;|w@e*5(#w)dPnHjM$u z3&Avae>ZR)yzA(}Jqy2UX_@zY!}`YYk@)GMGXIgDE;baH62<^gFhDoR0k077I|J+a z-jgOZdzt`_4e-Joz+6z)cf{@tkj^V1V z1;8B*R5TDM^~!KS;kb9qhv6^r927r%bL}E|t`$+#1$gD{kphIk3CwlL!+1HN%zH*p-eGe`m zv(x1xgt`Ig03iS1!U8s7!Z#%SVB*n%AvBSApg{t37^p|VSMa{BuCD0l#m;UN>9@f_ zNRBVa>#}2LisR^MwZzau+pUMb0MOZRX9IE~n8`~FUGmDwja$+^`}9FJvTZpL?U@DL z&$Ih&O5hS26rYp?1p>V2rq!C71D+6|&jQ5&F#A74kUN4~v$JY>X3|g;fQtq|R z3Rp_}a?WYd=+He&786Yc_WYVkzf5r~QVLF3mBforIJKghSDF5HdSI4MmwjYp4`VV7 z_RCK32?>LoO3#2|!0NqDa0b$R=VpR+^!4QlXbleQ-z3n4!WC(Tl-xF*+I1e7Gkh$r zk$`Sb>iaXrv+opW-6!Ma4MA^#KPD#&0g3};0e~*8`U%yyV(37A046DEpW;*Ttc{6{ z4LqO^0N((5w^Vpj3LJa&vg@-+^6;Dgf8PaQ87w?+)#Psr`~2s`D+nFBGtHM%5}c*?2^jZ^`V}7^hYP zPAbb?u1E090na*de+S^8_{7A2jS7H20E7x~`goNSBzd&D@$m2(owtR7;%>UsM~B|+ z1ipo^`@|8x@#h2Gt9=Wd{|*#cKb-JY2wuEU_gT}#Rbx^BGS7xfSj`nrG$8e^uU!}H zxB$kD;*b{1#>+dlI3Y<1A;AOeyYWdgWlmg!;G0;I4MnxMj=I{Mj=~7lXC1hJ9RlR} zomfT$PNjK(yaTGou9c7{ud*t3v-{q}%#3*Z?e*Su-LbuqL_!T)QCWGfJm|6ff#aQ$ zBxQ={UXoKj!_@B%!0)Wo90WXoay>x$1NE@qz5T*MLrdFeJxvOn0hYjDc-qwz&&Y=Y z*gnTK3Y?9IV@~8h6xZue7LZwDM*jK=H5BhHwyLHB=(yEvNq#vJ%NTO0R z^zsRa_3+IN2OvXuw>v}*J7#Tm<`h^*fb|LR#Q+2)peWL1WMZ2}Mzz28CkEwm_~kSD z^#M7*Qy4`$_&3j!0?y1kF1k)#4s*TTT2*}lc>Td_-+yuF7|0=4SH%K73)tC{FP+2c zi5w{URP@~jAK2*?rv!ejmJUFJ!%tXKNE(dq3Ej4Y?7jJ4pXdAko#Q;`JSWc+pU?Y!U9amkt{Z+oPzVx5VCtI|m2eQA zr`s>S(%4-kXxZOQ1VrJ24c*uUm|AJ;N^&NM$;RZ}@rQg(>kdn5|I)#aWC}fKL!8v; z@T-pOPE$XENs`Ss&2AR%g}ow^SMWI-wyq)^5*l+^_F@vT*7z5k%eDLiw_8W~8i}j% zJsj?5kLkx)pFP7x*|%5=(E-wcUJVLm8%PI|`tD=qpcMk5hbeloF67QV)Pe;cx&ZBz^m4VlW zO+f28Z3U=`@*X0z5^`pvXpLdK1AQIZfU7|l+)cQZz=dTOa^vCQ@k>Y$qku9(P71s$ z(A}DjhD)0kw-us&3kzp=mWH5X1F^(YY(-%RkaTJ2{a7b375XD`jfP#LqqL-B0OeS) zchoGXE;L*jBfDXqMwk72Lh|~b=(7)RUcU}pt`ldi&bt3Tgx;Y+L$b_>9ez=ev{v1H z4C;b%xwz1j@NS{1=q_?3D&fim0+zD96{RTjCC|m`d&qJfj8{C4J*+ap8d_R_;d(1Z z>s2C1i>?JvS-uzJ?Etp++M9BBk6+7=MM{8%aO90<*~N0Hu~kTFC%YLhlCI zU2^4zngewRJtPE0C9ThJH2vKX7dzrw!X| z;QEwqUq=vd(iQN{+B(rr|CE*W%%gXfndzncX9ayxKqki#@$C@k;^}R;J1@!dL z`v=X7pM>eO7zjgwUxuqc`0N(HpkP~U>9BQqA6u#_GJHqovH&vso{#z69MIgr56t&1 za{5^YPf-ReYdIE)GBx-5{i|S-Y3~!6dRf0#7eyZeqg)gqaWD1;bbfw5ypdWl#U=yL z44WSp4MTtZd{Z&FR-J2y{mKW>DtWR^%W0#^UB#l+x!Ob60B2+9;UDltG=2Yl${PIl zV|3bFKe1lSC_eq*qjRrm2{IY;7(s5K^?dYtF>V!_ijN8NE)W1?gGdlr1m;L z0!}-;D)7bB*4_psq{Q)&d2%P#&}qU%4ra9??k&%pe^gg{J8tPCWVlcO&E?qN{jUVBmgZs%$K!)s268o22BZgA)Xjcvbe> 
z{lzd8MQ~=egpf+Uu*~$ri_A5K@+_{MTKGk%;`74-M{{=|$6+b}M*>pZpjZL)JWjcj zaeuunn~y4(*Yf6NRHOA8LOD42Z>M`6W2RyN%mX-kQByOvsK?|xrI0s>O!eqCWBLnn z9fR9)xrcyDZ0~rLJP>1w%^O48Y)!5&kDKRk-h_M&4Ur>}xKgz4C z!-)7A)D2mJS;QY45VHCjB}DE2EiWy^Gp~$-1lFnYt|0~(0IzZ%OMQ3#Z#d|r_9yI|W}_*3g@r?~`A2cNx?;~b+^6z_s>9I^uO2o6X` zha7-u>QL!G<#%6*&0>ZwnVb&&M7IH_>TB(9ViKG_`K$5rd0ZtLq z?|IqDMoL05JKY)rRVrFOYBtZ7ozf`mI!MV$ZU(9u-hR zt${BK3(K;GhHsLlm}+>ZUgLrUj=wo!rX*Ev-}=v^($^YhwV%b%W88=1{6svE1yyD_ zQUdBZR0CADKCdYpv)8Gqt)ru()&B7-m*Y+{aj@e~nkD3kFxhHL z&}EP1A>gV&e_w+Cv*(xeIZ>FYI!wrcsfSWxmO~vO3-|MB8eWuY8-JN;S4>JX)A8&m zSZuBsfdI(H8sB}$ZByApg>kR%!{Pie7L}hq?|d5ww5PzDySTV~K3=N_^(Zjwq$leS z&V@Ru5x2whZhOSZwfwOr7qdEBQaT=Lndy($r+w+8Hc`|1_A~Q=T~U*dLrX9j^fNLs zGb_|!I%a2Cc^h{g!epG8ro)>8{UK@ta6PmaV3M1cuSy5b&zl3u37Q%*P5S~(KuLIk zEm7(I$p5K;s;a8SpgdJQR+H3%GDpx5G{!IY%lB6F5V2+N`UZZw!Sh$q$0i5I&F4O# zI^9|Oi~~LqS6A1GsbiN%H8?Ot|2K1*XOLFwSP3KIn61!^==}%M8I^(vXpM<+cF_q) zq#@9xa0Y1b=BF{{*84l2hNXu$fLo>{bm~Z@TLORpm>oPwL{m^AbLV`GR1dI?i(00? zvn-$!X-hP@dG>lH1#aF9j^eGm7*4|Y1*UrVFF=usA6VE#I8t~&L_|codU`>2n1+w8 z0Y80#Xc`;PFB*m>?-T!QjNw-#!l6Np$XqC|${DsVml|RQHbQL4! zGG=xFW>b{G!s+KFd2StHC1X4+&lSh_Lo(C*vh2}wt6Ez1wtS;iHZ(Ap1C6olU3TXd zOmfKhq|X};gUE;*R^VwHpRjg~QOli}w#kVzJRyo%IAkj~>~Ezb$bQYBUO-6bIn2+h zYHA25W#Fu?s0h|Cu{;#@+2d(C{peuo?oRzc@{o<}P4j~;cj$|wMh!4Cjf17_^L+9X z$E9~EnU!~U3Mar;GG@CKM5E=qs7h2vrufL zNXRE@+cCzolDvA6Rnzl+k0u-x0Ig0$_xGK>lqFrO|F`q)$4D!o zQ>>5IUILB`@m2%|;@?%TXm=qsR3v*;ArtWOar5H)g(D!-fVDvASYIZfOM#;SvKU$g zv?92iz>0{cmVig-Me}iHy#Vl)uH4b(yBJ(XOdt!KfZql5pNF0{zuFSr+*H<94r7iRN{;&4 zwR8+RG#7` zo-^%2-ye?Q4-49fZGJwN3r_5$M;_Sq>(^h%vaU3)3J;ru8vfl@$O!~Eu(~;xl!5Bi zuJ~$r+0CqS%x$^6nXs6P5GwmztW*q?BOX4!s|+N_WU6mK5{ybKyaMyT69bwOFWXu| zAfwM>Y#G#*{x`P~9K#^x&w%(tU^p-tq-8po#6irfKm|h)fq^=$&%!~~<$cIAq2D&* z>8Uec`6BEoN^!M3FGb*j*bMXz(W5QRM7iKSDU@VjgnS-@TlMG9-F>#N`oTZ|cB4q( z|KS>WH}*5xim?=GawFPUrspidp)1^Luwk_wW@Oz7-akKd`Y`HEm6$4ZP_EWY444ug z;d0%!*Rh2Kzkoo{fZv+S!O!=SRra1=ED$CKnqtTBVPt8f?l)tnJ7QvyU=N^rs5TTg zuyD9A`AI0LsID=rfDCXh5KterL(Mao)L`E1OkbD$ zRyMXRCXx<-^FQ_)INH-*Ai`mC@!aa+L{teN5HFcMZK0l3gtlzlpNs-Z(^af*_~{lO*bvU{?_p85E$mUr z6>7*O6*hC;f76l|D;Wu@8;#{arg>@cavd;?;AVn<>p+eSjRwYv&}+ zJcvY-OCp4IT7P8zpBSo13fwKYSh3Oh|NOf#AUQff!qE!R!)9ms6hNlSK!a z^l2bSlE-$z1tYyoElT4LLuo}F9DtMnVE91lRH*4} zw{*_N=$sBi-`%S+Vkbn$V3%)22XzE)Aj)wIcTDs{4>1Z3JCQ{sR?)C`$Zhzk|G#kx zXb@2NaUNuXalcu=bC&?F>ZIbX8a-jm(8x4mU%z1A&F zeZG_(d|oqx+q6_`-%vSU*vJPSgjT=(i7p(RquRNcI~$>VzF7O?E(3Lse$f-O@sD0r zR8m7ZzPmAMdX(oG-bG!P(;>3VT{s`l_8*i{e^Xly6mCi3_C;sFjg| z5f2KBf1CoWUqKHOoT{xka1CBXwwn~E|^gv>lk}2TC ziRlYcp5PL~A&fWJp}n3&sNgmD9ZhE@lcZhvHNlaYiAhE*Bdo9xD3V6Zb8M*%uje<3 zo3fXI-hwYM0lCM5PVfE##55oZj0Z9&+q)e6mY+Z$YnVbgx{krma(D>p5OSD-(0{KE zb$Q$UhYPsN0cL_9Lel_7X22m;kIhwJbT(QT*%K#)b_WxS{zot}(-RtNw7`Ty(7e@J zGc)49Y2;|jQ&-^O*tfg+FM{tpU0k}AB?Dez=E`fd5^!k z_ztmWmp#Ugtn<%jOiHp23= zq#S|Cdy}Q2{am}~LYI@Llv}+43{#VPEEj=42=P1Kb4p63n}oM{?Drs3{4jW6J5|tf zv}PR0!%qozA!)+^8`oo@pWD|&fzY2T4!Rh~HbuuEYJMj^STx6bpsHGYY4U$&b{bhN zVV)^xs!6xrtBN1eE7$0;H%1pG*^5>f2E@0~{&OP!I7SCwXg0d9#yUP4?7S_u{*GLe zo2m)yH#&kauD7(b+?IJ;k(HS5*D>eBWGr*H3bUF!VPB5_tAgiw+ta;4SuESgk4pUpcMTfBZgV`u(e-7PkwFRb7)8syC-vj9jP|A-V6vw6on<_&>OsV=j z)L_Bz^ileLIjj{Q*Oe}-unXmQa)3xQFmu485#MaT1TumhMOyw0%!=Z&ps{^e*owCL zDe%4MPXa9`5zZ=UZ^gk)KfaXS2BfP3u7dXD+p_hrO&oEGfV1h%4Z>4=F|c}00eb|x zKhs}|X!+V7jM4IAgo9FFe|z`=I}f+qo_+@7RL?I&b)~?WAh@ixYAnp0Kc&;)e0zpi zVrk{wK~d~VNJ&A(*M4%6@J%E@QL;SSbiCFE1n{rZQ^Bl5G*qOiPhBYk3qHXs`tH{W zc&7Hp9)W|eKVr~OxCTVu!Do*C*Q1$ZfWHY8R$KU{TOp+$iE}7Zkf4G?z9^V-E zp$AL_!XNGB%ePbb(A3K?a1KZw^B_-Rt}7N%I>09gtO1}ez;AG^=H!LCoXA+D^9F%P z=gGCHq664ti?O(VK`Q}kL-MMT{YUM&;y=J@!kII>*)uRh8bV^rl^RkwqFKvXk0)H` 
zCdOD@zC&^6lN+)B4z1P0tT|Em>&K`|9pp6lD}TQd5qpZc+T{-y{Ze9{*GWpRiMq&` z@=+*}FT=Pn_w|(vko2IRzBuBxIX2)=4d~cmGU(Sd3+equWZ|h93>mcx?wLeIS{^ zto}Tg3v4prrFeGBhm{K)UBJr2^Mf-z-z7J^2*(K+_O@y}xHn9k<3?1Rn-8?{3O62B zX4zqu_m~;UPRh*U!K{e!vf$~_yQYV<{$avcSa~3 zZmNE-NfPEgh|_t6T!P^1y3PE8|151u`igy8-UnuSm(B9?C5ehU!yMYC-Cs{YLGhha zs|~Q`T4t7)j<$0xlmeP)!XBz1a5R!hlg!J*S#PA5@HG2`+c=)!eAtXFta6pw^bBrn z)f8ac{8-4X;63Q4{)y&$(L=hhAfOXr%7e@IbJFAR6w5rZMZ9y$K$ zM!0})-@Z*Rz0U&a-MCeWNoQ(5CrlHlfy~Diw0OYyxi;$b{c>XjCe4ICaKQ^Ee0^a9 z$W{R703`r)9XMt%dkaWO#adT=4Y^)tkX2MWVV~0B1_r)ZK|Ox1r;v%cJK~)y6|cDA zz2(OYX%ZMQ5C;bMuXcm)0_9drFLu|^z`7XB%f2n;pFXuyEqRJNt)X&i9JorNIqEdn z^@fDn=QizfeAhiS(++WExSQzp9c+{Fy@wP6(B;j?bZ0Y5WOekTTgQ(c-8gYz_RA_R z);W^T;W{|XIzJRyJ;TSx_c9KBN~>?cb|t-c-jn67WY&V&bGQsJpiz55-oe_I7SG3} zr>3Zo`#m0NT6`>Xbmx~=U_pg)QWrRbnDi>Hcp9dxQv6ZKOiK$cECdHdC%`o%9R~>^ zgOPF&aAjFF^aD7)mJ7!0cQ)Ze7$TRpqguVZ+l7jZfP z(e&@y82FZPx#tOsV>%MoyU(PVObWO5=;j!pdqcSpN>`h-gs~B90l>M@GEYTJ)O*Z< zjt6f74m2Xq@(>||(ciy+<9g@4O%tKoo@}%&XZl1+7yb@~iH%07Wa?4p$Egbrja3`LDG62t*}V zKW$_Ca92gktT60%b>B5|&D6KnAMG%WScD6*oNAJJkJc$2ew_=N|9be24h0t&*isGP z0^c(OL5Nh!b@16~%|U~Vy-gPlY{TAcd$8dn#xXR2Npu^CI#3flcLo&@Ns55>)roX6 zZX3Tk7p3EL3lmKXNtyQuity~eX?Y&*O5Hma?h{=>2yrbQ}p@FJyGg0+FqMs;*EC8lDllSUn76N zQkW-V%z#Hss-KQ`+d#*dP*-?$4yD5$^Xv_5J5i=zZ&H1*cgJC z4?IscyPUTED1nVRPs733c7b;4esf=-eKYj4|Gaq+1%Qr?D2>1aBA)pFyqa*tfL)hx z*sf(GDxBt^AVBo1nkj#u2W465tO`n$p8~)Y0fZJ=RL3vkw}^vDyJ(zNr$AHvqxOiJ zMqa+ik%U&oqXOr}0vQ*YN%Cy*eEU%R$^B~XsQWsT>i^B+-h!HnZKrJ>vp89Y6xvA)Cn5z$MbfRG z^}0^84kJ>AWq55>xnh&Uq||F9Wthg1iQBj^JLcs}$I43?p)BzV+5{BsFqpe|1#M@J zpFO3CncE;EsNe%G+@i9!pHEq904&M_+n$a%5#n1D2@k3t79hqYoKUR2wd6?llWBaZ zK<0zbJ3myM{(ifR5E)S1E>53euv^Z4!LXY&23Wi7l=Xi`5Xf7KYa+#*AB{X*6BF#1 zxt3}>z(u;r!`qY|uUxL?`1DYLI}L8Rc&fdV6FKlOZ`M@|^($h*{6U@m$1lH?HbOS| z{MV8zViZ{!Ee~lN6Jm_bYzE8;3Jh3)n*0hAJn9Xg;QPnGfJKU91+lCrPv$T=r_{Ju zFlOl_*WA2r2k~J?!>eG9HX6V$Dg3scz-IQyY|tq zFMrwLJ%C4>d3akK*B`~DEAlKjtw9lWgrp9-9(`NfaR=*KymGE+dLKYK_`6|JXKKSp zUE#fm4ap5mxtAx79?io7A=`SIWw|5`y1XJQI!>-Q^T3CMA095(@q#-}zDpif23M8( z5x>8mVIuHmU;&g^$GsIjFbaSOLYAtE2f;%Se3o3(YGO}1!mL;}aFEt!Ee!T26K8my zu@ZZ9KPk z3%=89V`)=P`$Y3n`_DZ@)BI3>6<3^n19hk^F))`eh$+9F7D@LR#s+L$amkb)Op1YN z`8_7zoES<}!`hyHdi>FCa>Amrry5_3@jkJhbm>equHO330DQ2i0iQEh^{fTQFa99^ zs|A$2K~W(g+#w++K;bLDtB61a20t4hYW>EKBg^B{=9pVeyB{6k-hXjofMf*}qJ@PIgzYWnv5MoXf9s}h4hP=AKw7)MK~;P?G_vsdp` zzR;jz14@Rim${dB0^lG6d6x+dT52(-6hQ+v9avT!d$bQ^^}mDq-`{a!ByVVRSGl z*Vy?};M&4esC8pB>FH*(Nq^MU0z6L%Hpek7{v4gn{pKUyKQ{}Q(}=lkC1+ILe_#3} z@{aKPQSLv+3??Zsg+S#8E|K*8A|xfPw#s0R19y(z>xY#zew=(|=5Eu+2GmY$i$|4K z!fK{YKw?0Hg1!T+&7*S|3K1JFbreK)pf>}&Z12ywGd6UzagTIv%reV-${MC502u#7 zggv}6!(mJWp;SaFyo)K|C4C>IIs>1?sIV)%8c)qekzkw%z^k z!#a%v^ssBr@T2)lJ2o9P!NdyMJ5;iG2QvrYH^3bQtpfhI$x4Xsjmo&~EdhRhRBy$v zRhz=HA!9N+eG0-*aB>Cy=Uf4u450&oM18|7La=9clK{B=gVws$cWy%b{AE>~wV{DS z10`5UlwX+z>0q%t5+am$Z!vc11RVPNh6t6NY~gqj+5U=)8kE|HrZG z-8P~P*{+V*j^77c?*p)vFY*%`X!mNL7O&aZ=7rr1#=91GzK7_&kn4xb17r9TN>thV zWPgQw#DV8ojUurL%aWz-YnY;c!-kD{^+EC?)Yz1Yk!;;@3GB)h)$uw@x^v|csY*Ay zp7cj#d`p#$?8+=RjrHq{$++5;nmB4|IlKANR4-ZZldaxc#Yb!%j4aYCjq3)-XNHZ! 
z0h$3u=Y)^LSO_SLo7Wi_P>q_6-lhM1_B{gj>uHJ^Ip~EFt|}(rYH4@~7f|?0nZ3vJ z=0TnK;8JU8&A&DvjA!{ZT)|@zrZ&_bUkgjpW^qZFC11k)2^7QhyA;E_@LTGsw=5X)=4&pDv1|wLdU4Ywm7O7DZ`S2 zhe4HLa5A2FPrQxdpg}wP@S{UoSHRZ-j4;6RZ~`#}K-BM`5QfnRv!$~zn-2bpVPJ4I zxJQPI*f7DF8J3a3xLAj!gdG+h4zoCtva;ruA}JrnO_*BMvaQv46k%;d<6=s#K#et3 zQ)vYo`kq`kg`X*pzbe-@fO0 zbV+^ydxtJhXP~SGC0=^&S)v))+d+2s6kRC#<>NLB$9``#;|ddg^3%+4#5>ZYa`U>*myK_86oGl#%OX26*jpj`dWj$nBtUiTXC zF{Z_9W^&eN(ZjEmJ7@xFt7m#2QiQ;cxAotbDh80bS?v$-8B&oF*RfnHG_L+irnMC_ z+NtY)7mR;K#+u*t-NpMv_if(lt~Ue4%pubsb&KhSrl$Uz=pT<0&7MMr1bD2kKWuLu z>Mnjh65)-&{|hh>?j`2q<**%rw?zTSN*!+@M$NSQ2w)j==0sI;ufNmc%6H1=pts0*9SMj zmiS^aXx)*VmfcH+cCVhA_(epuF>dt{+NQh;DcQG zLT`cx$CaZm*<2xpLFQKayvz;Q4`W{=AR!SA+EQCM>2@yc$|^1`ojW<){_MU=M|wa` zw(G|XQ5Z-oAi=-?!aO(&$+WHs>~WkoB_zjQ4i0!_;;RkugLDM%W5LYQGTeX@stp`i zpMB>~LPrZp*eVc40YrUNT)j{k_ISh%ws*PLBLixGKM8Cmuu6kbu_2DNMdh0VglX~y zJxBcWzgE)-C&N+Z#%`MWLa6`b%fh)U6T3n%qhkG@$W4=}4lbD_wv%5&TH;=Z*G|&2 zjx(S9ll7m~VN&SBLhBuobWKW~*|W^l+TlM4=#ZIpU~1D8Z!bO1d-R%)IeIdBT{`}} zf=vm1--H_+JmRv&du4 zaqli!K0)#)o#1wbh+K#ncEhLP`(jV9)0xi?x}dm`gLnz#x?o)ZM~@y7E&|KL0Mck7L4LGhh|h?U5PU`(zNCe3 zE2<_UE^+cp_{EiSHj*9%-sw4-0xLC`)Abv@lXkzl-d}p0xPG%Se6lL*8mfBvYQEg$ zUky1bEnMHt^|P(@jT9Ojo+_Nh`r`g*Pf2bH$LiVH)_^nNlTE42*72EIj~!e?L)~^u z|8u&8f6*DrTG(&YzueXNvVXp2BN|`(ocXh0d#vwP{G!3(E$SdyEiLi`tdsASd+MLQ zvxX(z<$2M^-ok$L?T5RhKw)I=#&-t2(7^#t3IaU$SK{)LRe$CIjN1f6976m{*T@Jw zOU8GEfB)M*Z9<3OHA3?N%Q1|@J}caj73tiTSE_sMDd?5&xd0#sl@Gqs)6;>J7#$s_ zjkEnw`1P7K%rCwtfR12P3Ash`4<9-CAQ-EPWQbtb!6^rs2*g2T{sZ#J)xa{Y)QV1p zWYH}?i?2iEkrs ziGB@hi(u{yd=o^i@X0uyjR1Zy3Q zi_x5~vK0f*@^3hrpL8=9PjgN^X?@^-lo5~iU7He*Y1SihpxrSv3PbaJ@;zC$3_70+ z?bHonUOdGZm)Hk@)R9Hfo%}C;00@#{N~Pt^IMEBIw<9uJXzWirf~OyS zLt7cFUcwCkcPp^%(DROt#DZB`01kvaael^PAr!AzcHs@r?xX%^2ljl=ueOCaP!1P& z6V}<&B(UYcFq6_dGushG%gETVvm*rD9C_Fk9B(lrws$Pq%3ElsR#xl0B_G_!OLgG* zfszfq*CJ)dyzRNoa;yHvpgn>|bwIn-dV2Y>{666`?|{BBqEc%@#Y)Cn>m(o4z1C}yU%EURIr$nV!7J7(9Yt#r2yz`9P-p?*j6kh~1|v3_l}DWc6I~Af2z*wSXg0QutXeef4Xx9;v3hUd@1I3EOfzS^ z+K@UK4bSuV*Y`M`sDxm4n$|C4CkOJp@rwNEHnez8!XiR!|Hv)Z*)zYqa79xtG(laL zQ#m0RMzZ_w1*iK|R@kv_M>1CP5Y0cl++h+Q{>@D3uln=U)XU%c`<z4Uwk>W3{(fCCjQ4m`O#h~h z>Qrc6jh%6?kA_?T6WLV@9W7*K$5!@eXD6Ou*&jyL$2QLLpz;pTWSG|7&$URVCdJha zVVMr?-1W9Iv0kcuG5@ZMO=1^xYGmUFUIA+y^wYxJFHVe-{m!*$un7KViXKY}2_szc zW9RtRi0LKBoun8((x`+*xJJbsh1;@x__yIu4jAo#A#e`&qN_r6}Qf)>$R- zNy~NilOF-QXX#rEQrj4#4w2q}Y8M(!QX`*BHKN6Li(+T^&KF{3*MFs}f70U3Pb2eW zmsUYrvc@k~Hp`>Iq57y*HmZo_qWjzn^LCTluQ2S%1$qr{MiKq8M7L z;{`Ik#IXMBK!Ft<^f$yEj<^TFei_0T=ubBRt0}<8BxKzP{RJ3?>rbW`kP}z$DP@;FtKXGBL8w@{Zh< zOFd@JEv!+2HawH~brSEx4NJpqZ`F&ibm7Inj1X@fuzL_| zl&TrOF5$eO21GH~mQ*(n4n$Q%Ov-hQ_GS+Q2`xU?@~!Z?Q_8jWByt}6f9;bG$jSTg zjqDOi$IQIt{Xw6#pTqp(PUw9zD{3uGja8iKC&jlmB$^KiEB@UXiDO5lCqI9bbxzl` zaqrPV%c;Ztqm5BPK;Y-r_yY48K5AD2D{#h754#-qMp*yuf8>e$`3{TE*_>>G?pazE zD$!gI;6A*5Xwwj_1b;&DE_>iGgxp0*;89A&JadyzAiypHkmlATv{D79QTbg(diJlB z%i<8eMFwq~&^s9G!4OnB_$vm1HMQ^lb1_{p49lb^L)C+yr!FatJwPGa4Q zbglW^AJwwAcguR0&P+)j#zV=h_2XIUE$NGh^$>!Ph$jck+(7NWuH~p9>aBR)b0H9J zVcIxs?R*JwYoNRKX#(p@`I7Fh-fR@Zo(i{k)}Vfce#%F9yc z^KQPgwilX@9x(aei`_JB-W25(-i!%0EH{aKe)W8AYWizw0J?&*FS?0J4t<5ViX5Xj z_{jvfoK5NC7k_VE9_o-e(b3<(5iIxqO*~qLAttZ=Cb{?}955ujwqs*9(o2R|8hEwJ zDl37ZgWVhgkS+$VkExm2^MJ6hCPf?NZ`Od88x8TlTb2xfBhRoD9|iD0Hb;HhkpNo* z)`Ml-7pzZHWXtBd8N=LsiBw{T48Z#xD)j?g6hMa^yRNhwzo@66{~p+W)nMN9Mv>^N!NRZ0!u$2;k$AfKsr0kI^lBY~dxl_dEv6IrUW$B9^Y^-^V+omXDojEDk_&6hWG1a^9RGnT9;j_c;2OB z=ev>86eeG}cQi&h)p50XN0_I2iV#y(Q%zVGmY8)+7tH_Tdn1)xn+=-=i(|GG9jUPf zPwD!x`k%AvBfxB&uH-J4aj(>0isMLb;jr?Q+fzeaUDxp1O`ahW4m@@BqG4Dd2R7pW z@`jK^L-wuQN;g*n^KUB*a3Lx%MOs%m0*Q?WN`E?#jvko}mtf!3MdZ*UVIgEApzihM 
zL#C}m?6t;yLnu4aBX0kB?vJO={eQ7xs`~_*cTRAR;7;{2cfa4m57@v*mWA$`h8V=M zUxqz#&@T#q*WATv@wOIIXm8)%$Iim4TP%~wq5qWaD3KD7MQdo{;bst?L(i}Wdm=7- zLJWfB2zoOO!=F6WSOw$6FI-?aa4^wY3itEO>{`e~l`Imel^#a_8aP`Sm_>4#jEr&s z+gvs>&c5gV^YCBr_`baBN$yyw9M8s6s-1`DLiCgQY=)Vn>~t_|aiD(pLAkE)ao~D^ z(TSLaY0cW(3dU~F1Ea?0Qb)ZGCzb(wgUxrM+Op2YilsY065gmx3ywYCGSyHBxNiUb z?h#!O9qPmZySduVWtN6mF%^l#wB5vFIiXIuXo4@7f@Q)2(*`|Hf-dw;2rYw*BUpbM zPCi6?vo4Kv7qssC(Rybkaa2D_V8fe*HGQzPB7tqDB?#wWv`aGVc&(W?sn< zh4lllZIG6kUV$S~zUr}D=|~%NY*1x^-~C_i6XQSV#j14Swp!eX)4PYQ@bY1w3v8=d zIy)cHF0ZN@#&6B6S;iU1KcQsgQKG2qX-B2!_qf~PPK!^O;t7*K;8y`{8a*-H5y~YK z(P46WUPOo~NzL!!B`wg5ywUq?^ByX#L}LvGUchX+~0AI?wUJ!WAcZW_{|uydK3ggr`>;A)w^HKZXR?i zNCtxiX7VOy3y9`I1|cQOFazF&PzHDnlCJAOCIYaWDD}al&pJ#tt&;b~R~=uKuqnZ* z3Y`;$4hrrLp3=_?nLbd*+U&{F@&yj^T`bZ#NRu@j@$|~TrG>;XQ&nL?a-w5UQhXMK zGDdw~*!Chhzt~1rZB8nyuoIUSVkF~o4Dj)lCKo1 ztR*FR_QUEw(LC>v^$3Gh9OAbTXacNz0>q1zXzroUV}RDsD<*cNOj^%9mpgLFQ)ZRhgQ3{vz0+IXdx?r3F-+#L>-e{ z+IYo=Zimk(X79Te^f-Y}9X>uM_0QZpMH3UDrE&0$xofdp!&4#vKw;o95eg6pv1G%k zHw#o%9cVuf0X86%0>T95dN3^W5aqZ$+h0llTJ1m5R8BO$ zJ=5nJwj?w40 zCU!aQ)UCBRBp;14NCKarw8jhwS&WxAT#KpyZNMH;*yP~6sVLRZE5Nrd8@3VmQ zN{A&qA|e9odvT#(v$o0cMaNxliB{)c%NrX&T>$+SlAD@}WLmHnRfxFeUw)y53DUYs z%oYQ*GzcBDYk^5%TPEqPKoa$UBOw_H5EN7aV5%Sh0nZOYG|;cWQ(B*F+sH*N3V}mE zJH|0^wftLr&CoTbCR#a09HP$;u_aJVAo#S_bZpPV!vj!SF#&cp_fNV( z)k3`ijY_cd(*aBUlXuNtUf9PU zhJFAm9;6$mbIo2a(%n!0j1kR<4A#r08CXdYZELJ&hQ^PXg zHhwVq1ImU}TxVu;T?t};DVAZU766MB0Eh%OFl{xQZ1n=x1nXv&NS4DkK(*-^Sl41p)?X9FJ=Bm+?>g-e%o!A8}zUfIWRH+x|R!au>_!kf~e48=cxF~*sk z<=XZskaZ#Q3D(J^swMuN7kG{c9P}XAnBY#{5PTr)sI#(H{Sy|Y28W^~&xuL1PG94z zJ<9Xgp+}*@as*jeqqqgAQ>u3eHj;YHt4|@bgh)dWqASs_4Tc}XQ=FvB>2>Q*w+t+* z+ajpxw^hK1+!wjZVGqUzpq@cf{LkxZ$KssQ!poS&rnI6AJ0tND{=&(F;5>{aI2cXt z3y^dB^a&{{unoa~6+i=|Uf`yJZ5J%T6+iLQ0Is|BwC{D!IpJ;nw>B1>?=WTpKdAZ{ zF_6u!6sVvVkyQbZeTx+?&mH*K{duS*om|jUI+k`EepG(609YDa7W!wtgc89FffgrN zB)_CG0Cryv4vskfOFT6!`}hKO(JKQ`jR5k))dQk;-?sg38GD8u#+T7bK;~Y;Q`)WI z*$m%-W`n+hA5I$lRUZcbD;pvkr>Ezx)E*tVN`p_{akx$s1soBd>`R>~y56)e>#mEW zy-g>6l_Q?xI3nkf78NXjOYA9IZW$jZ?{=1QMCL<Jmn)TY#A7~VZ49{ z1ndqoKq9peVq+^9kXMP*#fIG?P!eIQDJ<8!&4q))v56dnQ67M^^hfOwbG;P{2rPl9 zgM@}5uL-US>6(${@;*8%-=LcTZh<3P7T=t&;-W>=22FD`P0 zhsHf;yNHcUIHckmDAgge1+0sA``~CV$d@CrRbqb} zXKLT+?8GZsgRD}Ch{42EFctkHRY+6LTxvnGn`GLHgmgE)J!fQj21FznbYZ2()b-)V zQv@N@TyL*%q=vlyC76v^Yeo~t&kprB$-CqfGW_foZ991!d)d|BdgP9*#Ey-I3b6HIPE^#_6Hic35Vku)AgvAFX2^{n zTR*5p)rK$mJSorw?WGeHYeshH%M>%mk3tVx5?!ZxiZsxJ>fRq7nfQ zKG`!OV(%%bx4z#a6%bTgqh=doX*TYynD+L=OAVVsttb=O{yP6505MDCi9`C(g!5t( zb>uf~!?&0|a~FmfAZFadF(-|0t0yjmE_lJ*4xxxCg zzpPlL2Hf&$93UswH3qGZWE!=GShVj=TDx_}ZLFXFD_E2rH`X(-LAl0vP@&Fhy1)N$ z7w@3zt#}Dq(w+5&OUMQ@m=IvUb|lRIn7@=BD%pxZqYXiN7?Z+&n3i8IsWl{&3`W=> zGwz=0%Eup}z5xOvZbvUT-)4~Tri6I20_}5#Ccsr--U9m|v}Z$@u^~0U@`T|p1bu+1 z!|eW^PSDhTBr_;2VqZJ_|Y&d!{`RoL~<~YdpFa$Nj&Daz1&}CFK|k$EQIUR z0{noQ!MPew+2Q2Pa|{xACT4`6hVTM-psq4u61hGUMz)aBJ_jii{$D^5v0OZkp#VFD z#m$~|0+WAMoYTOmz`I1Ul9sPYm*@%hE-{{#-lBO#6z|~cv|L4UNexvwfX?d9TkP!0wgB2A? 
zk%7qwI>y(hSd?2Wfh-r1jg0U>v(-N}u--~A!-6$TuqCUQ^uIkbGhZ&ex$P46?qUKP zEHtNuIp6l01&5ld+U~>^=+Yn|gVRb|TibDSmK~l8IE(zE8b^nVbOazA1_h54N-g>T z3rX6735@@!g!KME(1OnjnT&HVj3-;++Y19gBUBF4V)eq$RBABd=30;Jqsj$|(R*^f zH)Ru63%NXgT&~*=Yn8yN=n6Q)KbWGAJZ<_!4ip8QuRwI|{_ z_kTE3?JvyL^ItD~@{vi1%9gt5H3NO*{mx~c_+cu$Z_ijFniPU?SVeavn1RX?^;lzo zj|6I_0htGGw9&~d4P?Qcz{~|>YTW?W8cA3D5F%>8^aGn+czTM3_xzY)wHhQbhL+IJ zn&47&v8wg7)FFA+YmPTOj;f&lI=k z21JFQ18l+w+ue18^|)}LAQXv=oW)PH|4VFxwE)5uLTUgN0DXZs8d(qq5{&2Gm}Tmw zI|DvUz_4*?;M%()EFUXJl;hg?ZMZ9tKS9FUPoHAJUBO3HUnXh@q~UM>8vgVJ+`^uo z9;|Ch%w_aATR0<5S5!{lR=*~{_Zc&_C?QSs)@Gz+Q}~K;X(6`{{?M0*o&KFk*2~0= zHIqH9)iw1c)vSk5Fy-9987k1!ymg)#$|IM(ulC2D>9*&&&%N)YP`bR0yg6MhLJE@Yp^(&S3KzHNAvY=)?&34BKNfJSHRZ6k=gn zXEnhc*F~}zW~rDWof61%3CtBPi-9x-C=WQHEDV4LmW|tlqizQ9Ht^0C2C|Uc8z4b| zt;ln?Fjabhv9IwkxO#LS+2{{@5yaUP(RCN)<^F1Go87m*v^xrG=i6Yi#1DoaKlu$J zPUWYMo-X6CatXwk@gBcp<(on(gLctK-+^~#l+<-)PZq|EPfu!_Eas;_GwpQ$UUAcb zB@IBD77kly1x=WvJEYj|W#{m0%@eng!3{y2#b8g7+{q!GLAIq=*VJsTR1TQ<`7wfw z8BWUF3Gy1aDPU7aFMB&bQDg#aZ3Fz7Dod!q!U*O|^S?LI_x6ez>FL|xI~B+h4d|Bc z7r{mn|0639-Qbw+O}yI?ban*4tf=3SP#2F)S{R@243Q_Sqz6K-o*_X*bNpHes!vDM_(E()Gk7vGORz{Sb7jZ zhcH^m&}!Il5mW6n|CI`Yp1_y`z-KzcZh5lvhZmOq2aNV;?rk79b}&09$MZ-+@%)bu!qtP zQ+x=o`^l3h2isETE-+pF_W?0k>3}|Avm*;EEFC)XFOmO-@|FfmA3(3lF|!TjSlFix zVj;XFc>4fEI=i}Ve+RY{Opg(zi3?s}699Ap+7CM`WZsYQ$`-!X)%3B~sAb5#C`ee}OHSIIPVwOW-LU=YuN#Au-{HlIT zz7Y*Pg1yxA@mA96#hpn!s<4*klevY>CsNxpi)Ib|x!u#cJn)GnmsJO;#RT#IH03E= zI@*yyfy{tZI}L}QPCMNiknw=Ebx+|m!xjf0pL7`(yYNds%CRyl&fu$J0e{f%Rrhn8 zdU}4DE|qUrZ%ch=VS3y6VLXb4uYw2BG;CYnaS;NU4hRfSFBy?a5KaKR;gF_+oIIqL z5MKrIK}-#hB6BoINj2U==gUaLC$>QaJ6WANyV10M+(e23f{b5SxZ~T64AaM||55l) zGGR%EBn{wdSbQ|_031xj0$wnA;+GV)yL>k84z;o_kL?2{3qi#~M;i4Sb9 z)%}9MMi)z`hE(J`SW~F;2~%@%{>&Hsqju`&%Qn>h*TslMG(i{w*UuFF>)%O&ac`u} zu11h^tX?~8ug2KHlCa(sUQ=_g3*bUX#OHvv=at>#gv2=TEKFoep3*i@t=I7;r=-O9 zn0Zd$NbGTZ&fkLvhexXU;v`^FfO79tQXGK~gK`XS2XFT&t#eDq-@hCR*jd2wf@{FW zh6rXfK)9k%UnRFw2aKVa!`$%>H{S}(`#?hleLMr^ZDF|D4mLahet!yXp?7R)ziQ-> zHG?3}IYIycHO*y)f?;_jMJ0ip4#gJsCXko7JHF{M3LY9vdjPJ(y~|3((6{5-+zi0Q zm@~Fw^L|OV;pkt{==k{axt$Om$<2rd(=dm_nhk{E1487OcPKYxl9}}bNgG*V{#^|! zyxrph?UD5V5nuLv)Aa`SIdQRamdnIH6*%p1MXpqyy<=71pPkzCS?}ZgzV=&@BiCln z+XnK|2(tdJ*z(KGKrxQ&ITjAxV2IXG6;{?1*q@7SiiU%Edcpi*?5H%Y0|=M;1)UMy z+ij?Kyavdeg`7ph(gEs+$Px_RXmt&2%!J_?g;4@JSsLK6M z{Mj~236yk;7>D5v)~F7o#ofYbvcae^L|i-WbUpo8Y;@Ms?D2?czxgzyxkV+(fz(&{ zn(WA4+{=qcBL+`t!PyO~?O<=Mz#_YPQTNxtSAnCC2X<+9D3)P66^jxDAXuC>SYK6r z@v3$iOrj+zqmKo!KI-z!)-GhWAuM8yB1flSs%k~m;~Gq8z7;4-cL9t7Lt6%y57~p{ z?VYw;^am|NL2RPDbU;8kvh0SW%u4FlzCdSzk}-WO`TuD8>Zq#Lt?vzN329J3x&(u6 zX`}=JF^jMtu?o})HQom~<_LY*@we}tR5&)Zme z2VLl3%Y%0e7DC7%d2*|?FKM@Y*Bu0OAaP=3T=qVZ^*Li|g&vH>6tY7IPJF8lR-qdA zEoK-#;35<6H^vF?_u1b7WxEmBxAl*|CxKq}=;xqdu9d8Q{s9rE3Jfbi%en(N!a*3zVg=8-U_zxsRK5Ek*O73ra>5EejbE1DVo*Z2K z9<-ZXJpaX>P40r_0&jHv#o5M8+$w8(B5*ijErehi?CkK*{B&>? z(c3bs%k|4+{g>Up3YKBWo#Ooed zwI2amIeEu880_In{PyGNt*bB@BAY6N1|x|s0;P=nc>`< zo}sPtBeue%>5i!Se^a4bQ7a?GkYocmECf4YXOFI8FwJ^!`Bnuo`c=|6$~OM4ILw>M5&&P+U8TatU6#!?|Z|%w48jr^?kl>Y|R9wTxKmfCXXT@cn zSemiG8H_Ltl@dZ^Im9V+PmN8%k|sqS(E*SybW%Z@o%=upExuZY`2WvaqZM~20eb`t z9&H^Fln!(4c&F_TPSNK}G3TMCv=C1U>Ix*mV9ISKDuls@3Ib){-$N8*-2vf}y$I);2cPn%mlvKk~Ri0GKb(cX-4>Uw+QpoG)AA0Z-bnS>!)wZC< z4!mxB4B)}NMnFzZ8_ZPRgxgYQAs0!~B16O@Ljsdiw)zdg31E8#;Rl>5>t#&|%?GO! 
zMD$c)ejC~ko3?$MHiLbN2aD!~efM*UFDSp`9r~c;<8l&wuqvTQ2MB0lKS3jR!ZzvR z(fQ&8u94+CxFyg16*x*bkjOvS0*1_sz}tb_YJhqLNJQ0TE1@))!oYWgAf;;Qq8&we0AqrKXl#1g z?8BvH?HH(3fT(l`WnIZ1Tz0jGGadPgVFZU$2*v>r`K7z!#82Xcz{u^zlC?7m5&GRG zP#*_ZB9b3df6%be0mV1trZOtx5fHUbAQR{h!IOUfDX}FC2-3-nJxAthZGLL(E9A$l zN&%*tUOCX4f=u|3rgUKV;>9&&jfTs>|DR-~;;V@b`?>13D2|^01rfndak9;SHNpR4 zBUkk_@WSYpCC|eKV?hWE1Z3>dt)JbXT9_&GRIC%()nw07&a_ZUOn#S@PVYgmG9ec{ zpn~)I#!&6_wf95I_U41YZ@Eim!8)#0s6e^3j@j5_MU(tR!Dz7hV;hismG|HiA$ zG~vyiE|+T(UGmJfa_nac6jXJN{;;QjOvoVa>6y_srME;zsFM-p(cmMjck5*!{5fpw z57Wov$Iu;pVPBwCxV`@j6a~;k=qw7x@n>fO$?^-kw9=S%2l+!5fWkYA>N<;h_s_sb zpcb&)hm=0YvJIh95b=L21M@9da4k|{@P}g`Nh?2Cx%1@XRbX&IThn#@c6b1w(=bD! z4fVgmb^+f897e#Szz&B<8z3KCgAU)dUjfKye{mzEzd_jRAIkulnDWRHfW!``rvr&A z3yE-3oJZttbrkwZk4n*+iNFt?aD3w$D;{0-8p=u|Q2Or+uWY?ihY_y|!y6=uir#aL z7r}@^NWnS8{k)ca(NDU*i!**&pa46t$wxebnnA{8V- z%M-|tlNxi2d-#Sw2XpAJVl%ORmRgl5yN-HOqjW*Jptc)=66Boy@H{lIbk}9&Ap1Ay zjDYQDgmTBFx~VnA?jIAEy$Wm$?nbEnSbwUi8nCK3!D{yoIn&^{NtR_Z?|6_Emy9X@ z(EWASXNg%O4{kl$PBbu%N39m*jqyX5!tP3#GrVerK;Dbb# zf&0QBO1uO{Yfz~m$)QNHsVhZo5evZc!6Q53`bYrsq<+vb9CbPO|uyJ$%)Fm#)@)qu*fPm|ne)sb1djzEsX-2P5jE z5ts_M+}|YdXcs=cD5u+{Y(oo#nKQm1_t%f>Hr00~(9-WBolL%__pBAW^KKPk^Q0bw-WZf5m4W%r?lFcyF;q58{Y ziZe(T$V)_@IXnI@at%D}NCrHXyhktGZ18o$-up&H(ZjTMnPgsBn7>vJR=DDZed@jj z4buW8=4B46C{Ucix5aU zy1)1Qqs7rflG-u-_=$ox~8cYu2NpK3h(#AG(?xMkeoh6>G$K4 z{FK@3i5u<(LT9=JmQ#^4Nv$yEhh*fLqE+gHf6RfKQW8h#h-H!D}aVyb_95H2_q zo;RH1vAPQC7f|1;Pz}sU!GAoO==XF5rg8k^9 zi2-DEa4+HuA6ihl4!BP^~)$=nR4zZ`8&n8@S zcReHkPUSny^B16AyY*VxdNC1#B%w&!z{Z;pv^_BQ!8-typJ}f(9MKymU;u#!c8yBr z8&~PAf6>n1n%8Ocx&})LyedEpLpp@lbH~zd<%Lf<@lmsQEr7hOCElQNy?ne-5C6gHB{U<^RgqF|@1tgKv$+V$|;(7GLr$1nP@+PL9Sm`0+o zHhe&sPGPVFV0YI4hS4!JA_I<;ns26*(9Wu#^;*>Ca|B@|Et7d&v_K;u8{#zz9L$0` zj*KML8}e02KEc%0H$G?GGeE5+!nhBLW*jPuxWO!vXdblM^o#fwom;PAjDl|z3(Le= zHU*lcC~+;%(G+35|Ga`oe09OFYe+YBi48sk-{^;!?>WH)jC~GH`t?6c)W8}5UoXoa z604ktL1VfxDkP7e7z)# zmQM7Ga>Pklwsz*ruKkq?ic)d_BISv1a)w#t6?+ zlkWHX=UMvIA83J+0p~A*QUdA;h#=#Qo0yGQS5Y1u$3Ld3+|tm;mNWNq&&(`=@hRhg zvawW|I4Vu0HB@QQ?_+6e?jUY;r|X69-&5vcvukZYiY^2$!{H3)Y$8r*o6|d++-5?| zMBot^FU=g>k(Sy0^p1)D`_+gx%?7=gy{3OKok_U`ZK~u;_OmAAaWsZIW`whoC?KU> zV&oIbKMkNbFoOyE3F=HhIQ~A&%z8=iU+TxOD#5XX8%UDT^qe6Nsn2eM8yKo8h-ZNZ zg0&p7d5}15@Y?&!F%Kq%txcN)l1CgII~g!3DA$mgeH+WwygfOhjTODp#?#)}sbC`3 z9$5_Z{T#N>9n8juQ_@k9k%*8H#s&CxDMwj3Njy`QR#l~6d3?g9eQ{57xx8@Ko7#i0 zE!M3z^hXvQKXz|(HOkTfLsd)-J<_2`Vw5T|8kqgpGV*oo;??&d1I7{Oe}Ayca~pX0 z)Uv-c{8C0Ns<)!~4hKq3A!7`#8=8_Yk$&8${yo1Ns9*bq?;7h=vlSQTR7Zdaf;@I4 z@*Eza)Q>y_q=(*{%Drme>?rfY!_pUIa1HmkY=cP6*=D-`DT$C*n}BaTXs}6j=yz-g z0N|qzDHbH&*Kf*ZtdF_U+)}zEHxft>WpZU)`v61*%!|T`-m$$+HW5yWa-$6?tOE#^ zIgToHkJSRV!4|U>B0)yO@GzQ-BlHv@qhvnb^x9{yXkSHi7EsiXZGcR;I6?Q~+xdvX zDGQ;U5%Q@$RWr|h3(vAGE_n0uZ*Z9iu zBQ0(m0qy|Q9mFx5d?lQ#!_=1|@W(xvu6jLe;_iduj2~{e?gG@?8xaRgBvctZXT{kc zmLKhqRoBS*Db2>s@mOdmn_R_`LcMFS;VVPIya0b4@I!0QZl}KS?a2yeB1Bn5KOyoW zip=@yo*Y(w?=M7+fuI5FTQ)*s_xi zTz?TfFLLU!#3r*C7%i_N^a!bFka`m+9$*1+3SS``27b#hD}e6N8wb}=>_?RiT)lv> zS7_V3e1wbVxhMuXEO&$Y08}fR8BO7H2l}EUwb#aKC}sYWj=rN4=H66llZHn%Lfmut?n|#G z;JLsJfndnDUVm+L2skWi7AD9wPdXJUs^?$|$jFVr?lH7UX$8E zBre%`1a@w2Gr2{A)LM#Mz`z+_EC2eI!ao@Dd*X-L6(iDNxA!1-KxlHMuuNYEnZC8$ zRhg$Idb^gX!BATpmY6LbPW4zTncSWAkspe4k7-3S+nY&({w!1Sq55Ty+R`nV zv!HiCAC!H&W!4S~JXqSqU)@%wksGHjZBJ3j^X2gIds-KS_>crX%ZIi{d?AQ<#C7Zo z7ZXr5sj1M`+zDFvb9AE*$zSc++f41TPg)AYX}S`c)eBE`ec8gyryPfD=hD((+(Wnz zPylwy+RdqB3fgZUHu$V)Sg*_pr5aMjUa??wzf0>3_#Z95%3F%Lk!14{wf@J2Pu&XZ z_EW`p>6wk0)npUy!wimSdysfus6-_JI00}n7<92B0p#RSEhnzxr0ISVf6dD1Nr$2I zRwv`iaA;UA4F%y?SgrZ@`MC4wWOGOpD;O5h0fe029&Ck>wnDh-VR2a(-)L+(SX!Tw 
zb4gC$?D#XVC1AsAt75rWvwIdz*-GT6!I~MN{2m@AB3;0!8W-E}>;2W~${bCUUgd4i zatE~!Y(9lc#RtU87hzD;_FK{(jAPzB*qB4RZ_ueAkS4+Au;3@a0^S;=dHO7z`|?w~dv~EWzAS^v@>o`Kd5!27&Nozq zIsOnl$aToda;h|m#CiqOy9B~5Tk@F`}jf_1-E zBrD!r>5$VvDO+pS-Z*skrLDaQ-l(|%bLO{=N5KT*-WE6vckWQ9N4|%v6e(E+nFM~i4*t^4 zE#=Dh!g0IdX1MoGlp`PwugCj=N%TqW7|C0@yKz?yle(Wbs}BBoSD7m*6Jdj@FTWbo zbwrGzyFzYqaj)xo+3?&c_8#Cb;ee)tM(ZrewIp<**M#qEF{(_O=S)6cE%#4rSpqP_a>Q;Wh}}FA17*4 za_Kff66&XmE}SwI6YCfSw7k&?t#{fDMcR14ivpqd-CBP@2?9r5Uno0; zY$P8Wj|v~kVfL=cV>v5&M=N-!QvQJbmk0fQ*%{;ICx@J1eUK<PzDz>;85*=5+TezOZbiI z?EXyi{~(=8zyOaePn3)*Z<@s;f2jX+QFVmPBt{w!M6FW9QMALA1x%}-Me3TGtVFS2$M!j>HQ@v6g{61Gh12!c9^S1tq==Iy=Ow7PMTsN){^p}0w%Gf^kc55hQ`j*_mxG26Deyqz;@>=YjZ7dd*>{-R ze71&Erz=!B{w2g&_4cZ|7vc0Dj5S8o+m3>~9cI+o^k8n4AzFh+^6PI7olu8k^14ue z47)fer8lu&D%TWvM>`CS2oQqA2sFi&TI<@aVoPjt!bH4JshHgBVw0&K2D00pJVf7^ zGFj3J$Mb=^fV=U~XqV}*{}p<6&Q(~H^rRlh5Xh8Dk-swUDbI7`HXljA6qVdEim?Ct z-IQjU^?2|yoJU4DT`FU4<8e-uS2xPJal}UuxJXSf<;S|Q>@44%fJ-Y+@j4W1B{w@V z{2uD^XC^#!Y5(v1q8pb1M<*xCZqz`J2sAN@YQUS^km)D9cL1vgxQoF9!N^a3T}NO{ z0Uy!f!+r>Tmw>6)pZFK&eJ~FqJ-mdPA>Pc__uPr>qq!cRU#5B5{j9(7CfiR3uhXO% zH)7VCWaCqxEJ3aU(e%g?fC%{kfRP7NXQ>9Qxk0vVcq+S~)I)L(nE=)V!4$O8BPyQn zsHvr@*Z*EKePpGY?!5SWWXka`wzI;$V7%C2VKZ#T#`P?rQVy zWC|=)SphY-5oHwaQRr@VIT3>q^}D3#n1if&Wh(R-RMQOo`L6v?HH+ z2gPv{(6jp4D>mrj(NE!7sU9KOyX*BZ0)q^kM@p(cE6K+VCS)MCup-QeO2^lG(2@rw z-={G%T|Y0mgta-mYQ7opeAMBW2Up-82;~I@5g|TIF7P@(DH!&rSBY?Zs37C8_ew1V z5|&iha4MRwoaWrPJaIQ1jT=V-y01i}GP~F8Uw>YM$KDj7mvJygPt5))1|zPl*ROH6 zK0oi)&|mod^`94gi~h{BpX=coj^6p0_5{`*_ycby(u)N3Mo$@$l*_Ckw%3zB(29{n!yK4~oP|d80 zb^eLQ2&^{b=`k=CH~ym+*uoLRLq*`u(^_WQ6CeAdEHSY*H__w6-=gc~jx)^sbLU?~ z?HWn{A_xK~rsvZULU0*xJ@|Nku z-FZq}MCcz#^f`C-5zTaG34=*RuNc2a+3K!xNR9NU-|LB^Zj(pOd+?9HN4+a%tYmQp zOzCeF`qx_wmYz2vF>PY51W^jI%}m}+E>5K!0aHHGY6Czks;~{Zg^a|eyFO>N+f-~o zc@>sKem(Fyq!N;5Pf!B&Pc!o*4FTm6kT{SF9gqHyYuWroIbpU@uNb-tA&LyB;`^;^ zx#0G6J*!8ACGEp^tuiVTpt)}D$g}%;(k%4+O|(W9n_^BM1`m_3$)BAY+m$Z{VKlB5 z)j_QalDG^*1k!7<;HW83{o`?5*sv%sr^SG(y$PYlt7N&(?7oc%Y&(^!)D!}bohwl2 zHH(oT4n!cbyIY|m2P+&5c-d)t;>3_215f#*X3d(ML8NaMHey842uYUKGmW~_30X4- z-jGjhcpX=Iyd9JhASy8Zm6cAl>zZS#0r5MVibYppRsjb}my*`7~*Jlk*}(0Nud0;<`Q=#GX;_-taY3VZ{K`xYJF% zbN&rd_XO!vA&8h9st1sMvi$YtHV?#nnm< z2?Xip)%!2LO1Q-?oUsGj2dWu7gM#Oeq#(|PyRm5=8i89qoGkfEVt+W)0fnpfgS7d+ z#4SyAp-;<<;VlM!p>)?Q?MK@ldi5via{e_KWC~WYZwr#ZG=So~FPIs7vEDeRX?ufJ zHh?vQkXFfRs7PWGYma(zU0iW~;H@m8$OGmRzKIY}=D>-vd*WT13E7M^lQh0P&_POQAn^)U=*|# zm56p#7CnBI8HMi;YY0p_zaR;X+BdA<@J&$#Z(2mmgLb!OgyLo;BmI2!`qRn(lye_} zX0t6=(~iXcNfwL0t&VX6^Wl1}ZgTLod+zhC83&>pCxly@tF~gS)GK?nKXgXgp<+z0 zIz+9S<^~g=06q`wZdSyg>;EV=$|rA zZw3p)Tx~iZ9S6hW^)szQ+n&H3vEiutwFF}n@R$r1`Hv<9He+O3l@l7<*hpO z>U~-pD5VfeQ+19i`m$8uRRhp9iaLn#bfXN%ZJUYviq z&~TG;+8zqtGI2Pfa}y93Zyp-%&aJk0WWC6ck>bJbl_z8VyoDsd2ZUbS*#2x%mVAvx#*OQ zBO2x~yz?)%WhqI5fkNSEuOIZQjD)=!t7g^rSeeZI|8nC7h#EnO0Oi{2*%2Z>TC*ns_p95QF5*gy+uS3& zsvSU}f&ZRQl{`t;HHD`oMMWW;)}?seRXd!pd}gXKLdF+&lECgtTDMZbr6k570af6T zwia@$3Zaw;-m&L|#x?6z6G*%_1R%@qx`S@#CrRY!w zxav6d3Ifxmf4-e$YG@Cdf8^i#9nZ~Wj~Td%=^A!Ed-lw<%eoxWkUlE)q+vZ4lMv{w4-6 zB6^=g!y`A)Bfww^VH4o?dZ*-HunBn|SlGvbdrwNwQB!)){ECDE*q1=3guy)p#Z8b| zI1hVj|CiiZXkwT9tkVf4VnOgEl6M;LG-pXgUi|2Fd3ANk{eg#>W8GkwsJYuu3~jI+ zfOQmo`d#Rr1|e1nr*;)6ovmDxC-P6}t}zjQs!Rv_k{psmINb~-OSj2?!zr2&&71eI zYCFrzzfRoa`7N?A16S9lezdv-plM)^d#kMs@MuX}#aIAuwSk|koRD2JvaF-k! 
z%4(RF?%LtT=jY{_o@7V?HvMQ+;Lxn$$Kb9>E@7n6sHUg+Luk5V4s!%?_;qjrPH!hx z&no)-R(vNF;Lru#3p5YEP^#5L;;?V&k+0Y|;W22Y ztKo?noToy+9in^w{5go<>>+UxL~`n>8xZxwRky|m4Fiv()5K&veyK=*JNaJ5p;3aC zhOBbKdW%aSo`z%xUMd1uSV>*7HT1M7{?v|J8-W=V=BMr->bB~)UbhR`^#7G`KOAVo zI06C~#>rRnM^ER2Z~`&F0Y}@_0_r_jMkF&SJB18l#c4yJUug@9-;DVS1@*n8|fcqS> zzLgiqj{kmfj4Z?Hys?T$NVOc}@|{fcs#-9?J4O;VtWSX=UwF1k16xql(;m0s*Gg2s z@?UW)+!g=xI1tf`hr&$j^@l&RWl{3r4pN0fslqpiyhs0}n9x!X)9-t}11RZFzL|;n z-qNIvr%2((-O5Kn#0(?HzLO7AfH9jFfy>CAmYw7RV2%;a9%jd;E;3$2XBWShja`*( zf#_#cvTh-^O}AZgLA?jh@vtRR+&u#b2SaLbryD$Be?U%lj7IB-X%o!<(*l?@fT|yW z4118OT8s<}g?enM=4031gp+Z6>t}SE|Rhc;+fO0v{B7?b)9q4Q?S z_SvWY8{D%Dn7VY@-*|`uOoV1FVsts*GNPV$@kTVxi^RWbH@kK&0Z~98+Pr+<`DDFX zUwv`Nf!1()eyec7Z(!Mz3PrWJWL;Pa@87S05$cY&G=E~>_Ac&(cFcjgAtz|HmNnef zSk3v}1`cCoz&6MF1GQ(^y=d~&6~8c;Og$R7dRyRE0Hbrw{!gn?w+%gLXhFt-hK)!^IwDZvAyQ^2LWIcOg;+T;Grg$s9*QlKBrTXj0>%g3k28 zay_Zq|G?#o%&KP zoA^6bhcv@MWh#bW`;ApIJWNfx|*Z(3!l4=oemJ$!PU@-TK?)#g^qfPV-P_rQHU3dV7 z3?v+e`64g>hQ!O6v%hZhJ^JT-ATKN;>}A(>kRzClOZZQxG?s3LN81DmwXCw)VqWBm zwwWMHnYJWsp&Ft)Ig^|#9Sr!vLrBFVrzgFBTmJ!nJ-X%t$!&3hPVO|G| zzXJeNAQXWlj<3~T+*$r3E-_o*dd(h-2QN=c3*CX!{z3+{BpeiNaCySs23g{eP>3`y zLWUb05+>#2`Gwp%I`Evp{AC35@%IE7vjz>0)*W#mI^)vqdFN}nz%KxhXPKa%-K`Y4 zbzwWXKS_2SF5`zgUr&!wq8a{nRXEahfgfIm(d5k~tJc<7Y7-^dW(Hd9le%%3#5=Up z(o|gTk?2*s%DES=D9>PD4)U4$xIe@@3|w4 z-Efc8u<@V+{hZ4c#0%M3#uiNE8W_A~#=TXgdX#gSMSo`=rDVpbN0z%t`tGF7Ah+aJ zSyF?_+E^ph=pn;qGpt7-(2+cBA)fmeK((0eW(46H`WMHeNL?!E-#{(xT?6%x4)amM zn5gsfjSG6D+YlDw>5KD&d0?>}=~1&-42LM6{pY?Epdvln@RdS%y&kSQ6}E@2adwsW zOnP`W+rz$69oojp*xz6OWNQX>FQ0YPC>n$A%Iuz=dNXI>3RILm^MAyKsgqu7t}Qg_ zUnkSn^HXM#awHjKdw^K3K~*2Odka41TOl2T)wk)-$+p@{!bYk}ofg$h_%#jRbhBak zO*k1UDqv6so})xepXk5R(8K z5vB8kfU`nzk#yy(tvH&^(te3hDUXuqfU`-4*iyCT+D3mxQ7i!+2J^kmvb*>jwOfwA z%az)*h2o27#XN}g}U%)`Ojm3cOv*-h~~ZF-ljex7clSy6JE^z^v+LR9Z; z<=Om$i`@rX9o#;o|7^-o$N7wnb_Ii5sQ6ns;SlQlfaQAqFH^Bj;V%?a&>;DJAUEBZ zpp8UIqn6eDLc7c1-FTD=*OXc9RMFZbPe&BTiUE(`gq!@%1J5!jh*HR5atV>FFIo-% zHe1%ih3}__DLPRm)Bn`a3P)C34%6?Z>nw-s2PdHl3NGl(ydXw-T7WR&W2a-1zQ!*PKeF*IsPee7MVpUi-?_d1VIz1u3 ze>l%A8Lr<{7hb?l7D}AXm63?|$M{%sV9PIuYoN&0uO{N6-j@vj1$mOqZRRAtQY%BU zWs@tsi>(7SF8#b_(>NqzIL~ULL!5RVpA-Gf%Tss~19u)gvo{4IZN4oyK60mt>P$4A z%;?{8CQb6V%|+YjAc>^CI`nW>qfkD(ioVcALc?&jq_}VZU>_fb*KQ8O)%>W@hQBUQ z>>fkAfqZ1de2=7gW1yf&NUZUAxOi@X+xHG?_D`Cf0;EoZqIt_$YTp=$5oD@ryf|s{ z9hzreTEJ{^coX5_?>gsf?8R^G&kJapLBw~A>9%f@%YXqudAWw6<>9haffa836P{&$1?ztD_TJJz=wTCAw=K`RuVIIu%Hqk-+Rx2FjFug>gl$;}T0|Ka{*# zIJ30yzzGPKz*rTh9P~ing(6Zw$?kl{)>Us#GVZUksK3b1+bXbg+G;kwZ(boGzI({r zxH)U#m4nSeY>i?wASmggpZ)qOhO*bxe*Jv?9=ma<0tc%RTN$3g#3YBkzoLmoUf6}p z&bgCuIpOSj=AGS7Cd_UlmNgseIDJY+-x&_mY*)S534~uR2e*Bah-=ln_CY|D#+pCbF$9&5IjiIukzJ` z<~N{#S@LAOA6v4Rbi0Nw^`r|^MWkAd`_CX%A0MCwFQ5l6NS<{ zZ(To3xiGex{+(wrE1%RWfm7_fG%is%)ry~)aKI2_XG&sVKk)^yk4HD%GuS6A=SwR? 
zuO!k-QJDIKQ(z{y1ZMNJiGo?SkYu@pd`mHmG-u1@(r5z(lg5iO1M~BaI{`BhhE!F_ zQ+bO&Ym-Q?4A*NWZABV~-$zFA4bGsWSo&}HC#3dH?(*a$T<~9-j+<&Npo$c}U9eah zP(EVh>#&>bx{1{D45&j{%F+1Q<{9_;P$qJ;NJ_(1{DZ0j{0Y4c(@190Fkj0_d2=Qu~cH82L~0Lqw%a;FV`Y30fzyf zQ?Bzt;_ghn1<(agCz3f0S!ssq9A3VBY0?)%i57ld)t*|VeioB8?H|ZB$Pb0%x$rmF9lr;-P-R3qPN7bIV237Z#5L*wf6v=XUT#sKVcT zj!|_}u4oB54lm!9?qxoCIKMZcpno7PoSAzPU*Sx)7mt_7qovl|KZEY}1@$FLf(^|C z(;A3g+WdA+)Bn|pABuHpqt;3gJHP-!n4C*oEe7w%FpvA8?`?NZG)J~NV9E} zD;fDZt5dq#zLO!YHY(dS>6|~W;gd71Psj%p(oO0c36ew~vm;Fbd=ebBT_j2Up50wx zFCt-)zr&qcj?u~+H_0ZZr_F(?)=a$n8jY>yAKfjpjcm__V?EjSx%AeAxbz1_mJ`1}{c+*v zV+(5YJ}GHP`$yDlvaw3kFWjiZ`Lgy6!9&m{^xn`Ve_BEU6=m((u90-4o3NzkpS+%# z>Y3)dGc&0Z(y9ooi8RM~YD(_fp9dQIo`d-W)jC>kXJ;@juqKydT3`m17;Be(W?Q~&KY z&H5>sud|_H+~>~yO$0uKM=y?(`zMd~y6e+%jIme2zo~=6Im3w#)ysvOU@g%|I*Q1Z z$tWfq&*jVbRf}F^=W^=o>{qt3nLN}GAy5e`+YFzNlA<1=tP?0J^VIXp71v+??39nS z0_N&Ugg8g&*E{yyUY(-)0uv+^^(;TEbw7hIwUUeHyeKOxo2juO0H)Mtt?XSRtaS>A(>u5+`{X?tP1tF0l^aJW-a+_ox?{hW=#;PrHY7jU z?mp#owXM}eTVc8A;#HL5VXGoVaHX)+&s;1xRw*e7_DJ%Swa4vpa8X3{dLn>X$H+KT zqT$ix0%pf+J=S)1T_qK5#bDV5lTs$xzUh+-@#`=k7yQ5bKBcAmnf^co>HO9$OR${tdC`6A3CT35ATM5@8e*#RN2Z(9 z^-GMMYS7{eU{}J%D2AZ_*UdMffIU5YzY_FMN(RPlK2^S{jDJ}JReUxD=0A?l484C#W zbzX<7eLsy-o_y}A{H%Auk>~z!NVT(u^ZSR!&P{uQX-#BtCY88{X7Xu~Z5sNQ33?VW zr_s3V+ku!p+-P=%JK?g}xRl$yZTUBxozu~m5cDWM_pbS09PaaQTM2uACYh`w%eo-< zKKl%lQhbDv@SER~Ja^4xMN7Mw=y5St+6Vu1CcxmP*%=`O`_C5gR^m(tMooaG&a_zu-zjivf;*oTPu)h|vuZLs^^cW_S^sJC)qPGL% z=#Kj7V=;8>Y-~EO&-e0^NfxaL2?@C+?R}#K&YVpOHhri^{7k;PK`N)Qf1Z!sE<@4H z{6$4n%BQins_Zu@TiXdAO;;1Ta58xbi>k>{ zxtpWH^1Q@-nICl=?XU6r6gqd;k7b?b!EKI~zS$hXm*s93>E-M4qPuCS`u=5>DPW^b zA3UGI8_;yT_A7}lR}FXN-b7Llur+_!nBz3?;t2YJ?153~Ci-n1CvTh$BRq3kdZW~g zoi5AdIX(*w5dhliAf-)`rguEhP%pGhm1L(ZY-l7!B zJsJ}AaTMbx5r#;wYOdodf5u)4Q@2DmnT#F;^3~!kMG_Y^nHdj+Oc0vC+!Q2jN7QD; z?e0tVK@FQLq<_{#Is?eh=P%hfIFbZEh~?`?@`Q!qP(JM|Uk!MR)UBg^ae2IsU(&z9 z!x;DOKsDexp__+{#QNj^YW^HPGLrop z$w%dO5ENqF!h|giMp~A!vW&_d<``Ewy!K==G+$2p8hb!BG2P7 z@o54QP6ZgFUz7y0#!ALG=i=OB!sHDOyI&neN$7ORpwI~2RpBd9QK5?pR*CdN_o=MeGQpJfGzR!t%J8yJwH+6UaBZK>WFYGr5LktU(?Jw-a zOR5MZ4M}z##Uba+-$gQCWmaT#7>>i&Ze70OngETx?a zv)LSCx)gd-H#}%qU>9}%Ap+IVkh3sVQWEuP51*XL=2NE)*Z*PxI^G0V4&eB%`=Xu+ z1;*Sl9mjWyn`h<{)b{IHXrq4VCo{iywDDyx7cMj+-AiOZdAdeto|rFIkG#Z|(s{4T zP>Q+r^E4~%DAN#YH0|mrGnt_(b)83n=Y8wO$B>{XpxsLK&20_!3zBeb1RB>*?x{&J z-%D-<1%)rk$!Jaez!-PO2*nm)OnF>dT0~;v%Oa^dr)6>Hm^&cvguWLeb7MTT3l~Hm zdn@J-I*lCRxsu;0B^|}7cu$@cJ+cnl&b~5gcp+U3)#E_>SE_zJ;Q_o66aNJ`@>|*( zhx%8heQZK^whqnj(cF}5ct|DTHIWeP<3R^Hko7L_x zEr~2Qqe&#Lax={B4;^hH=hXlL>j=El8Vl@LJ$Z?5@!*<3CVZscIjBCxycbq`qG{qyJ^Sx@+J9u{l0nN)uM7z$U zE-z8!QdR1ef_1RTT+dSx74K@28(NamU*5d!A<6pjRr^T`{xSVl5pR2Ln_7?Aw!K@|P%>Q;TDgLmOBL zbGj&eD)cqq^p#9VI!Wix#UsdHy&T77#^}cQrQ;MSY5x`jf{K(P$%-CsGd;Pq0 zOmhY`lCPIj7i(`psIrZRU@Gn7kuM*0_PFay8WBE9^I0LH{~9;+5@?S9H48vkBE zl)XeEq_EKPZ)KG@TqV%(v@Y_b6c3G}L9z zWk&Zpsp({9>u?kD9jQcJqcf}pbhaAErpGT|MNnN!=5)mr1g4<`@|eK6It(TKB|FLk zJ{$HWUcKp%-?P8T-bpp1Fne1L_5Q?@2MAdyF(OHHW;qk}Cil4x3)@QCZ&bT))KQD( z(X7G5`%`p(@H)Esf6{2=b`2C&)f)BO+w`RfcGZZ$_1Zsc{_znAmE!4RfZ*dck06<4 zym`W+GaU&d4ohYG^s9&-;q^|*?D@`9SE26h*97%xyzL1lImkyCXDBN|l0$~{QKOhg zL3$M4!_%RD>w`NKroEgs~ zx_R7gS=$HP9ZyK5Fsz{qXgEQ6A$Ep)a&qz|At3}vrx8;J+*S!T#QYmH52f>AMoIXt zek7LEPP;d^p!+rhuxymSmuxv|IK;q<)8s`KB$gvB<;zHE#cx=e~XvNM4K-pQdV$U5Cz5L%J+8eh@Kp=3>;YH(xf+!{D@{ zX0#bIw-jJD6v$r}Ba)Y}zIXHae_>Xxp3^hIOh#@vtnq1A3$a$mURjt}9wj82+F2Q? 
z(nx2l_=~z?7dNH)401T$z4bIPr3T9DCPxw^LT*W!>bS|es%#fWp3**%n*JK*SjB_Z1Ee)pK8D~?jpAG1 z9X{N10d#Oe8k(Bl8X8FUrpgk?OroR~n%qk|Dgh#~bztIh7my35kB$p#zuR{@1_~pn z4*)@64SUvWzx%5#H8nNb1HgrWtkzB&5j^o2BhNdF_%WsHn$=;DNxAhHD%&Rx>Q9s= z|DHW|7q=~!=$^WuEE=&jCH;@Aken=7)TpZa0TT0=jpo9tj>aBY~YO8`4o z6!O&UzuN`5rGSPAEe%`ASxEp6;(r3;TrH;^3=sxpnO9?^5N{kf)bi)eoy!ctuBlZ$ zoxcq+KC=I-ievNKhG-QuvtoapA4|!BJlMeSN@;9xFlWKu%Z^o_o#Zt)e7is)m8yt6 z!qaL9CpcF{&tN2;BhKF}Kn*-}p%7&is7mR2OYskvbhAD$Zx*;j|ny z$2}k6J(L-t1Kug^goktCz0u~cI{16BRdq9iPw@Say6dm;w70@M;$9$bId!1IrW+m| z3O2zPOp{0cg;)pytFCL?&eTw||AuDC-fx&?JKTYel-$Nf-4#u5?dj~5y~7Y>8l}9@=*mRpxq{>s^RBc)0=Zju>tZPis?;7E=0}t4R>u|ndK6hr1duPY zVP$kn>PP31&*61ntv98apKV~O(35D4SRgX~@J56*JL)jkc{-_KC;!&JHd+5Yj~6b^ zzkwK+rKFuoM_<=7&r(P8(w$N*(xw-W%#v$&7t1N~6>ctU`#tR{HEYgue zb-(nCC!p;Y)uG8!D{7}Tl37#ZxR=*j->!^kv-*3z^_2yB53Fm#~?+@C6F#O+~$$Rsm z^hu<{KZHjrKR#Ym|9@nig;!N=xAy7o+;mAdNT(p7ba!o#?r!N06{Szdc>uVQBlc6-MtypLznbC_akzCuyFR)UI1BbGV) zAob+4`$MbBVw`+tW=7T*Y;Vs-z@kmbaSWu|5tct4z()=rEZEv1!W zJyoy7+jB3)!f{qZD=;gK+aj(nU^HC-7tm!Aye`2UN%+j4Ckq;5+D$V!Ag#3L3vwGD zj4*tpVYdj%#J9c?ykWJGbk?J%t2pk-gCMo}zT5vHoY@^$?$MIDIOJ=7h8shSm!7#3BjbSVHaNgGB zC7nSk(XP`1ae|A|J?{~>kd@!y;*SQ5!)^{V)@pP);zn?Fp>HnNK)<^JD(Kn^^MiHfb>eCK$9{7g4cgvZ8g^#2gGU& zQ5yUT8G%xE?Z^5-*<|$B3Dnw{z!QttM*cUzpoJ1$=UzJwpo#(rv@+63jX{EAZ*`A< zeSzN87cN;!>FUCbR3Yo@@ytkJ4%qTQ^g8*@SozNWpJRUpU=6d|fz-%}sVQnKW*QpF zpKVc5QOcT{bjruONk$0lGdJ!zKb#EPx`rbv0R@eQo?hl?XC#>u)>Zub_wTaupzWTf zGVS~5R8*uG8DUnEx%p^gKMRa9=8}zHc7YnZGf1d#UHl?WCRT{au*OLGqK$U$+3~Q6 za1)k|gwW_Y*ru=MZSqky$vy<~c^BzI3@Ao(1$$UMS|mclx2YLDzk%rVpf)s_kw@ugKcA>|jl{(qiV|m|uY6?RNK9BFwlS z{&BRg52<5z`JhbTbTEo8w*MqT-QxPIDDn0Z@b&_nv)e&F3oTyi9chIfanS_%7FoSi z!bR!TrAj9^Jj96QNK3C;$WJyPxj9$!Wv959Xd)H*#L+4wYA88@UZhV22Ox>TA9$_|0>;vkWI=4A1e67)FGyCTT0~-l^E`M{SVe=6y@8AkY5My(fhOqZPMaYkGs?dISI7ak`B*&OJ6W`f`w~AZeY^#oUFDiKhK}AWKSGnkbe|K z3y0L-g1tT)dv{~sSMNp>H@#=CJ=|h~UlHJD5;=pL=4g#T=Szi-`)qV9Q7wDzUN=~) z!s3?OVm>7aBO1`&1`VZgUCkk z5eP3OiCG4(5)HhW8g)Bszunr77fW0y&y|V)SyQ8`Z>rZ|i%kexHQxMOMl!d$%6eKg zHl`RUV>Vi)B=&>{Y_cT)SYe>mbipsKO#g?&^LUw)m)wd*#Ze9jB(#dXB!sU5+ufVw zxEWAi06dxmL*bma4<*)fYRh~#&j9!Q> z>4Fp2Z$&}>?{f<_hV2{pCCcHM^Ta-LnjGWCj(1zib3E3SM-?GI6XZ}(f;bY;aMm3{ zWSP=IbmB)j=bG4cbkev^5CEu4EKWhy1)~;Oesq%&Wj2zHT7G^an&rB*_LYDxeEvpY zPQ2{rPYE!g$H@}ywF3~-hf693v=6DDEG#ff3U)rAk_FOBYdG)#0t4clSAJEQ_rl@{ z0C6PrgDE0Kd$0=2RVKZD&GS!&%;VPZi^~i3kx~+bh?yudw^STdQMXY|OrFxB&jw|= zWGkQh#ihwxJ0G@asfw|nMU>x~Od)8L#?*Jhy}5=-ej9+2g`@b(;sC=1np1)^&~%xH z6jfkg#+h_OhCXFjf$n;cR3;@VQ25?uiUI=Q%xg%7MVTlYuH9(;o0dSC4fuWZzSI_zq`DrTMX+b5 z0Lh%zc_7Di+QGg>QrtwMXB)jm+sVk zs*soeeciVLulxjyTdu~P9c$akCus6u^X4{CdwTKJ?uC;y)YJ>~M5uQ__-j(oACLcP zQc7D`e0EDA9&29ocP;+sKuSEym9@RSw1NVn;45}Lp4KlzKj7HH$RU;2Nq z7af+v_6l3<^mJsy8MflAwVC&^-caTIiqHGuFR#Bplh86pf64(I^T=|w>jL%F%BrFF z=>2eP0>+Wzk&4WS1Xfz#p-6+s<}Kh*1iHcENODG69;3_=01pwqR~4zVCk?ijE>xiJ+q?kBaVpd|HySKGNrpX{ zDW+MtxNrhzk%vd&f za-i~1&VJF zeVd%^2vzM5dd#G$x!mKOLgPvgeqS@Cez`70GOTNjW~1fXn4+|JxN#3ODHd{NW!vp> zhNXDoM=IiHA9#cC4iuQA%VzqN*DxyxBQ#h}H^zY&wkn@J8+W)gO3Tx=L}g$8a?qy= zDG1PD;NrBiAaRvjE#5F>+J}tj!Lo_WQp%TyhnbB%Uhnr5;`}NiGea3W^gqN-DOxpM zqLl^}7x+`4Wg&S8NzN_t~MR)o9u@^etkz1>JDx@ zUqs0Lu2g_E#@B%31|dVgvi(N)-0u}%IBj%6ml?DYxVgE#HpHM@(DR_68Qdoz)Qx0s zBqexfXNO#zg_&7y&ktDQ5PTW)qZ506S#2t>0XIWvPJs4YQ9I^NsvIX=W+KNxQ#D*} zzT5fdU64I%4<4XlU`2_CoyTm*gYg`WzR8O5;n4`IIt8^RItV_d|HurQ`ml}ghr;Br5FfB|oEj075oq}aO7eh}eZEp%P7 znvgx;g_4QXTEQyWED%%TFQHIs8>7$Wa>}Q0iiKWq5!%d{f_tp7_UzD$Lbli`_6Bph zE8-QewXl;WU)Ww1oku`yV@~$X(`Q}MKRuY5J(}jZ&c#@SYbC^v#T`4Dy8gcMc=KQ6 zy%czKqTOb13^a;(B;o(+v|4$7n9+?&y}$R(Z%b!SM|d$fpKN$SfD4ufwoDd5d#|0Q 
zwKJw}Ow&$bFoaR8J3q;gNx7`9P91E0{T=IH5iCdhZe1DO7P%Rz6-WROe9Oa=Q(9Uo zV+OXcpx4eV%gKR`G5g#3^4NK<5&8Q?M%O{a=y%vtInIKZHXI~L&cuS3%EXKQ+uVd* z{?;{85>$VdpGH<*>|ONNuAJ&XCa zj6(aL5`%FPKo4k}sr;kv^;Dy)r1L4dN*-i2YdJXxpNPq$gyJv0v2nPowsVlh7v*Rt ztH}qKZ+y@GY8TADgXg+8UL^0&_ildU8l?g|o3~zGhu-|DJ2RPVusZO!ckk349kZ2x`!(`^uhh53{BAuXK8}y`0O{VH zbNy@mJ0BJSEQ#+Y?vWg30_3KbH~mKXrR|%mFdY2gBh(nkfJ&x?>* z3%@=$F=zWHGqP0^qv3GBckpeviM=0ff<@NDL)DAn5jxcFlGf}j*dfqG@)f)nnw#wUgooSb|f>__X`&m z$hf70^Sn!6-?C#z#=WjRA+Vs$*uBgRo%VBqik#wj;C`vspT~jqrk91gHTL}=-M;ny zBBG?4hJ8Zr4FOh?%*vgTCLQaYm*~-<#pfHye-uM`vByWo+}dEFPRQ8>c+KL$i}F8k z{1H%o5lb3PFIobD52z!l2&}@T0UE@E(>TZP;X@vwLjP{@x@3RDArdL;p;qbI*Wk$J z#3p>u=BMYd0-%VUL#spJn&8SJN_jE|#6)W!UL3B^0vkhpIZSbnJ*e#2H8nW2w6smY z_K9;Kt#}(S%y$Ev$srJ2MwXb8!gvByKS&+CZ%g=nU5Gf_-PlNV*He)p7Ccpj4Lxj! zL5*wiefAnz(Z<_ARWbtzS#OZ~?OpXHh{N=^Wwtb+-&CL=fxa?GTKLE4rq>jsPB&C_ z5K<+;$m4t%vXs<9ZOLnJMGD#c<87l)f!_0mPGx+a%;*4$nJld+!w4m&SX*KM+qw%6 zqi@F#W9I0@X@Yf+h5L^DL=+WF{?aY)96eoUK(fp5TatOq^?(mwf z4lPKIEElu~!gVY*Fmb&*BsTT_SS1xuOtny;DF2BU>gRzo)_O4c#Mk=_z<5`Be^Y{E?r0OB{UBgB8-`#1IywH2No`ruTv*5#ED#|O zpFEa&IbtF*jwa@h+>odXeb-qb*CcTZT zz$JQM>A9|fcjj**0vHuvBlc1B(yML50%H5aR5Ow6g;Pzeh-v^`H3pxd_V7L}IOR*# zD8l?YIHguc^H|3AwJIbt@Qq7wsM$u?ws1SCnV6YHTw&MNJEeoEX4}QmgUI}ug?5T` z5|h;@5)Wd3HDPTL)YJIuifqxVcl+Ff7m`>)o`Siw_eX;Eua1k_7f6cNe*aTqas7ao z{%C=Cxoj^=a=$PkNJHq9lxVW_Lr(ddR;X0%Bn%tnP|CP^SJ;vt!x7XLkN%pna0@XURnTi#5;P> z83yW$51=VkTTW8%?C+bNEY$b;s3 z)+S8syaEa-KZ^YKdHlq-s?R> z071tQ^y~3QvTwxYE`Hg!Nso^+!y6j7-Vk)uBh^ZBfBZFbuejSfds=CnQJ)#09N0eb z_s&c3cMwLkTSzH1en#^E=72>FufdeH*tfq1vi8=JU^mpQz51>(hcKRe?9Aa(j?J>3 zA~*5(C5>&4#sjsRMEdSC2Wr9vNs4843n_jclaRrSnKjIOJFxF_3gjSm=G_5sC2@> z-Yp1d#$NrJs>6`x{Y*&>4g9F+=xV?azXGJ;djdOys@))(SYe}CVidsmMAi(fsX`Sl zy&}@V1K_Quc7y`t=N^>F{)Q{2*#t=-Q4BanbE(2lR>S) zEprt8si;M?%!lvyz5<}oKlTI?jZ4SF#7B(9{pk=~8vGc)o0SMX-gpVv;ITd=?U-zh zJBQ~h$fZU0{Lnb3DC!#?M7>xHKYX(8^cNybiyP_wHtO;=8&k-2 zhOcKwI`nwbv`JQ0_#Fiy<^5E`b%w0Vq@OB2lJciUx7y1|9_C4CTTIZJ2PA8L zpPbU+&YWErKU2%CHN9-B4R1(WZ&sm5!nk^ja1{p3xL@a%tBU}xO|%)Ivk5BNh?yTp z*R*8Mc=j7<6+kZ^A34z~T}ZS5zEAbUJ^!KEp0icj3#ZyMIvDoIP4?~Ans52|*L!bgi zGQcXDFA_!%b44#!_|2Fo!B7BEv2(Qk%K2^FDIHr7+q(GGqJt8~sJ-nR-W-q&BhqrJ znHEJba}i{6hT2Vx9wC7h%f`}DfE0}s0f!^N)7FbUow zxWzzSkco3gT1~OW7`>j-7)-_kf9WmHgN3#{N6E77Uagc*r-RCMixt8{a~nsu)2w@X zqA;|&9Fo4A`nA1~NT*RSn`HsfS`>y^fW2^lsFP+^|9b4;fad$k9NX7Om_FXSH{>o` zZYcgA^r<1g4DzYpCTLFc5Gco0Ye38TRRfZx8_+}w;^RGf@f@H1_~6G0DZMYL7-uX? 
z<8J93{KpRh+)2HqXwOS{&^)3O08_ErKv|A;c1}uCCf3!RT7)0ik8(bELpis;94@WZI zm$b9qcFgyJnX`yDmIo(5F#{ArlwLDRmaAZ6a?JJk-i^>3n2}}yvQedRh?ZUxE5!!^ z-j8x}-49%Q4wkPpKKu;C`C$6sUIjpoqhq6&2Q6Tm0W(aQwYBvdQBFQSLR114ERZ`u z5u$010S9gmuwc*y2g~#=wUN)?i^0j+Sz0VRU?&C4`~ioJrDYeeFeeh|pA@Kx!==%~ zKeP2#G{S+6MgrV6Z9Ogcs5?Xg@=9U0=BT<-+decm_Mzh&zI+U(xV?;*E`Yn5 z5TNaneMb{S=;PU?KX%V>U0V1s!3l?`_kSB95EhLS+z?` z={fQnx!ShG+D0N8eC)vqO@PS&XB*VGHlgCP*%(uteid7?+XXg zp=!m&1^R(TYV@Kzbq(h1Q#~+t1qHYsx*tAf2CQ@+?HY>#Vt5$aYZrB1YVnf9aV#iQ zF?OxS8EBu+{YQP}kPn5Es?3W$D|)9Nonc`iWB^t}`&#|JsxB>VtUGYK+4C^N_3y|iT>!S)1+PSbt!l!*?Xy~{AokK{%VA}(F)uXm-YZ+t#}+1Cq_dbFN= zUig`i*t$8Szw+!1P5$DB5ZBI){)mnRxu&GBdzI-7I|# zwTh^&_kZM6nmB}rSinS)E#Wg!+_;dyNEEo$ztTppMj%Q$Utds^p@meHx`B&WFP~Q_LM5h*?LZ2Qddp- zU7WKUz`?hHeYYK@j3&Vu1k8B&_B%DiV*8df;IEoc&8xfwCWjeOQ5L*QQe&5sD4#xgghBjak2~cx_>67(=1{ zMC^_EqE+9m57}`3?z9>tZmSd>z-q6wM#ijMRf#S2NTTf9Xw7|Uh|#dsnlyarD{?l5 zzx4)}gU<1b-!z<4!la6Oc*D441tI09US> zSNK1A&|1h#QpwR%MhfP5KmPe&YewNrLC>;GRz_gz+$J$+vxAoZTmm_+Q-_1)WZq$a z^9ff6ruX@=_qm%xqV}_IGPFeXdV5l2>-8n}Y|S}Gkr~{dHq2SxM106yI??3E z)42$0*2YNp}S%1<)`I2l$3VuMe zDSGANbu3GD{bfp)S#Je})ISoET@tysi_oG2qFB$f6EY4H3B0zY1t=G%aBYQ=L%KUD zQ@MNUz|8ct1|YKOCo~|C9GOedw`Y7BRNQ!6=$3t>SJrqLn?wvQk(%={~hD` z?#zUm*8)_H@FN9dQ##w_Q?p~P?%mkxHPYEu923cazo-5dY0i%Imv2# zm{z17r0G9rw%_LuxMN6YpViUp~c zl#-4!Dn{8X*Cs}}j5VN%iK5M_yy|l^=^BMbk7z4i=5&oIPDILut*d3J4{I2342GLU zArBd;fjciu@9o{;KN7M_Eq)VZDJwpkwEkOf*=fV+F2+ZA)|LRA0L{;2hSh&M;@5_I zwjFj%rh98AJvC@`HCYGBP$(H28Jti-ANk0IPl*+P6{nxQEC`OVqRMkOpHWf8lG1k+tuUVTA-&rK9Vr z5pb}5Vfw410HxWI){`6g^SFi>JBHD`6MKXfZr}hP@u}PA10znUW`J8J-TFa56|2c< zm1uT+g%_>k0DN%Y^&9bUW%`+`$3yC=3F#FR6LIaU zx#ngN*{_0Cqnu)xpm*YX?-l-BAD(KzcZ$Pi62$&qHtJrRS(qtPJ$T-6WRdIa7rmg$M%afp$=A*fOw2w^5?>% z`C}1Qm`hO7uLTA80d>~uoqB%_Zrgf_$Wl^%>AU=G+(W#C2eAK}@*A3aYcU)>zbZLF z69xOgr|3^rokkerTFx?l`ed&ywk~%jnTEoN-DYn^?e;EdawmP^Cz{(Nz&&V zKu^v%@O9Aq%5t2HZ9uLP4+ZTJ@~E>-5aIXmjt;4R%~ynwUDP1NREcnl&%q28?eTG+ ze+dl>a>9TiV&Td=LpXUgbYQ!_Z@Kj1qvm%`5fa05FZH%Z$>> zV9>-^3nS_<2J3-Vnnx`(7&d9IL(B>o?wsKLSB>V!gVoKg^@FU-c5>MQ<4wfEg%-V+ z*Am+68SoCi@Hmh(r`>CR4ZavG8B%$+{R9N@b_I{7wZaUg_BJIMG@KBt#ve21EmA@) zpyqvR`dKU+sU^1A1Ez$AWv9hGwCQl-8u(TBcK)#- z$IdwW^l*WdS89+aD0LR>1N$!F%V{-Z=myaDa$yh)Jb`7qs(4KtWP4>Fw8tEuB zB&HyomfGuM!b$z$Vhma}(`jrlUP{EM($pVcc;7iwh|Az!4`Uc3gU1I71Z9AsEj}_d zNVkhw?rprH^yWd#yo`iE{O@$H)6cZu2)YJPJ1}De_|VHfEF5 zPLw)-4O}EeIqsax!=q3z4SWrcaynL~TJsV?q+c6F*d{mf%7+@KVEHzG*J2+|&eHRQ z_{VAW$QMtZA>{gpuczWDsCJ$RxZBGOs;{CZ7dysR!!#NPm(IQ{W|~Ys0Ijx7Jkk77 zCg={BBy~~79DZl#ul@5(#)BcmeNREBNKAXJlJ{(n`i+^7imq-HjZ7Sxe*Or(&s6~6 zOJA@4bT#Hn?@hrWxGY%#e$LvQ((B=P1f2KmoealsRWY{&VVym z(Cf@fUMyHySKqE+Z{$*@Ji6Fp6$F9`OP-S|o^~@!bhUj&Bm{c$LNY%MbxJlLxr$T+ zWoWmhbOraL=^urRWY=KlP=n?*`cp|kES=mj^gbLNcEA^*tu-<@7Yo;^rgdyRhj&TS zv5M(Xj;Z;9URX`DtHqPE%wa5Szm8O+G2%K}`YxU6AD0$jLOA%kzd5z|b&Vq2&&y)x zgIyIP(GLcPtk^ITRPVZhG?Xf^8WrzKO89A3m!nGsksc$|MA31C2*;uIdQq3E{OJL-sgT6Ov5EOK{n|!xQt)X;* zYDH7=xwoTJz}w!H*SpS{!v^d+mtGq8-8^~seMhAfFeijM(xiFY*Hs8qnCHLrI^lx^ zHVX65%PwjsTMW$GrB7Ok0PVcDUyZ#lhNM=-!_lO3X=M~40$o4#@i`ATIHE%w-j)xx zc{k9&{bHlV(!r(g4Y{`_KSm|&POE~M8v#HpCZ$)s(u%MeuSXD1DO?3s8`dAVcQIgs`MlX}E z?bIu0ajvO!bHE?0To2diM9VcUr&29T?5B$h5;o=PDY7T1(F>|J^ak1!wJyU!IX}Dl zVtaEkIgLHZalUlxeiz9L08u45hZiIL>hX(Bc2g4hO`#h^8_zm0MbD&sc}J}-tQsnS znE3O&LK={XI$BW|U=;IChhlIFlZ>_oM#g@E+mG|LX= z*YPv>{~jx;ZRopDAQd8+Ds)cA_DQUf#Z%E`DXC_s7FT++ZzjUX)yS2+X@b1(3dT_U z4m!$V1o)>iKg{mgvPri&0S7JP@$U7Y-8z}OI{JPVleT!ossQr} z5V`9RZt1}$H_fqQd%*Oi554z5%knNe(m;Nb3v1=mD$arajUT;b=xsXH-K1S(n0R^I zw%j|Wlck@ga$OTA3MdT++P$snzDCamCGJ1s1b5V7iTJR#g@aDnVQ7;~S>Ss0v!k!O zgSr$e6JVUX7o_Mvs!)@ys>GQr7c^?L{(4BY)*{`P*~m8~#XnH1Qff$89`Wp1?p<;8 
z;ki>&%lGh! z6ikCPlDkFzzSso9+N_*WzL@*JyursV&|sk@&g(tJJgf+VGyYyuE$u%%{#BePUn?pp z(Q>(CMk!#x$Ofx0LwHzfZi9C%jZ{#jBZ$o9=$TpbJ(uM~x$bM&#cSlgOF-A65(ONl zTsL89khy_221Kj=JddR}WM=otT2lpg3nHl8Q4M`7#!b@c#rK$ZbK1IickrG-^qx}E zctNapMa8clDQp*;V+_8-rI^BkXtkwQPF{tfn1QX0xcLc~Rng6Dj00D>5yU z{r$D%k0G6AxDU7=Hw0?4ZwCaUVya2uJ&t8hm!*L}hKI(e@S>oW#w#ZIQ|y_`OU~Uh z;N&TXGS@Hl0dD}oB+}>+`9TxET|Tx185zB~`~A&4BNG-NNLAwu>L()-3l;>|YaM`d zs0^&Vo{kJT)+zCGK@hee2qM(?+{ZyR^65xN9X}LMwbe@O-L1Y5=(xe}U{`u#=Ybxm zxcqTLeEM< zh1&aat)(kaAH~c=vfLdQK{G5E>>9Nt?>1s8^fa6u$UdHy_8$8{;CrxECVst{^xs0W z3F^BMz)}cUTHB*k-nJ2Fh9Hq|OclIqcO02*vkin3qBCK*OsGdLvomtnHknnthIy#v)9;Hzm# z=Hc0)f#d*YrAw;I_Vl9JdrXvLzT`0uoc`^Hj^u!^#g}bu16v`Y96E2r%%u|X6yo>! znUbvXg%00?bWCbj0rksKW$(-n)*IZ zbm=`ac*3O zGxx4(;nTc?C%Zn}<2Tx>&zh)^KlxSuny8P1a>g%?oNa{aU5`d%`}~!)`$NvJVsI7@ z?2UlSrbs90+L>0@V|(a%CLhHOpLt2bvdSR|&A0Z*9c5uYz+Xy*!ngC$4Aa!-S&ez| zZ7)=Hpp8pb)?1(fz?RHGG$-$^r69^UQELJJ2?dHg7|zE*jdMk(0BtR(gt;)680(BLkc_p^^$@C$w0oEDvpSM!mV6(495&-4aK6)6x&6R88q z`ZsyeS8t6nJ&pSVFpGbl>T}kMcx+P&XjiML@*Nb0jn;>@Y&Fc7jgR9A8!>0OwK0lZ zL7edYczHk5^M*aJVY@!r6``%#Bj+F#u-IvsmvV#I}@0$sn$ z^{Mm6nijpEtQM#n1O&kBd)pigGLm_e@eO>M#BbPgB+|`FiQX&RH@VM&FY>qjIzzDi z_fFKl6^c`^uc63~7O7ub@>mzX0ms0tXUlID$pM%PYvdm|pu`19Yb{#C_i3B5bO3l@ z0IH2!v+H?M|6E!$o}V^MM(gpRJxbj{k`QCWlQ$L4tI4&Y^2xj|tFc2XB;=d-_X@kN zhNsK(oo;F?L`?kc^Bf}2z2X$lh9vi$lobAl9{E6%&!S)L0<^^f7aZ1yyAnFb`rI1# zjisHi3(Ur(o>OTp7dqiQM0xoZ=GjHGLOj+-26DEgEWJIExqN{mtyAr??HuOZllhH2 zN_Ho9G^R7}ra6nkbY3ZDOgcdq!`EOSZ7i-~ENwUb*pp-KQ3&wBel#AaZ=pvJhe3pc|@W-$U;k%tb|HN1`n(elkrDC%u+WJPak+Fp4;LY5oV+TZ4P}zHiwV zN=T$(^C#Lm&X0K1;MuAu3c0mF-E8;8-V&}qs?S5{>t_$YO>zUA<^k8SVEBIiA?}N4 zQ|e%kDv8HjMzQzZVZtn4zD}J>#|qroN%-MYQ>vr_&So1-pF|gP|FNW0f&C`<66e3~kP;7S znDWwM#E;_)13)yS9>sUk@C|qqKbNDvL_mFLoi<-7x3>N8o-%NLjn1kpUuvIVeOwx< zpD}COx2e4*!-JPPUVJPN{F|qM)Fwu5$H!t_jlHY%Z?`pze-qAZJlnw3AhWWJMVku^ z)~5RCc1)ZhLu|axc60@6k7R>Xyn4=Y&JmWZ>Dw*XdYCaL@(P%Drv1HgRp&X@ES}j2 zml!iYugGssxFULE)+aj<7X4v3$I>vW8!Yxd+IV@B6J!3!xw$}bG(etRz+Cjv%_|@x z3z^mlx0gdKSec3Z4j7E-2b%ic&!Jp0BPORZS!iR0RNF6n)Zk`usxP6SjusnCj<&$p!)`D7P-`aYL+tKLlF?tLz}!^+8;+&>v(>uZ>1!b z9!+RCGPn#-(aJLmqK#Rljq9u}^3S)1bQql}p{yRCa=h*7ew**-c*!ujix~xZto;%x z&RX|nrbxp`%5hgC`e=cdJ&{JmdYL33`{0Bvyv`W3Z?h=N&x||G{;TJT+4<{vFtt2? 
zD;BQeFbH=ZZDlZSa`9klIIv>Wb6V$C)RsKWhJLc+pY#R9|CYb#gjmxY$8tJV9qFai zSnvTqqEO}Z^~6d-o~CsdI<*m&r=ff#VB54m$0?g z!kTI{J@*@AqxB}}E+a}00zCW?kGI^l6G)7W+K}e>ESNnlvKNv;D;Uz2g}X|FnDj45 z_auK*uC+gPE|sO36H)Ry+MQk|ygH-ZCPj-##vvB24N8YmzX*X>2f43o*m`5sN)oI_ z_-+27zuRTk%^#XDp#m5Mcuo77$S?S_5dJ+@G|&{z+%*c@mxD{iI2o6u=qG|3YnA!V z>aQgYl!_4%<>)_%$lyezY${u7VQPlq=5pkeR=qFP?f6!DOJCg8NTzrjsO*BdS7*ZN z%I##=*O<-{b~)D0FlG;+Oj(&5OXUWcrY`z#aiy^r7(w_^TzT8kSmmCbx=JmX2kL1r z#<9*uBHFAKOz4A10Z5gg{US0wI2m7g8A4N}d#{$N9q!wrCK*6`U?X00YA5>4|ME<% zE~*;&KtDWrHRJ0lEP@zlAy@Tqz+&}mAvbX`LJF-~uEfkS>+6~M9>g{ECENCwX|(e7 z0(Br5KVIF0YIB~(bx|WKxAZhJ6s9IME0ukC!&#G*DPf-&gH3#O8x_8PzK^k~6o0zW&GJJrq{;LMsX5}@R$CM1Eq@ipLtt!izB8HgC*P#hzc!Ca z`u^Vh+DSnID)|dTVMMhp0%WD!k$(!fbOXHfzoC_eVjr=Wo=(J&k_``*>h)t4&>bAKto9+PqzJ zoomNT8{Av^PzTV>qp~?D?uviurX+G-5EhHt?5SWW(p;l?w_qXOjaLkve)ftHp$5(A zR6b0ntpoS*j;1#5a^B*H@~Q+Nb#Wn2-^kS$->rx3B*SZc5H`Ye<%3OQ>hEp9j|p==2M<^0RS3M0`#-AK zQc=h6xi|0KIq7_Jv2e$jLiQo>#w+nNbf&AdYTSLud{Z7_(ydie%Ma+G6MObUgIhAC z53}Zg(&G5(IbA6gx}0c6Dy<>;8u=ta+CogG4Sb$&GQC_$*mHTQ+lOCA)7&mcxzOk4 zFWWj7A31;DwWKtK)Y}uV@O{((Cz{XCrxof88hUSflSB}9OC~>lAbOxT`)N<;|Q z1(OAy%p}BQv5}X?&4;AG_NXfl(rPC1hC1H#6f7b*wCtLw=C&kBIAcY*%cslL(=(daQFx5qZToO9>cuzGE=(ERlbc=c&m_i8w!p*e_}Dx`^dsLzHjC>-)rUIow3`qDnK~8zV`3&5JQ%p)P0MMl6`ZN{ApjAkKzmiNWGsBhvDO9DOhAy6HrwiL#*AD z5{DsWFH#Y^Vq7H5@neF;P8}V<3%FIxH6z-rObHs)TcK+i<6p-Q-w)=9yWI4iVw4Ui ze(Xl?n{sa`Cbn(l8Ge4(2%=)Tr8zbIFb}70s=zsiIPKQI(~3c;Wxe(&|CiM zkxg0iVCxIjkwyJbH>L4Gh{J9coHg3{5t-*xq5u7Huj?ym`TN}dmh?q|$e1gWQtd3oc5UHFf6XonPM zKiaphz(i4a2mz4m^H=uZD){~h+5dADs3DF6xrZZj$Z$JJD9&r1lSXwx8%qzYRN;Jj zj>kV2dxxTZkMNuy*TKp-5Kc3;UUcT zIF$ziw07^$&`#u(M=sl67SPl!vs@7x#FGEg?5rpUNX4@pYNa`C+j?>2YJb_&sJXWJ-Ey5K5$%<7<-y|H66O2NPZ6T5zETkcvTq?aBPGKj` zT{D>k@Zv~)q=N)hxFlrCuGZCKc&3N~tY#}zWG1b;UmMEF)cB^~&Cm`j(NS)Wsw)bU z8@vx;D3g(@l0f{%-{#)#DN$#*6qeltsmDtN&u6a>0SYP&cTD@wYaDK^x8tx|hi_BfOyPe;X zGT|c0Fi3|JH73LfO30F>44ui&tK~k%1;kKtf6BcitK1flfcgbzTHylgKQF9=B-+RX z1>kPj9KWEikulmarMKN9qpp$yfEptA@w zxG{lR6M%IE8fzv%u?uz`GwW?+^v#tm?GP+XFBum$_KqC|WGrF`HNxH;vn@pXML4KV zM0wa*XXh6@z-g{dGEUM72j(*$TO$Ttyif8VzGO39-PuXHbXIg-A`|P}8zz+&DHDN$tGbf)!&Xa!h;S^kM zbno9{?k#8pG}x}BI9W6KpLLnokqLaI$$iC&s~iJwtiylw5elAi%pFm~;c^hfyxnn$ z9a?RLA!^nA(vnZ6=5Qqb``V!Ck1hq>pMqi@|2GwXP?2yqHnr;W_;1qNa!`RBk}tiM zl4CD7bY{oaQb#xL^6Le}6osd*TO=8}=K5k*jSNWAAi9gVR($boZ`>v9y^)_n@;@O<<(XcQJ9ITdFbjFX`;>}GX2 zaqMr9E=BaBc*aNj9tfko1FB9ccZ`9wvV|MxxGqk?0eerC`q)8~s?B~F{acBzP&K7l zS%K$3h{*|Rf75e6wJpD91duHev3yMDl(D-xkfgr-vJ^RRJIyMMnEakv)G$ygo}p{d zZu5p!s7YL48iVbN?1urv8VY1$wMGw0Q>`@TZul&RuV;~NhoHeK4}UWb*)bP*EF&txxZrs42eL{3G1pEO_VpIj z_O`vCySH__m_L@KBn#(|)*dDObn@WgC1dIaB>&CaTY2;_g!1-QL{K_saJ#St~2ak894^ zvuE~fH!;5Rj|aC!E&oG2;Q!xk&|{nd8i$$`?l1{^o(9yl*O3<2=@&2Awx8;!1t zdoyPc?R5y-_mp2uS}@lKMBOWV!YeLmI@jYK=*6JfZgOahnG%Rv5N`!$4Q2Wqc>!EPiHDJg>r55cr$%%Ndg?L6SRQ|wc`}x)C*+;HLKP! 
zqO|PCl5hs*&Uz-6HU4B;n8$_E+p!-c>!|*%{KYG-`*YHRV#mxb*8B zM6=nE*LDYsq*?I(^1s$A{GYmFB{M58sQO8z&-rvyd&m~Bh~IcY;6hoZB)t^zY@@*d zU<29@y*JWC-}V-NSL5k(0aNdw4umxF;l(k=f{??pqc8pDN4LFDiq)GMsVsaL61AS= zXHR8K(U{KfChdYov z>@(HQhow{nTa%D}%qvb3GxWpq!nX3~VO#yzRkYO$Nh)5lv7z}ir@GfY(i3kwk~YJU z!85J;pQoO5AB3Qg}LYKI~j>JI>h)v1!iP9u_CQ!D_O&y&1HcSL+%MT(gL!cre*ldAPUe>WmWG zsT$6=K8_E(J49w!_(N<*$-SQTK7yPEuv2AOyszfJkOiVPhTt%Cc%asf3_<-hQt znFh|Sac{6OTUE_|&(k0nTJ8nERv>97>1~bt^G}(_5Q0W^`G%Fhf7uIbl9)S;v^fiH zD$}!ey`)AkL(Le1xDz{i%j-u3YFAPeoa61AwN%XD2LW08+0QlHa8;H!`%keV4UTb} z(uKIxC>NLg$D{rVtGD|E9XA~N#1E6^J~$cZUot(J^G9@#Lj=uX-ECejCQtm`6yQ(B z2zmwm2!t*7`>qGTdjwHG<0ibq>|jLjB(qvNRZYWyMmKr7TcJ3M8JCP~Yy$~2vPjkH z`$;1I`AN-Oy6-}`|MthT7h|l{iDtsLUUu3RHHhpo@4pkVcce%Kei|H<%F!*Zu6u8ESv|FWHq@wD^A*O0hAwa2?dUY`xYIwptkqa-t}lE#&0YBD z0I@l2zK4GF-jf)kJ(pfXwDbO!`@~O#%&jvuU?tAdm}8}){L-%)z68M%!8+KqB6hMr z@{`53QxQq5W8(E$4ZfE49!-k&gE3F!JzLs&MtlNRB8clam7olB2%sQi&XuU1zX%1> z+Ui!nS!hc?Foo&1Utb&=l6{En5-R{DA=#ALdChuvDG&27C(L4%g&2!k)&4&6zkbR{ zeZ>0se>?);DB_^@_z&lgd)nz*v7hdDw`MS>X_xKWgG#hld;{y|+&-Ip*Fr}{J)pk5 zf}_n&5%@YPCnZ?@!ziCe(8h&#`Nv|VQM92Jk=a7@e;OLAjQtN4G;eEvZ3N^b_CT9g zzq`p^p~-9Ad%tc2kv$~T5+&3_D|NY#W}RX+xQZ$ZdMzci zYx@cJtbmpC53@`cFx2Ll1l6)AlOVki%_TsMu!5*&FMl;w@q)UsC#QOlsSp3qnV3S* zy{r`_oZY5=1-*$BQ4w+_fDh}~&%6^tlQ-Af;}D0AU{ZW=3hFQp+q$N@c!(Agev2cC zM8Ds#Iln)TZMa7&cl0od4AbFiR`5XB zm0=KtI(l6$8s)QKif;0cw_o8FK9B79q%N4gYyX3@6lRL;YYg^pYLWC763Zr@>@x0PExkt$Ur_kM!hjz{H2_ z-(AX>Z1qF_VycHFV$KZkEUh{w1>vQ?B$*cDt$^Tq+>AUpu$Fu+HgwT)pF&EZ5$gw3*BJ_x=~X zpB!oaU(V~`0gdsCGF-ISSb&KOUGzE+C}Yg{jyYtchM0bonx0!O?Pm)(DUV1%K8-hd zQjt!va4Z%)BizaVOM2>;)zk4+h%@(;^dYJ1@|D~sorY=qxjyx}$RDnT>*G`z1L;n*+ z*7>8Ho1_qU8@Yo{i~>zz8`w!2ip`<7b&UPN2*Mxz;K(i9X%#X=7__Q-!u0I52?Y(B zLElyA|A40VSH|dzG9uXV_M+Uy_+`~IBNrz^;&OS~yGdU{dpJ}fK^$#u>La4e!@x7b zp{T)40<}4{?e=f1>$b}%KtJ&%hBMFT>fArBh|i4Qri=X%f+tLUhh@q<1L(P|epdni zu0)9s_oC2_^3^{oT>Wk7fDX=>9sT1HDZctLV#5SGa>xnmFNinu+CLkdWuXxw&J_p> z5MPOuuIDsygRQ@_EGiVxF~>Z}Z7`AdYL-LqT){?qa{T+jb~*#d{inb15R6uw8-r6* zV{uAg+eRAxYT(O1wHFrjr<>$_Z7Sa*PBRPw`)JX&0r||^wG?5mC$d4s zwB}Z|=J656-Fpsh2XhCO|q>zgC-UEIJACOT#d z2Q>8ESkp$oTZe41m|Oq{aU#$7g+5d76?Mc}vQLUyJdcGvt!%%I_x+h%UEr@M3=pdM zVp;eu%ZKF^-d*tHZpHY?5GUH6up9IT2WdkW75) z&MR6nc0DS!1V<>JW$|O1ue&fplm9RMz~UhbiBThGu7Xh=dBcwy86_??RHmdG`FeR` zgd0}J?rdFT4JkUb9R=eD5BBKyRMee?B8jfXo>W!G_nu}U&_B0)k%EkPKfjYWEYJ}B z;N8uw`xI&(1+qrI9#`G_*5g^63&U@>cVA5ChUrn(wHI~j@eRe0^HMzX`9W$kPipV! 
zD`Q|vl?3Pg&&66L!}Gfw=b@#xp{gyzBD#aS0YkF9m3#+t~&v*>8h=AnAMwZ z_Sv>ND0)hW?aSSWy#}FM5#VLF=B^s-t5L@f*}=TM*HjHpR~{nEU&73 z(X6_1`P``U6;BE)jYLj&<3J{O+iu20fh$2tx7wzFen`om-bO8m+nq^;8`hNNQ8&}O z5JoH^a5f(U7DEF1nZE63-W=;$~=0GwzmG%q?F}08dYPx8fW2duOMRoUHYmSRq4z12Jj`QON zS+Ol=DsOP=)TO%$Vl+VdjSoFY*3WB=Uk9*kJ4l1qo_2{!#$$~#{LQqufv)0VCSE@% z2gj;GC%bbfo9Z*>ZLKeLZ|b$)1PUU>=VOj=bq>l?c5EplHT`Rka_dRRfszWk*XllbLYPei9}|;;Y8e&wK^ewVQNmBB6*m7%@(gT zOj_mE;_lJr#x5=;PWQ|GaNUGs?Ae2~Lnz73c!%Xx-8L}iWw5Z|?lqeO!5C2$)^@xz z+{j|^ebr*Sc(6bkPsNF5W0l>S*3TBOf$td=^SgBcE1}EQ&TZU}ZxH#pb#?8N5us{fPZed6ZNbZ~$gv10;KWAf#b z9Vj|6%-Mi4>n)qr5bGy)zJL$Y`fDcPjq zV|N~F93d`&;xXn})ttf`d6v!J+BDz-9#9N(z@cRZsGStP&zZp8n|%s>@a%q#BzT3K zX-d~yf^uJMg`J$>rp|q;emp7+de1&l+6%9a zWhpPMtj2ZJ)L&?7gj0DP#Yx|Sl`9tMs4|HmS=qMZb_F!*BeOimZVo-13&OWYH$n?~ z+%r$#UONzV?dl#mJ0EDUsJ<9X>m&I?$reG2n2P z(IG`a^y(hfL8AYz2w|lCs!!9H_<)q%HXgFT{k*2p&x`HXnyl6zhU)J?tmlA%wo?-) zk4XWN;+&X7riS7f&h%NFnBtkbQh@~jt9{yJQYDF=Etg%=VX65yR-KIJhVGO)4w6GJ z=iNV%W+M*Sk@zEnAqO3HwUQ39Qaj zjJFb8Tz*Jz=H(%vjiHpi1^z5Kb@Y3sh3V?pi_#!uOYh`^mv>g+4A8{s8V=j4m^WE@ z*^6!X#BP3fj4op1c<0Nx=(B=V^GkTp=UMfU{RUd|QU1T0@9dT$R$9rkv)z~w`1OVn z5BOEV)?Z))w#ErrW?m3fJ`&(oY@Mse3gQ>~YG96q=!7j(=OEsN*vtI)Xdyox9CKNZ zN1xoJRfI>*Xf|938-=VO2yHXqtUXsW*{2J4vG*`if|DAyY#M~p;`XlVp3|i9nA-p4 z3`uMYsG+~#TT3e>oPV+NOmQh3rsK8t#f9rwOJ2)htc?ymF+|U#jkEN?9e<1g zmCW|xX`&Rh00y;Y`fanM&F+6M?mP$GFb2%8Y=>zLz5sn2PQo_~9#mPz|3g{E`85<} z&kt{q8wPn({g1w!XrlH@@oGdNOcgYBCJl%r_rYfKb>EUHlqC0KSr+E z!~TNTmeup~Enl%Sey_Yu9yEL6O7}4+6N}r!a%&vPOdpBv~x}hQu|3G_u zO-1+TizkH+HHEa-g9};LnoR-sU!W`pg;N;-I_kPFDG%QIS3xUaASah z*dy`E+f2UYYLB8XVW!FUw8mLdlz;eSCnLaNhqWL(dkkw8TI1=AZ zTtozQDM{v;6j|5Jzw|pXlNWD{zEnum(Pes5ckFOe%DmU+9!i{OkB%kG>hj07nK;a*AOhe$+nqZt1!3zqR39nLFD-sszK8C!ksulDsODwd4w z`uBa8V?F8KwAdM0GYR#yM0dSX(OyE>Ulm4tMf*?bir)w@uZx5x*9odaUqhgzD-*AY ztk|BR=RpRe9Zr$6xwM-0zhF4`Lz3q;l1!kq{Ax9u(HaX> zY>p+};oqbdc+RfK<5WhP>QM!8;Y)TT`CFs~ZtH7K zLr@g1eXNEfqflbxF6I?a#6Xn=U_%nZ>0`%){JI2Cc=S;p#o8g{`R5^)_3~RrjCY{y zt61ZZXcZIQU8=zS0qw$LjH<}T#Hb+k9px#1n zfKVQp;Fd@2_KE6RUWV_tKB32f;SKG;`B+9i0Leb(iDAJ?07K5?(T1ilsv{S^cm4ObW9#%(aAUJt$2cKw14Y%cRgV0IlL`G4nQ@8H=;&9 z6P91h9se&LUm!mf^|6DAjN}6n$l7WSy3JYr1uEfki5h`wfBn^BVt_n4$B{o-sbL8$ z5L*OXBsnc;tIf^-{pvo`AwX>g)Fg`$1^^lQl5Gn`O$L=1gxp$|I_)H*XH3~0q6c@c zvf0;~y|aO-9Cq=~Ppm~&9#ytSh=Fz2?gl9^AY%I&{j6hN%0J~u>rux%4zfw78M-B< z4@wTi(NUXYh`u8~+I>;aD;|q44pma zTZ#o|R&^2>Cvn8zy7=~oE;5KBfW*JW`cX5oV1h`(Z^Z;Rb`l`ETlsFipwG~XBcl`h z!%W2?=!FY&-(NC%VpLYu2ZOq#r0DApVT>H~p<9xGX3sukuSuQwE_T8hPS}u385A37`$uENxkVd(!KjGB&j| zbgMzp#*r72NwSpzH5$~H%0L;)Vm2T36OoCcsQjpl!CsU~B@usGQ|Ao*ugb&E7e5d? z54n~ROua?t(s_K#MA?cD8i`NJ!}K95XY~ldhXR#ICxHI+yd#{L)-dhRuCB(4*jeTF~n5Z2auqUXS^;@J2h*zix2G@#+az&s6&8ucIIa&k(Mf zkFc8f{LfVFabc1OIy-3pO#rJ+=(XMDH*3H%I{ud6{^k0gT@EUdUV272Z9Y%!T2=VmGN-!ZGk#4 z1+<0hR;*WiaNWPr>k{<~WZ~OnQmk5SZ*#OPH`{{woUo zc(mlrWMP(H2uqM1mh;Yq z_Uk@PQ5itm2356_4;f!}8uaQ8=b95Kah)M|B4RlZXiyhk2??`wU)c{|kVB`N9q zP~pu$WD54p;YKqxT>Pq7`%hW_(*-;rnOfEJ$@HC_w#ph#`yVr<$Rv2Wtlm03Cn4Sj z=6}ncy7wCdAJNA11RC+XgeVT58xp+u;X`C+R9CYjr+1hbg>s{}&6u2g?h3%WWb(3? 
zb)P>^6P|+6QXBPL%4DP5^OS5OpntbMJ8rm&4&dFT8y4Pp<@_+{1Z&Eb;>slcdAv+q`UU5B%0oQi?`4B>HY-<(*hE>l@+TeJ@;J5m>YR||M`OCI`hJfTE zTO3S_%hQNvnUrmvUL=Xo5sDbe(*s-pn2EE2_qo!2(+eH}n5qT8YA*kjIBZT@hN*+e zZ}^48H<|)QslYYboXpi&^3J2R-`!Bz8<#E)!>Aq(Gz6J`)S#4?ygoTwo-k(Sg*h^X zTyW8KKb$%MJMs8WuJqip$rS$9zn|~vL^Z(U*F4_}n|2g2#SwL3L#HcgK>K307ZgNM zAU)<2p2zSU&A|R6(|&UT>h8p_zvN6HKem&touG0vKLM}?uh$4FWP#e8$f&+|$K!*r zDB;Bggs=Zi`R=$Ka|bd7?P5~@?=I$$C2=^BCD?FLAW}e$VC?)F2}s@ZZ~u z(TbtcOr-XYA-a!G2ThODB1MISmTExVj5Fst2Y&InY~O`GAbR6Z&q*4xvm_mua}V^0 z*ZrbfzoKCaWN3(9fwIc;MIKUnq8s-y^wWbtV@+{e`JkDdlo+MthMmF}Dyr#Vi_Fh? z>VSA8W)UOv+~KiAo1$23Oi}Jm{5H8aBZkFW_MB&m5ax+q>$$)o>Zr)Z@B7RA|qwFOGMa zaqc0fXK3B6QB>MXCOwIN0Ic5|wtZejC$t#+#chq!D0mG~aawwzy_N0Lls{s~OwU*G zQbtvF0|4zWZ1ua-C(AlrPkzV`7QySgZ%Cq537&9Q>J~qdW5Hp1i~jkR^-diY*t?(afZTMzIfS zD+=aLab1Mf@jt%QerHc|PHE-S5}t?N$0Hs^`rGZ!ysY0p481t>fR2$I4A%?5%NtVf$$_VfdJ2Ej2M$`?`yKD0HB%k_o z7E9!(Z>2q6Je2?FM&*WzZLFlt*GmP25y&Ew717u*x|n52j>3SRI}x0f##{N z17e~m4xj2u0H}s%XhY+#^5I6Q=o{sd?jh!=cqGvxZ^oh}LX#LB=}UQTH6amZbVK=`aO|ZEXgcl>btEUkfdWSZT7t6%lRh<)JBm7!*GX+WHi)>KGOA3+ z-Gw;#XwfJp@X(71)R_TagMxT5IU3~VO5W~`YV0mz5M=u4KL%r0p88lbwW@rF!On=)2M z5B$iGN{iq`?8Cz%sj@F0IItsmm4L2f!w^-nh8@H9k zDf;fe=KH5ZN$tlskuwq`wso$-f1vRPix0vZ3f=z-xdOBjXIj9PRx`$}eGX`(+>(m8mIPzMYD zI6$jnshJFIs5)q(Y(x9U5Tlpsz(pHI(*Q6qoBGZC93pu#Np1CHx}$YcEt#=-rKd)7 z5LkleL<04ML_@W&Wl3F&g|NX6kD$qyhf~9;*z{+^dG}KFn7w(^6VY8x&}7>P{$veIe{K z{xAAhZTsB$Q73ID769Bs&dU5n11F!Sh>WpJ<8h~MtQqLGZc}&fBtt=X8qC#&_Pak& zGCC=G=HLlgAo9}UW~Yy&=V472_ThR6_F;$t z5?Z-ABTVOdEW_w(P;WL`M@wtZ8tf;JA@jm-+D#0HboBg?f8aQ@?yo zgZyGei~=UEM%VZumaLdU+U&P199$3=c2Ztc;Jp*)rGsE&lil8()BSU)`+#jI%SGRDE(d#fkuXXrtn@tYNM`_KW6U?+^u4@5?tq`2`E=?j+9 zP%CfF=#ALWE$4u<%pvNb%vN|3GR~S4^1<6*59Rw>kR|{P=vg^!0GFW zM(vg?I*XdQhh+MSIdSj%Q>cK>^^E=)w*&2raVw7hZU*}!okUu9b%=-nZ4aUa%tl7_o<4$^ zt#YAfz;QA(i;N3>|42~=P{I*lf(c_|?B0MN-Oy8Um%>w&>2s&ubO`ds}l47h5`*@1%^j`_md)$D#JCs#v* zV-nia{kFNzM9;xgOXn=6-ft{yqPLLX(kqgo<2rhSU!jID;EZ zKVEnBMIqg&Kzv=t!z=1{c{zj2P*z;ai;50f`fu~#G^RPt55HJe!{VQXPx{&PG$)Zu zYT@yXDO}RB;RC|CO5af^cE@gXmODuon^NU+zi((#Pq7?}6CA0~ZobBXDm#GG>XE`) zVhVLD??!ULH2M?EMu&eQOD$TbK2-{>D zvwjccT!Rg=M-euRB?ZxiLyZ8r|I>a_xOAU_wx1?)L`f)>R0i#(`(8YedZ|BdX_JJp z5z;uES;L679I~jMScOVMdM%%?ha{kGPuHc$j3L~3K@p5~`%tvD>~*M*M*a86yO+oQ z_Q_7{yHR8J&M6x3QzR#KBSHa>2cuzT_&^)A<3%(XVJo%6ttaF6HMCxPd&Er@3gk`u zJcLaxK6uC9AAt=`ved#HBuF)^=#e6uuQQ!JEW*$Q76a~nKGe#5D~{C@u`yvu@t`(R zLq~kjJ2&FsK_xMsSNC(lLwiF&bo+U`xO6z zUMcbS^A7q>v{0P&MTD=3x4ukJ2d={ks=%942Q|wfn@*O2(8KGx^V@NgM;`1Pz(g9( zfnYYm_+s)g3@{UYX<&e&S|B3A0aA(>ZhDJ817c{(4aZc#B5~DBsv+#^Lp@FET9G)O@>2sj%U<1Zis;{!xfj{NcS?Z&1GB&qoiTM5|+g=vfLQH*3 zqYY78@8Q~H8q4^q06VMP73}(R66JlM1fA^a9~sjX6Z$9qe+^OkAT!1fsbu6vXs(~? zV4vXcN(1hoOIu?vXH20r_>(G&gC>_SoD4NPI|dsY+iRTqne4hN(kjX+3?1vydGECe zsNFW?@e9lPV;Q2^25R4fQ83Ln7j9FUkMWsm{W5-S^`emYK}tP~}TDPvHda^T0^Te*&JYez_0_*%4= zjhI%0XnKrDQhW}U^0lM4H6a3CYm!1+m0s-wQp&FWqVF&RDads0kmXZo$*17`9&A>`A82te1BXcfUc^U=wqs*ZCvdAO2Y-RPt2wP3vDVU7$ zykZH`ePi40#C?`EOHFg&9Bn!{=rA$5X)jg80UW@(pQn%^PL@MCzE{UAJ4Y@{!8H*D z7Kux3vR01|cD|`q$2En@qnw8>Xi$U0KtzS_zn3YJ1$)n(-XBKZQ$)cjWLsSLntsd@ zR}1xKOzA7uL!+^v_66+FU#DBtLfUiL!4Ib)f;VMYfd19A8BI~eW2sJk9_NTzBYVo? 
z;{KaeO(%WCLH#&e7>y~lcY}-TtB=@`Y8h14UUj=$@?KXGjPyw+$PMOB(Oj|F)HI}K z&bk8;w2%5a$RGFansY*G?bt(pXk>6#)Q!ZT+aZ{aQ0!8uP*PllA2w&8vp9x4V`y8ba$VK(oM?$omWAy}wGY zX=;8RnrEmyEa-K|+S=B3d2IAf?Dzh>aZbedei`St>DKj3R&CH0dVK)dnJv@c_j&rg zyJ$I?-Lp5Aw&yB3^xyZaUVDTtqX*$zhIzgf(AKnX#D0==#IxRLGMTG$$o_{mVE-Ly zOUGXZdfv4A(jeFf+A`{zhipJ7gdaI)JDd90CNUZxct5X08i*~CPu_7!h&q9AI}5BJ zKQYN8q)G~)VSUeq0f`LpHgSq}kDEB27rYAZe)Lx7QzY*8jmi2Y>JXmH;yw1yHUnQt@VDEG}+$DI*R9X&t7o z_@>4x5Y9Mibs;Ly9M#j7=XX;FFn_L<6CeK7`omtRzM0ZVun}(P_qHV2KfWF%chGAe zZ(0T0*bbNxe-(>29OHyYQx zbL@Cii3y+V?Q*#H10lmdel}TgdakI^ih#A|R*3&hSTtF&MkXi_CsO(rB%|58nX!|L zN7B8yz9H@YqC14ov{6r^dLyHd)(app)Fuh{5`;d;rG29*7Jj(G%B`HRNx~+ z83=jT4Ez0Orw1MYS zSG+ShX;m|U?X1ySUn3mBHtGv#+}p#eY(TTG2wGaTOn%WyMxhL1?n(aK^ zOnAlT^gTCG-svwan<;8hf0Hsec=??b0!*003Z?SaO9!AW1>NlV5;W>GxfSeiHrpwB zH-54~Va>hrr)#c_&*G6DKX)i*M z^1zt_%=ufAuOvi|ROpR8Wce{%o{L_e5cInWs_u?E`Ik@Qs?vh$HnYj-WT)eOK)8Do zBc)B&5neE>zGg!O)@g>%13g$fI_CiNLYVn73`XXtG zM?_AOsizGsvGPE{rfOy}TwS#S{q<}zW!$RRg^07D>WbMn4WA=7cC2L4ABw-}#h_FA z$?2jArNaHCZS8 zMzOWERjbV^r=&D+b2w86U5FfDdLQW-Y$6*+K2Rtf6`z$AH9RaQB`rJFCTXbywU=URT zffM&!7Tsw;>d$q__EC4VE$uq{NMiY{P0@V3fi79f5!!oG&@2slCEt=q;Rb90@&M_;k4v7kH7@HjDl_`}vsx_F;z2k5tt7lanD8 zTCrrEb@TD_gIH%_zI53SBQshq%OSIoGHn+lbmu3Pw@VoSV1Sn*TK26i<3dGoAY6;> z3^pj>fgEky`GzO&%jF=M{eLFq%R>ZHG+i*4fWb|p1JE!cjmX1VsAz=|`DA$TzCs`35J)Jhvq6 zKs>RZDxPvr6#?1bEF0LyfrLPv%wi_O`qZaJ+ z{T}7D_@7~Vr=5vOHBU6CP|QL8A^97Mp$47$M{javL^Y?-bwpNO0G^%}-HfU4=!t=7 z#lCaTVbdj)^l%55-jEmdD{#~5ckkaZ>bH>xcKM!g7-%s&b}U@(WKk_TFGYGxpfKvC#SvvW zIXnhdK@ibL8N5Bmw2BAmYbIsXm0)e}K0jocac(!txqFL{lIKzQk`=3Xd4I{XZysD) zDT;nh6omiJfd*}_-t4fZ4{7y&Y$T^SCIt$Wz)sS$7!<~mH;(6|$1zhD(_#sfL9AH! z(`*N)#Y0?;F1t@tO0ImzMGRomcsqV6IbQx*Zz=Z#t4w;fsRvxaIV-TLf4%%}^NYpY z(dy1KuBCcki=?#<$599s@vD*;ZcHPY3I8lURD|zr_B0EWV=!zMg`LpyuB)cOyB)$! zaN~zZglYyZ(Jc5OvKnd*{OE^XPFQty`o>xF#+pTY6)e_>^wUH4yp;P~R5C=;3%3{@ zjwpkkY%Q2IJ9tEN{XguD->L!h9+T-wAC2~1GUEjt7Up~zj0dyk(Hirnl*t6$}O}oYaSI(TjzlwR^ z{FT-2@)apBFUKMyi|OtbM?gmI``SPyAfS`4Eh`(=UD>AOjvY=W>J5aJGP26bvAE$& zr>?LzhlhvSwZ^cqWI~}E?{D|a(ddsmP#aCBZ(i=rYlk6vn95h)`m|lMzB`0$Q6SUt z3kpfKOX$`D1>z1zUNghcSxOk@+;N^X<=xNilXy&5l?(LN9b1ZHH>5u~CO@5(!(rcJorT%jWGY0QsGG&!M za>>Dd3Nuw`9n=wa_)hD!D+}m3({$PFPxs7b*8Y~IdRmuRC-{NjNqyjhflPupIOF?F zRKPkSv^ehnoken-SX7oD4L-d@n9T)?4>(Tz&dMnp@w!Gx$M#j*9-!yqprfuD#?Z9u zt<)DDZf+@QNB^a{=rM-g=#q>xTXrvXad6W)HPM{24o@76*Fx;NRc;Co5h)p>=k2K| zM%l0Q1ip@%xYOX0b4a0uw~xNU{hje$p+Q^*iFtd?9s@tp5n$_$QCL4hWb8=W$@8`wkx05DMM)gcrX`YA?M9lw@)BpE-`)ba?#h_txKdj&Z zq9>t@68KD4-yUNK8*FNz#mc|&oe((4UWF`DeSiA?@+JRUq_g#jX%@N{hOEh(jL=o_ zP!As`A(C?{(m<|i@7?dA5!qMoKAy>Sa*NfpSt4MiyNx>)+@K#M$WjhnCWX)VHiH=9XKd~v-jO&I|x2S>E}!;!K-(r#rYo_}R8 z=d&-ncOFuM2sP5PGZU|xwRCd^us(Uk-*0Ve4GJe7A>CE|l3PlOf{F;=1o7kCRw@@4 z@WbbAa2*~$+3OrsD|P3N1!zxM6Eby=;>t6MrbeE8%7FT@qyE`R12$CIaRR-SQe;26 z?az{3drGpxnkKs8{No2HXhp;yf(#+Y%{M+hjLMQ_1IHzI?gyPaH^3SHU2SEXjtY-R zD~}fXJh_&ulTSK~v#{hFe7|tU;uuiSDul@5mNk~kh43+TN_MusMWis*WI#`kIy$5; z6A{Dfc@c}yAJ_fzt4HMde{!x$SuZti7(GA&(CUase z+`#&Qc{;s8+1|Ct0l3p_bqLg?B@W)S=_nPyc;2^l(eSm+M&0ROo23VUf4=OZAdU+k~dNE?zRvyNAA$u32PK`lt z#+)QeoN$oYY)mY(BRz`W{H zCZycq>gSd2?UPF1Vr@wBmo_Nt>6aGD2IH8}B$M=*j%Ci6vYjPeBQFM2K z&E!{AgIMb%MMfDv|HsVtz$+9Db$?dyw_Fnu4Yg`$n7yvZ8;b7bHWey*D}plQW-$jQ z1~L#MYJt;|@Cyw*+^TT-LOmY2rW2gpaUwVY)23vjOmDoM0ld^qhmJDn9Gs-P(8~xa zFJ+7>2{Lr^>wH~PXU4T4dTvm2=O>d6n@(upASr9R#%d~(LnTO!$zBej4<#FqSY&Y*J@a?Mkqb?MQV(O z*)id>q|v?~3CJNjtCtsLir@0U*&QE8n`_i8jk|ZMMm4&=hi#ooy35R&L>Cz*iOrTY zr9TTWbOHvN88u4yzD7XDUHcaqrH5&zz)ZmL1(KFjYmfA95Z#E9fUt7P;e?_$*{C6I zEw<+x$N35Rz-RK&P)1nObDOKMZ2rVwpSXB9_Yq75X`MU9Nx7if&2D}&y!l(E8!de7 
z%%%1kc4*L=FD0Purhred+G|IbpV`=3zur$DlJ5+SqZ_!(@zQrl z*po+QJ zWn7g1MbulkMbSUt-gI|&Hwa62r*ud+f}nJFcS}fjigZcGvb2PNba!|6JI{H~?>heg zT(CRenYrV04=<&MN1^Y+ymqDo;Z5E8jCHCMCty|WdlM_A)%eI&#-VkQWnY{xoN6_; z)Hls&Q3>UVYzfe<|KH#YjE}p*#Y%mylyoy+6<;MC?T&-ZRcI=irene^Fc8NER6vWi z=5JQeV;qdpXc~dnJKcNDK!oOUXz~5+{CzP0{lz-iH1#7IDk@|mS9sw37}!>2J$V1| z;|IOg=Rav5a0BbaUk}w-&;%26NQ%Yv91}sjf_$!f%9$jzc*R2tPLKQ>&K^j62$ZR#SZP&jg;o-@;y^poU)_CxN=sA}))suyT!}rVj=6 zTE#JZg+H`DnM1wZ)KfBf3aC3@js8Gl6U~d6cK06s#wO$E!q@k@wP9Ol{%_I_2mkR(A3;PuHH#+x=m9#YsYk-7Db%oJREjppEL5ym@W z{>oB~v7vh*XCygjsIt}{r{u={x^6`NyeH&S)-Qvio>F#*4$)KJkkepgu_14r_{ec- z`Paeq1Ivxu&RX~8r+bY7WcN&YF-HD{lM}p|vkz8WVEo2n1^%-M>A7GwJaQG;+YvaP z=KkS9pEvJHyOQ+g$dROU=3*)2AE~0=F#QjHch&U6OZoKklOnj`@YQ@{!k>pywK$o? z(`Z&pHI|ijntg*Z7*e)78undn;K{3zsNr)5Nc2+k+rG}zxnwoIWmOY}u?7-yT)f{DHaEv!7kh z`9whO|2y&rRGJkVF~{J|TempW30Wg)7P4cUx@#x2B|ts7`1Z56u4het*|z{^^-T=Z zeF%9#s6*PRiJlB~Ek2l+1~-DT?5nz620_3RE+hi-Il?ff%Qw~8v~r?1XvlD{!W1cZ zb61Z@BzE(MdgP^6^B^-XuWVG&uA0xxk5S>w&qo(AEr0OlJ46d2$Bk7#I`We5(WLwg zX10HdY%5V%$8I`Ys}+5$grUB7+;D;64B<#xt(5Hj=D!Lb(*^_5z<0{T(vo1a(-O2> zR_cFF6KnazmoY;U9ZwkQ3?z`3YHTG=ZnP;+4$%6238lbBY2FSa^`AzpAnp}1>-RU4 zPY#$ocwWUy#lrqz0dW7ou{CzWVRE$4LozetJafHpPzHPU0%}ye>DaZr;|2*__0-iZ zpu;9-WiQB6k zz(YJ`7`yv&eGm3WE~G46$(;Vxd|SPn4s9 z_a|cl#rXiRR@gmz4zLj?;|#rHHCFXQY~uLE&ilibt{fRbi5yMMI5ag2l`*4=eUI?$4n(y}gHolIbut=$} z?%B0{ZgrRjRx%-3-c41sVhauxUkK~^3!xQz|~M zbUea>|FYgoq)G6qX6p0*5DF%uUdTyP7_*P=A4_8rP#ua6_MUsQyQFOd;y)uiyTS!! z{XT?-OdQNgbU{AtdRDliL&2P`H{(3Q3gvvkWh*APxSRUC0O$!J!=I&DT-h{ndPn1SHv8!kVJrcWq)ZJi6ES+EIlk@&y?eI(O zX`R(`_w@|Q0wv;5e*RrvDd|)LV012 z)gC9Ucu}~zewLaiC(XzLE(3%e?J#wlnxM4bvSuzyXfc@O)B(PQ%F&!0yVeY;$;?7N zi#RKw_4509WpvAE#0(63;5W|jwu!H;Bah}ZM6)NabMt1p^DSKRGGz|WmnhRt3a+E> z4jv}y_^`>+VKL^$52l*TnB{oUzHHGgJQGUQJz3O|AwxXe5X?N&DLvetK5v&wR2?8e zN>b_-)_x=cIj<}I$JGtnYwm=s6}WRj_d0tvS#Y7-m~FH9)FaX-ay6N!H#-?e4gxi^ z*@Zy{;eEjw#^as)6PEmn?#L(4l>8*FtE*SnU<;Q3NHXjI}GQRXYU^TxSKXk zJwJAW)p-a)6_l`gxY7OfN)MANKFTkRDp3pR-5fRt;79ewFp^(p4SUT!qzrq)7gl9a}F65(QZ}6UXzF=Xv zhi|TObN*aTiO-=AS6~^F)zrXnN#8XG{$+0N`jk)S;5OGJCKHVy=ZeRR7wF)Hzxk@< zzt_tHb|`_U$Wh>Z%Bbz^u{iorIcn6QLG7NdCeXF5hvNJ!MLR~XLbUh~7GlXghB5f{ z=}VfJMMLy#SM|+!L*t*MmJU0o+}tFkexwaDDrENm%Y9T>{t^4~ zN(}f0i6hoU1Ca$=HVs6+hRhuezZFx4z4Hgq(?epfdJ9r92^k@7`jj3=SDh$m#nf#9 zYw%H1@>PY%dCe*yLIJb5Uy;sk#LD78SFKVA?^wRhI}OJ6eDVn}pG%V*&HnX!{e5^d zPn-NF5Bs0|+VLLk4A?#}76Dn48>rZz7amyJLtLHI=#|H4y*7HBp)82=rY7+I@Z>M5 zSoB0kQ7P0l(PXIco1L_~ZSmsC+^YvQx@$Z1`Kqg1*S{m6bzreWp7_4`2o24xb5=+V z*W8DdG9m~NqWH~Tl+pDbYyMi6^B#V+UwY%?;Mhw*Q8-Z1%RxFy8kxCbHQC#lu$Qot zowUW8cA8cOgZ3=oNNN(DZ=1B+zMj9_s#2Ay5LbMp%_P_^Sztl;sX*p*zxgjvmWdl> zUnTL7#BatWtF27F%~c`qSI;tGP*EZBcG)bg zg)#9J<`*-`O8h@dA8bU(5uZ@BOp+U-;oJfBHm-uq3BsvFH8u-1%gIlg+3VD>F0mXGI0HfA2qW)zB%m zII7B$5}A#;AFR6n#yx>UErCoh1eoNd78NCbdwV0|w18t867W1T$`pHsP`)`0ZATp8 z!;)L)|MnY1??1!L$&*oCOP?=6fiGN$T&1?LtvA2Dl97?!hg`_%7AL@+g)D&e0mb{eLu;b&$byixh9!kHxzc^C` z+-iVoOi-nVtVk2`>pu^5JK3#UgOk#bj84#o%K1Yn+w+{;Sjo6C=xNKCi9ZK^nfk`y zz{F>H#PnZ)tfqClS_t=5TufGCOwH0}Y&|OUQC|N)77R*7s9?E<2xKj@229wj0kja{ z`@abC;JS>)F%gRsG7X}0bdZ*s^+tozL*P1S+j8a)TPo0ZAQX>YYr1@sszhb(V+MSO zGiJglNiULlK`eb7iZp3_jKy1C$!u9))P!;Afoh5=(!f)!BtRWf)r9Z+?fLWv!54@j zw|bah^mpu6F$tzJG_ZgxE3ul|QT{`zAP@e%#n)6XSW((3!`V3&4(NgOQqOv$wDt3a<*&?sxDycTb}MNu3jIKQ-g{o2_jM zh8_XR+v8;58uq{nd9!B@5SY&W+fLWSPHxPFeV2mt5EmCL%8r_uAo9r(0C#a;9<<_w zhzFcR_N>Z&?%>E?Ds)l6F4S^FddJ+b@w`@`B?oX)zz z%AG1dM(UU}oD0g&0gOQBGDXBJA&-7Pxt-qe9Yl;g1R3f0ZeWdF{N(T|S7#!6{6DBSy1vDkE)` z5qr*<2TK0!1Q-<7V$aL#Zv=>#6nH_hfgF*L;6Ldcm7q{yYk7X5%a{KET1QV0Ns&@h z?$YGl2bNP@TpZd08yg#DHN!UW>9G9X=iz(DTM}TS^zchFTYF{PZy{JffCAu}FyO&N 
z|I4Adj4GzvgZLrl?f6sZ=D6=ShLUEjI_tD(s(U}W76OEdU~KWYph?5Ry0}Ja1~FLp zlm?KeW6s0d={btr@-5VhIg(fW`+6em#gvNaO6O*%n&Trd4*0xH>HpJqr4twkBeeTz zLL4vA!=%D@85=Au_9im2a4%rm0mu`wv2T0LXI_;PR2QjGoc3J37xbuO9Q5FqFJf_G z+8}yfGZ}YUAnmO}jeIiA{|%(7y6ty+B1V7fCGJ=0B-{#Dk7Jj{ z(~}i#r$4e^!m=&wSM?Xv2jImxgu_Y=J(}4m8|$TFphtXN29P=$1d*E=an=24B!LkL zwSidAzW)m9Kw0)p3glsg@m-}6s#Y}+D+D{rzD@aSrbLo~x%FX0ELhKPEoNy?rr+@c z9`j(q>?}>N&9Y8N)Oab9SFPOG+M>z{&=RCR*P-^Y=XAOIQUnwU!JypQOQT@5r%oO$mwX~KyFar3bGZ07Ohd~kh^V}uCEIM&K7_M@qAY0_^a{d_*pCR1 zpWQ|)!n`&}#Iuopy_}BX2Q@H5gO4jIzQuo4>`Rn?gK_MNp#YRkR>q$>_2QRO&NlzZvk-dd_-A)JjOn}ZLEFz}HS!xW z7p0Q*6quYn+PnpnKZYf0)CKGIYL6k&y!uP0sc?+SpD~!#?NZ93u)5onQ?Zd+0Mb=q zgP?xUyzl9zz`tFN6bthCt|^akQpTDmGf!(Mo!^EV0@#UkipVv!srO}98qj>7=>EQT zr}h`(Mlm1CNO6yEEbROX1^!?Bv~LM5SW;1OLtP-7wT__40!ql zXO&RPMGnk>MZw`r zig@BmwuL%cGWqNetzLq7{0?CoDTyIKTFL&u6XG>R0xRZqJ@K`)Op<6Wy?wB}1;+W& zfJ*1Qk-9zS&E~(9wy~PO`irko%RL}O*i5Z=&H)JyVm|}m+JJkbhl<#*rs**UsB+A| zF!Ba4I=M@Sbc0C)l9V?hsbcvA?dI_e4nOn$4_9d<(u5j4MCkf3qIAC zWuq9t+biHFM#V~N#z)dKfju9%U6S)mgB3$PGBY&=5JAo?1S;j9zk7mUFNINgqqBHt zw=$g!p{2z4VQ9*Q;C!(C;)~Qs3CnL=9~xmSZm?~0kS1doGPuY<2H(uM@F5S5XE08I zA=eUcOV|lwP@F@>(DlLWR3ab9nPYT34Wj)d>@Vs-aGbZqia7u=lUuTw14J2q<=p0^`W#DXvi>D zlP}~`Ft52>zm|eBOcen^z>g@j(p3^@=j-t>hV+?gd=$m$m3dcWrz-T~IKYjZSvxev4z zu6NgQ^2)3%gT_awiRs{s08_E@UXbb%^T;SY`c-?5UvZNckWt^E7{wZqf`{zowOE)N z!lnH-q)RL>f@SQv^RUH{zc&D0OKf|MLN=7N|_9S3TH}ShNt* zn8`N(PXXMJdI4mB>{6OpzR#d*a zuQbcx^Z%)W#R6g&5B`OHVFHYS{%xZW^yXK&MlO@DW3ELWe2^m=#Di>S{}%H#xcT3c zg79B-ldfX2aC<_{7cb#@%5&Gp>^wV__r(+mRJ2Nsm;&k1y9ovL) zT??GwZ?(U*I?)KY% zJUJKC2tP4hMH<{?XPvYED5LCxjpn!u-X17Chkt3^ssh*zD^agwgNu_00$l_0Fa0r| z0Tl65WtIP|(>B6c8(p8T3T@$+%<(X~J+Lm_cP_JvV>2cInGD0hSTW-<j@7pQ za;nEjnxqfRiz;0guH%*Q64L<|CC48Z^taP;X8Zv z=q)xh_AP9(PpH3&RqnOkYs{v(`0Z1U!oPk8j%i^RTPx}Daqd`QxWrTy>;qY>y>>i2 zt)USWBEfoOn#=`|MsL}O3Fh{&CeR!SzLX_ha6iRpQe*mCGtEO7=PYl zxg`NxUtU^#m#JctSy%tK8SX?RfKBjIE$o9rU&Dta0WUTfYhj>lz?_YA+y!mTPgJ-N z4eL?zg|P3z0~!q?;j*hAJrA4qSB$Fr*TS)?jW5m;_@QF`G7WUn2o35s3qO251Z69k z#Q<=^9^}Cy1|lV7NY;kFO3%M&qNx&ms~xaMd5`rzV5tj00p zTRK$JD~9gGt^)k3w0xXZn)hr=eGReW>Luf#BGsz^#KJ>(bvITFIe?^`KV^I~NF|_C zHkfsgd`!X{XN-L_sI0U!h!qr@EXFmvyymd$U4UP5OEFSwmDPsrW8bT3^m(b3c~jfL z0W79t*UOhn-cG^Yp~E_D-a5N~F5XX9X?*2o7D{nEzsCiwVXs|b>UYV_r#s~?1}0qq zqGiI-u~|#v&V!vpG2=x825MSU_-C*;Yg$v}=hFc!!0|{Q;Z}_fuc_#7g-0ji9SC;j zCdK;w8z%F)fwP&OMCoC{BX+owkW7_OL0(TD>LbyJXw=ID(x+Pnu=9~{-(*>!82sR% zD^8M_N4rS-N&emuuGcpgRh3m>^oA1Dj7)ogYs8Per)iAiFZExI<)pD(7RZN^hlfVS zH6~_FVl=^do$JXjI{QR8YsLjw6c!e4(at>wNZ-2jY@cvz4nM!2bVYg^&Nqv^5bl%? zeI(nbPnfyRo5`VoHq%JglMF#W>AGJNmNe{I(5clq|8dmyJy{{TMCR6F(KI<-V8D5l zBhzub`#uQEKD$eGZ;IXc@}x#Q)1=-1B>wxuOn*6rm24>u1gwPrM>adFVYd zRmkC)S*P!$yhMHXBMrC?R=2P2!H6v@!P0Ch`jb&NNet%MbDKncA{5rMNe|dZS!XC$ zSWIA=I4%>&8wj-^tx3{@x^i~$V)&);t&JZ~X5#-A^GyVZ!9|>xn^$>Kyr48-|I3N* z)xA#YVj?BIXDsNVC7Z8B+|KQQ_BywJ$1Y}?;SX5c$4ro=YAzy*h?e!Co$lSkl~)oK z|F7-VzUv1wCKefZ1q z-rGgcB{{|N9()|JPi&H_RU|}^qM~cxTNJjY9cEy%#S^yQPK4PP~KMVHn-+BuMpQpSS zOm>K4F-oZ&Y^m8QSvQ*tRE5+=>QGH|q8mb)3#bLa?N3^lK#+-JK)gd)khoVWGliqC zRVyfC0tQz-ZbNtd^4FZ`(UeW7ynQ1><(47(Hmf(N8>=+f{C#&HrXN8MN$hLE>#w22 z&BY9=b?pGPXHqRwl|Y)kkjmoU{OZDMq|L~ujN?StrRs6-U&*A38iZA}G? 
zhpew&gP&plPM>8n2`z8WL6Q{yu!ey3qBu=mIQt`noc$Nxaz0G_TDbbpvkQFLrgT1E z6E!Eu5pq{#_%xX-99gMb8#6SHNa;3hf^672!uK)AUa^f@i;wx&LX+rQ}WSdjACh~n1LhC%JcIrh#^awd(abt0~ zkaJ+CKIpRwl>ozM1KdH?nX|ReoDD6Zu zCYuH8cQx9SaBR|V4tbJ}GVGnhzY^43kb`CN!iFn`HN?ljWjfHM6Xp(ta|A$Qbm zG}3%L(I2Xa2?rU*@GTZaLlu^)Z{Sua!5Kjk4j3o3uJJ9rjYtQrs~TM=UoZW|XEIv| z!C>9XN&GI?@(AD&+k~-5JJ>Ru*-0KFxnwTEzF)~1#=iBXdfy_gsz>C6W@gL~Fh6g0 zGd1b~6vU4o9%*(JdXwfe=kus4L1@Ts*b+OQ($I}4>a)R|Vzq;?0pPxFT39vfmBU^D z=chUY&B@oPy zBcqa^Rq@>WbKtw$OW&9Atni{D8s%(0bRbX$d40YY$HREPtqlx5-`wzb*K+n8N+8b= z@#gaL^K*K6aK62%GZpj5@$Ww$fxOk`T6Jw!7J^j9L*x}X8TJS*T=b#KQt_#*97yKK zF(vm^Okei$DA#)4Hx9Z<;`r72X2YA7yL$Vo|vj`qF)S%PmYV9@e0NbatQCvdE_3tV>_j}%2( z$=scF@+m#uzu+-E?$cB?z=a>XY6J?PHb*wp-?*XF7^4oX%TKL9e_m_TN*b=m@0}5+ z9IS2I!;Da(00DC@3^$@tusa>2O98j@>-KHki982!!u!vM~1q2kh<|A1%GO$zypwA-R;` zXL&Lqd-rCCM>Zj`IpwxPK7yr>2V;JS$V|L z_*g*=ngBwXEJsHg2N`7l>hI(dwOWin?xa)`nQe`%yskPURDl$c#LT6LQV15BWJGRC z33<8HPSo9)`fl@{P1`}Zhkvy2pSEKrQZ&Svm#qzz!WbObGlT}Eo~aG&pK`@0ItOdUR30h~4%UCuIoL|?UHQ6tbp$_EInhIgW#+1C@#@>JEl4?_= z&3-k@49G=_(a_ZR9QE~)_1c8kQY8A*Rw!3ATtutN93C9P$X1D?o}yA76Q}w|^QSh2 zOLSTyxpUAjvA^PWD&_DWL<0o8S5fyJl?Mj0wWhqa4D&iT@gr-Wf`W)dd%u`Q$!OWK zv8XX@HaFHU%;86^>W=9xILOXM)AgUTg?YR4GW4u5sn}r~DyjpqMaR6B7AQWTUIhZuYYz(k>_sn(7o!C_CaOZvo)0}ed9VcZ7@qGKv|e*)-I375+*KeS zcoCn9CChy5H+aJo0m5GlULVhZ-g$tHZ^;qxB6vIze}7baIaCYmdwYGRuSo_s+s?DA zk7lF8*bxnnj!L4(%T3cJgab1ks zU~3tQZw%F@M1MZ@TS5!57(5SyoCa1qy$AD2h4z8`jgjt5>Nk!)0C0-Dd)RGDUn0f1 z-M-o~w3JZSq^z8%RbRGN|7FzkHUEp|g!9)PUojfeyYd!)cs$ah7mt$D^RrxulM@Wt zR}$8c4?3;>!f1mW;or=Nc2^=E{URFs_3_V*9;@1MFxlm`b4nEIch}Jqt1&($;3$uq8yJ%tfp*CzjHvEycYwceT9@M`*4$$#F zRfuV%g~fWdEC5BW1mq$oRhT6aSO5UynzTu#)}K!eseR2DOC5h;)+pzf23i4=Rwhgj#weg7#7)KX{u9Gr_dkQ$y_d8S;uE;Ptx?o2u z^aE|I_BOI9;TY_eO7+g(RuvyYy5Bpr_4Jr<{W$k~O4LSPZ0OqMxKk?xp>2dTG6B2Q z?5KREe6cq){wPQ>_1QnrKrm*T$4Z-xbp(5*vkEJj9B99H$*J}Hz%iYnnJ%hqRi|j2 z=BuJi*~1?T>McWfZ$0E2rI#8jh$(KzoobuF)dkpPB_tVsN*i|&v<#D|B=MF_oYtIE zfjpkS0R!9kt^9Q0&b0N-x1pUqw3jR7PJ1Kr=A+s(@b1r#Hko4qeP|F@Nl>@t0GyVH z)QpRIBM4T0GD74N&6g7VX-r{CLMlEk==#Eh#yEDMr_tkle`ajAPzSB8t)uM^4=`1<0N0ogIk z`JqI7N8*k=)QH25XeaJH@M;fCUd1iZ`ajeHn+AWO4X#GyZ4)4&C5~Zyhut7K9a@25IdrndAkZ-&CLm zphc4WkIa&{eUX!+(b||mH&1aITO4k-nLDk5k=(MYW^#sjIwok~U<&R8qg%+_AL!(s zhOe<>6|LmqNG7JxJ@~vN8Li3VUuO%3^m?;c1Tph>o2PFEj(bU@{-L{#i2JHgH$y{r z`FMS(iM&q+{(u?Mwbt=j-UiCEM9uKhAbaR$oSP@0k>Jg z>iMp}&YxM@9aJUnt1AvVdGQGyl<{kMMhhk#F(a!#AmEoLmuyz=$>7WZM3#v^UoNSO zYdP+}t{Nfa(>Gt3>cocuZb)kN2@*KXq0#Gm5;c&afoFbp!EP=l0%x=A7m!w3;s_F7-4Ht#(o?{hf6-T$t zCoD_~m`NGj))>PFLQWe{X&k2P&iu^G$Ur!Ma6FseYNgFpo&+1oiAsbMbppc`YqMfLit;*q{(gLz4IMHzt7ErB z6b5SuOFi}?X3*{H8HU@NgvwtG;14l!-fi?Rwmsk0z_lJh zdrpPl?*1Yh#Eeiud}*^p1XM|joc4A8{)Fv)8mvxW#s0aJj{2TO=JjUt;7fEDFHd`W zDY0GkWF;Q!B|N=#G7Qz9hZ=mji`MF!Zf?X(|H|VCVjd%~!G-?Cl%AkXuzwQZ1_f!& zJ3dK4@EKy6uc($5oWUTOfeYcO7c~Y z)!#1jH)POWiA$E#;BS=`-w%?b(AlDXxMA&vWbrN*u#hSXs(=hC&A4`ZZ*t*?@>{-{ zWhk*+*5e)yVKgx=Y8*$_w)Vi>KhZtZR`{1(mIP%~7VEsuinfw{K&}R{-+NgR9_G&s_S_QyMIV$b{UG7*%tyyg}^D*~25o z)@|qlYdC1do(oFYAn#oWZ8gI7=n-dq8+6Lv*Md|~k=vKlpig63i>XODwh)lD13I@eZZiG-wPW9;O8-4WRpdGvtP*7lgS(DLFS5i>b!-~k(sbg97Xggu)e3~=2DDiYnap$cpd6PSp}+yBSTk_d%E zN}kcjj_UzPT-(daHMR@d9}WbiJQX=2#I7J+PlLK{VJP96hwWir$|N@dHq`%R0Tk$@ zt6m2`xvl)`1t~I899%+T09=d*yW-W|OYtGh7ksi&VAx_)Z^jDyZlUQn)1@weN{lSv z#YZyMO~`@G%vd7{tu_u`a9?DbFW*!aIZz|(s6;Y191RSvfZc2jL*Vp$wBO~f?rd%nGj!JH^f3{K=RakdG zMR6+G|4#($pdO5Es+r}=mu*QoIfT8*d=BUI%LRQ0*o4MnamG_K>Dc@7GV|CZZWOTFL z|EW=ledJHv!nhnl~5h^#Q+LAWb0 zaI@UHd{sepsM`2&#L35AmdzD z!(w%J9%SYY*8$8RV=yIGZCPh=C>kBTncm8k7tFlwXg6|<22JUX49d~OVC>20*mY8t zrIK!e0@vBuC2V}xygLGlt=&?aO=p0Z(ZV;&SQT;7_^PhOY5VMU4#Xdf$@QJIpNK5r 
z5xKDdoUV{1?_Z5)vZkVT4cLAec3EqHo17Uy(mZ}q(B_wk?m&DCLeTBoNJHG7-!^-( zi&6%mrw8p;4PD9v&uu|Pbq&-Uy$x9!A_N?Ys_MZ2y)zaE@heSuBVGB^&~&?g#wH=| zvD0usozfQ>GbdK4!{2JI))$!Cw20^mT#s8{LGSHOQ=lu`hdf-15Op6MU68OlFD`De zkz&5R?2Ds*m(A?o*w~0v^wv20-fcz+Z==DI#ARwJq)w5vLE=BmgeWJMxrBK?@I2w- zyW>>}Ykq^yEkcR0t){g>V2V5`M9MJnb(%;6BiUz+^BJT1G3FZ_M$nHd=LT5Z_7gHN z=TohwL)N$~1{>G?(HDJr{lVz#rw|K+>vY0vBN**=KPp#wbNgFCIrzL=U=y?eM}Lq{78zcc@G;gUZkPxD3Xn$O*13TX>0oqzg^9GU`=we#+XR`h9tRe^FZw z^jJtH2T@R!{5(2f`+d`^s`loHQpwe@-#JT%CE&wbMYDvYZ)#|g*juV9QwO;eHIuD& zTbksuCgIEsOcO3tN~4wx;ucP26&AFk`T=(1Qc5BK>~A<7gIxr=93k_Q@T$tfdnajPzU~ZU!NrHr*_CRV`Ck$XCU#+d`zQAK;2U1H}PLE*3 zE@;r?4g3nAmbN?6R5{CmD)1AFS>=T=aOI~yrAjWUT{g}5&!(o*Wx5M>sHWsJnTv>q zJ;PYg#MXF6#b2_9Zjw%c{8r*PDU0PVibsktdDcez!Q8{v+MUJy%GTD5bG4=%ar#^~ z#I9PJns8{#9iFwIz6Gy3*Zm1hXp0*!x|EjRzjfA%q3T;(1(I5t7wrUmZVseL3I92x zwd~Rk=@VkzNobXPBjfgGCUM~C?>+rfkBK&OJq7Y-M;lbrFG=9@qk$>|_5n`A5l!z4 z@BR7l2c7AoMp?j1<0A(^GH;n5_Hu3c2A@0e33)dd#oJq|Nu3(BVg#`MDv%8=ELpy?={dmV)Jb{;fTr>Bs|se|P;;>yQ}`o`oGXg~CG z+#t;{Hjg?N^)$LKx=9ETr+?u6yHFC!3Lj)*pgyHam{SX18vzgKD19-p;EVu1gk5>9 zq>bLPtzO+JK50&=fs5R{I{ayP8-ik#xE zs^=xB&5fNa#gUfDavs7LKj}CMebx1^YZiubY>F)2v?Ci$ERWh59dk{}@Mni1Rq`b>Zgul-r8rh$kHX?$C^=2M2aLvC;gBJ=9lKba2ToL!bqdYA+-4sFz5R zZbb4hS5`t9@dsY^7>4S?B41ZP)n`-NKD~ziEc*uWvlKM02G8NYg^|bo^S+D1W0XHE zX`#6_w5go-{;ChWR+23`S0qena7}T(&l1VqVvTemmXMH_5I;j%#AuiFg7|zz>r^?1 z7BlFM_N-n8Wn-tT zw3g18yoKz0E&Ds?vEAL4xe9oiP$6h3hGO=00cfyFC4{jU<%KskC@cw##aA-PaYrf9 zKSh5~<+WRePd$6udQ#opKWz1Cn*!g5SW(gLq6Y~+!r%tu6>I>4(pv#Vrpa8y4**V@a>UIT} zbIx04Kd9j~g4$qoF2BzQT0kmnRI#p#{?$5?ad5gg|rq0ocCVe=NI4rP6j z#>PzJaXw?3q&)LkzE-O_$bm651h`1OsHf?Z1cn2Z)yQmCO@a;Wlh1O?MQ$j%bN;19 zoBAXM+C^lb+|(o(xYIZ7Myc3{n-6AiMD(5-YGgbddxZ_q7kkN)==`SNdWy%=wN77K zbgv!?(T=3m0MSysZKs$&oeNAU2A&}QkWAn%eiZ=Bx8z0HHW{uCCb0IWPjtz~EJsdK z9^+ZO07OW}-MtygIQ9C-ciWPEw|f}P*2bncnO@21Vned2xp{<&b?dZc9bsFLF+6wo z?&qZrfDVs54AeBw(nM$pL-6`!(~AeqX>P`R_XBo;j*T-!L@8CxK9?K~_p4+sbty?~ z|GXlvRK<%U#x?7Qp)83&M&1nAqtNd5WMR#%1c>BYf*qyU&`YK_Y2pAj9IZgvf?b12 z)*uf&4D{dvPgx}s*3T_lGJKeS-V2%LmVtr|;D7$P_zY$XeyiCrN-$o2@$A7hSKf-x z&hKM_%$Tb15k$uDg%5hah8ekfO4SIn$CyOBa~eYJr+y0kU{bDJp(UQqksEPI04!I9 z)pS^)XQtCQWVC-8g_;{bTenSe5pgHohbL6a8np>Fpz<_$EJ2M<llYIWTuVq^i$Eg06SyL{HUWDME6r$XiYJv!Dcq~l7==K85Z#!d zjUCXpusS-Rs0~NMzwUQr-6q~GUOtW9t9Y|a&Rlzrda!t87;u%(OL9I2R4hp&I-EAg zfcou-gpL2a%Cc)fTP*Ssh5j-t(!BsMCl78|gD>gtv>zEUc-EE}G4eFTj@g_Yekrs6m~rTvFb~e@zsse{1m^bE$53G^lG5 zMG3iOXo^+eOb?%l1EU^FcQnW(LH-U5TZb>y5z=*TAp)8|KK<(~~{iVgS=()lD z-z4QvuiLdQD5EZOa1FQ4r|OSE%}nqqfUhuf4bVy}ZN)Y+2lxKdn*aBTR1VkAQ~_p} zn)&n536mqzTE93Wv5|@Ppl>U4AZNJgU~iSJq%@-pv9}0;dEo~k^eYk`)E83}X=T<2 z{R8n~v>82f;O;!9{gHEGA35ivd3U`O@b23HsKS4!um~DEkfu6f!0ewqyc(z&3vRx2 z;Il35n?&`lpl(tv$b3zormcutG(daDpUt?E6J;2iqeEPbuL$Dsuu}9aV^Jg*;?vJj z+~<(&lSQBQAKQ*!)d&N&wmhKDZtt9`x-G$Xf8GRKW({MUJ${|FZE*zY&5p=qQYoxt zC$2zmgK^j~G z4-bD%yoh`&GbshJzHeL_%?!xC6EY}7cWa{Q6)i_6-$cnH8uaxqn)iCIeWyr>_O5iu zQi&b~aN+&ex9yzo{$n2|P>LtOqY_YYb89UaTT=ccT{OGF%?7%+ii(Q;rg3An-TW{n zfCjx~Ev+8cOY&+rEI|8LZZhYC;cukI;DU_C6$;+91|oiqh9hfzr`TU1zQm$kJaX}) zN{rYm1}juS=`cBP0@J0yaNo_#M`+3EozfBnWVEULkLjf;fTd5rFnH?p_xW%f;glCf zndhtbmK$QGY&LqH%J@g3w9&aFhiA%ls*=E1oQP5D^#Ph!!9-q=J+HY_EY##HRyh;FT&C@$C1*O+r# zAyGWx?qO3QkrLx@yYB7bLEAK^doR{?-haw{29e*a@pF5f8~MgIS&dh52R9%z4*?kzlB#sCu zCL>&*b4b(v;>JqjE~@nl6u8a7l$G^l4DkHR8(6S)U6tLd6G%-&=txf^!j6j{h>Lz# z6VZRc)QMV=?{}=e9e(Vdc-JlRACG=2nY)-UAaLv#1cF}&`WBwGL{^6Od%3%1(7X?w zEItgK4Lp3mcc@h*tNoQx$UQP*qG4o`DnSReK<>X$(f%ibZjbpq3j+>O2tIBs1ZaxH zB=V+e1*OJfPa84QOJ8)H=5mP=yw(I-dt>(Rstl(8aj6{yagByVy0iJQ1)x|Rq8%pR zD-*!HSRctM#l$P;k(ToVsuzd=vgFWoM@?z%grT0!JOMHjErvCI0gTk)e({B68|9!Gq!?Y{_7# 
z_zJ1Lrs~>#O0*cr@-oj1#3g55F&AEqOSS=tpfp-s(RNKeg7jyCy z;Ot4AI!L&jmAaT>d_n@&*O@_t+jAgbHL~x(G2oi?KZv)c`?amMJu53q%0sd9E1r?> z?Md&+a)Y{#4kIhLzV~qrNK0g-0wvNiFbrXZ0aiSXIDnjoYXZ_+v6>xL>lK-9SErnO zpTAwEr$jm!M@P1EVpC-DUTD^b+nGRIldrDymKX(-YpcIOBz{FD2c@$74yN!uqtl3N zI|>d`q`Y0HxsaAd5SA-_+HoO=WrUrAhH~;VaL47TlFem-6EtG`7Me{@e(>pvJA2SF zOgZ$F2tGpp21UCzp5jntDls`wl>aG*Bp$Nz3u0B%ek<=`8@$j60$qRPeS|TC>tlo& znhKgLrEDdC(HrI6x@7%TT@E+M_$FHh+QmnWNuriki^KSFnlVWu-f~(`Y%9!0^~3h{ zK~71WG zauRSTmFSqR!g~GSM!CVyTOf(Q+8&j&A59~nfygp3;>r`oq^66&o-fJWSpww=hG^+` z>dDUPvQVBC=g~)qvoFXpFJ5MI({13KU1BMnE1n!EL(?3u<$faT(SFDwkhDvO>v7Y9 zo>4Odfx17z#T)_=${s@aGfD9R9$D2`GZL+Cq$|vJ;Jsy&xNo7^bqEgYD#*Kk1bXR} zonHp-f`{~nqv5s1g#C(6i-(psjjZB-Z2ct~JeArC(VqGJT-nOUqx{I(a(pBaW zw;Q!}>)=I*^^`6$afS=wHsd_=Ek?&7mHThtsPZ!;wg@O%WO9V{z5JblZl%ZluJo#0 zArEUJ?P~EHMh*=4t1Z$%ar;DOe=OWyWxbM?@b!pQ>E41YZd#Xqk*fs-r60m>0U#0U#S z1CG3ShzUafZ>n*{V2FAA7bBC;D1UUAC=man{ltWsy~VxEN@2{*!*OxUGD-=y^~VM{ z;>dGuXiPR0a$loPv%(;on;5J@s)4~UNs@(hV+#PgDCDJlhaxBbHT2u*Ah@PF9Ti2z z#HsHiEaVnTc@X9J6Z8-7i@QYJiNN`8BMnQ+Phm>l_xRz_GB62<;`9awj!p;Po8vmQ z{Vo?usV*f)grMQyPU%sW4PcKKxPU&VkGH{g1eD77mO8J&N>X<0@V(&2y6BZL8vCt2 zT#SZygKg$C{_N&lWm8Dac%*TryWoI~E8|2hl*HeC+h1}Qw~_wP`pBMUDA)Ap zo{XFYDp`*JMNLQRHaHoy^o5PSXB`CSp`6IQ@`BJt9wfG5N#`yWf@Vj=?Zi7cncO}8 zOq28O(@62P>p{nJ?{Tm5yu={Wxn^_fHX7`dxM;A5;~-QuD^eQo?k6Oj+rjhO_Q_Hf zPshnC%k%xPIFTAFJG+Xx&3rNy(MyL)jsSIm&bk_pPBdFE|JnHXI65pWEL^qyN((L^ zH0xjM^yLhE^$UxPqy_A~?^W{P&|P2b&7*w6~^>3lJNQ7 z!RCf%@ldr7tuReDGy^TDD4lfs?POLKWceFh=%ioq+DchrB{M@-f7E0W9C*1>HeH6`SK zzi(T`O*?^o)*B#>Xgj+t$ww)>$N^5`$#7Pxh_RgvQS9vallpsJo!cWAZNfn8a+J zpOhU--6GAzaDP)ekgO>>#~3`dZ5g_{#uBh)D6^5+10rNZL7Tm1-Iqw`8B|}$#dN6* zB>>&RP%*GdlB><@M)bkZ_WZy`3=^&0+20emCr-}%mbLLXR;vBSgtfL8q>nuph6WAz zioTs;M$yXY0H7{u(^%6{Pv4vuh+RXHoF*KmAI$o&KW^;fM_xRW@vx15+z-D{&&bDi zgUelvT4WaCK<-*|2ED!DK=WL?BGwCI8mj$n4~O<~-&q|J-xgyi)$dT3QIjiYL!2%? 