diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 3f18263e9e..73197915cf 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -152,6 +152,7 @@ jobs: python -c 'import torch; print(torch.__version__); print(torch.rand(5,3))' python -c "import monai; monai.config.print_config()" ./runtests.sh --min + shell: bash env: QUICKTEST: True diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index c0bb686149..c49f4e6479 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -471,7 +471,7 @@ def __call__( padding_mode: Optional[Union[GridSamplePadMode, str]] = None, align_corners: Optional[bool] = None, dtype: Union[DtypeLike, torch.dtype] = None, - ) -> torch.Tensor: + ) -> NdarrayOrTensor: """ Args: img: channel first array, must have shape: [chns, H, W] or [chns, H, W, D]. @@ -526,13 +526,11 @@ def __call__( align_corners=self.align_corners if align_corners is None else align_corners, reverse_indexing=True, ) - output: torch.Tensor = xform( - img_t.unsqueeze(0), - transform_t, - spatial_size=output_shape, - ) + output: torch.Tensor = xform(img_t.unsqueeze(0), transform_t, spatial_size=output_shape).squeeze(0) self._rotation_matrix = transform - return output.squeeze(0).detach().float() + out: NdarrayOrTensor + out, *_ = convert_to_dst_type(output, dst=img, dtype=output.dtype) + return out def get_rotation_matrix(self) -> Optional[np.ndarray]: """ @@ -799,7 +797,7 @@ def __call__( padding_mode: Optional[Union[GridSamplePadMode, str]] = None, align_corners: Optional[bool] = None, dtype: Union[DtypeLike, torch.dtype] = None, - ) -> torch.Tensor: + ) -> NdarrayOrTensor: """ Args: img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D). @@ -1290,7 +1288,7 @@ def __call__( grid: Optional[NdarrayOrTensor] = None, mode: Optional[Union[GridSampleMode, str]] = None, padding_mode: Optional[Union[GridSamplePadMode, str]] = None, - ) -> torch.Tensor: + ) -> NdarrayOrTensor: """ Args: img: shape must be (num_channels, H, W[, D]). 
@@ -1344,8 +1342,9 @@ def __call__( padding_mode=self.padding_mode.value if padding_mode is None else GridSamplePadMode(padding_mode).value, align_corners=True, )[0] - - return out + out_val: NdarrayOrTensor + out_val, *_ = convert_to_dst_type(out, dst=img, dtype=out.dtype) + return out_val class Affine(Transform): @@ -1425,7 +1424,7 @@ def __call__( spatial_size: Optional[Union[Sequence[int], int]] = None, mode: Optional[Union[GridSampleMode, str]] = None, padding_mode: Optional[Union[GridSamplePadMode, str]] = None, - ) -> Union[torch.Tensor, Tuple[torch.Tensor, NdarrayOrTensor]]: + ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor]]: """ Args: img: shape must be (num_channels, H, W[, D]), @@ -1589,7 +1588,7 @@ def __call__( spatial_size: Optional[Union[Sequence[int], int]] = None, mode: Optional[Union[GridSampleMode, str]] = None, padding_mode: Optional[Union[GridSamplePadMode, str]] = None, - ) -> torch.Tensor: + ) -> NdarrayOrTensor: """ Args: img: shape must be (num_channels, H, W[, D]), @@ -1615,7 +1614,7 @@ def __call__( grid = self.get_identity_grid(sp_size) if self._do_transform: grid = self.rand_affine_grid(grid=grid) - out: torch.Tensor = self.resampler( + out: NdarrayOrTensor = self.resampler( img=img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode ) return out @@ -1727,7 +1726,7 @@ def __call__( spatial_size: Optional[Union[Tuple[int, int], int]] = None, mode: Optional[Union[GridSampleMode, str]] = None, padding_mode: Optional[Union[GridSamplePadMode, str]] = None, - ) -> torch.Tensor: + ) -> NdarrayOrTensor: """ Args: img: shape must be (num_channels, H, W), @@ -1756,7 +1755,7 @@ def __call__( grid = CenterSpatialCrop(roi_size=sp_size)(grid[0]) else: grid = create_grid(spatial_size=sp_size) - out: torch.Tensor = self.resampler( + out: NdarrayOrTensor = self.resampler( img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode ) return out @@ -1877,7 +1876,7 @@ def __call__( spatial_size: Optional[Union[Tuple[int, int, int], int]] = None, mode: Optional[Union[GridSampleMode, str]] = None, padding_mode: Optional[Union[GridSamplePadMode, str]] = None, - ) -> torch.Tensor: + ) -> NdarrayOrTensor: """ Args: img: shape must be (num_channels, H, W, D), @@ -1902,7 +1901,7 @@ def __call__( offset = torch.as_tensor(self.rand_offset, device=self.device).unsqueeze(0) grid[:3] += gaussian(offset)[0] * self.magnitude grid = self.rand_affine_grid(grid=grid) - out: torch.Tensor = self.resampler( + out: NdarrayOrTensor = self.resampler( img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode ) return out diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 801f4316de..d794e51e80 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -820,10 +820,6 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N if do_resampling: d[key] = self.rand_affine.resampler(d[key], grid, mode=mode, padding_mode=padding_mode) - # if not doing transform and spatial size is unchanged, only need to do convert to torch - else: - d[key], *_ = convert_data_type(d[key], torch.Tensor, dtype=torch.float32, device=device) - return d def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: @@ -1442,10 +1438,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N self.randomize() d = dict(data) angle: Union[Sequence[float], float] = self.x 
if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z) - rotator = Rotate( - angle=angle, - keep_size=self.keep_size, - ) + rotator = Rotate(angle=angle, keep_size=self.keep_size) for key, mode, padding_mode, align_corners, dtype in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners, self.dtype ): @@ -1460,7 +1453,6 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N ) rot_mat = rotator.get_rotation_matrix() else: - d[key], *_ = convert_data_type(d[key], torch.Tensor) rot_mat = np.eye(d[key].ndim) self.push_transform( d, diff --git a/monai/utils/type_conversion.py b/monai/utils/type_conversion.py index 773f1cbc37..3636dbc6c0 100644 --- a/monai/utils/type_conversion.py +++ b/monai/utils/type_conversion.py @@ -248,11 +248,14 @@ def convert_data_type( return data, orig_type, orig_device -def convert_to_dst_type(src: Any, dst: NdarrayOrTensor) -> Tuple[NdarrayOrTensor, type, Optional[torch.device]]: +def convert_to_dst_type( + src: Any, dst: NdarrayOrTensor, dtype: Optional[Union[DtypeLike, torch.dtype]] = None +) -> Tuple[NdarrayOrTensor, type, Optional[torch.device]]: """ - If `dst` is `torch.Tensor` or its subclass, convert `src` to `torch.Tensor` with the same data type as `dst`, - if `dst` is `numpy.ndarray` or its subclass, convert to `numpy.ndarray` with the same data type as `dst`, + If `dst` is an instance of `torch.Tensor` or its subclass, convert `src` to `torch.Tensor` with the same data type as `dst`, + if `dst` is an instance of `numpy.ndarray` or its subclass, convert to `numpy.ndarray` with the same data type as `dst`, otherwise, convert to the type of `dst` directly. + `dtype` is an optional argument if the target `dtype` is different from the original `dst`'s data type. See Also: :func:`convert_data_type` @@ -261,6 +264,9 @@ def convert_to_dst_type(src: Any, dst: NdarrayOrTensor) -> Tuple[NdarrayOrTensor if isinstance(dst, torch.Tensor): device = dst.device + if dtype is None: + dtype = dst.dtype + output_type: Any if isinstance(dst, torch.Tensor): output_type = torch.Tensor @@ -268,4 +274,4 @@ def convert_to_dst_type(src: Any, dst: NdarrayOrTensor) -> Tuple[NdarrayOrTensor output_type = np.ndarray else: output_type = type(dst) - return convert_data_type(data=src, output_type=output_type, device=device, dtype=dst.dtype) + return convert_data_type(data=src, output_type=output_type, device=device, dtype=dtype) diff --git a/tests/test_affine_grid.py b/tests/test_affine_grid.py index ac7c2741b0..972cf20a1f 100644 --- a/tests/test_affine_grid.py +++ b/tests/test_affine_grid.py @@ -115,7 +115,7 @@ def test_affine_grid(self, input_param, input_data, expected_val): result, _ = g(**input_data) if "device" in input_data: self.assertEqual(result.device, input_data[device]) - assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) + assert_allclose(result, expected_val, type_test=False, rtol=1e-4, atol=1e-4) if __name__ == "__main__": diff --git a/tests/test_as_channel_first.py b/tests/test_as_channel_first.py index 0d1b1c7d3a..918e576011 100644 --- a/tests/test_as_channel_first.py +++ b/tests/test_as_channel_first.py @@ -34,7 +34,7 @@ def test_value(self, in_type, input_param, expected_shape): if isinstance(test_data, torch.Tensor): test_data = test_data.cpu().numpy() expected = np.moveaxis(test_data, input_param["channel_dim"], 0) - assert_allclose(expected, result) + assert_allclose(result, expected, type_test=False) if __name__ == "__main__": diff --git a/tests/test_ensure_type.py b/tests/test_ensure_type.py index 
f09c022f74..64094b2360 100644 --- a/tests/test_ensure_type.py +++ b/tests/test_ensure_type.py @@ -29,7 +29,7 @@ def test_array_input(self): if dtype == "NUMPY": self.assertTrue(result.dtype == np.float32) self.assertTrue(isinstance(result, torch.Tensor if dtype == "tensor" else np.ndarray)) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) self.assertTupleEqual(result.shape, (2, 2)) def test_single_input(self): @@ -43,7 +43,7 @@ def test_single_input(self): if isinstance(test_data, bool): self.assertFalse(result) else: - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) self.assertEqual(result.ndim, 0) def test_string(self): diff --git a/tests/test_ensure_typed.py b/tests/test_ensure_typed.py index 5e3e941f59..a78df6cb3f 100644 --- a/tests/test_ensure_typed.py +++ b/tests/test_ensure_typed.py @@ -34,7 +34,7 @@ def test_array_input(self): if dtype == "NUMPY": self.assertTrue(result.dtype == np.float32) self.assertTrue(isinstance(result, torch.Tensor if dtype == "tensor" else np.ndarray)) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) self.assertTupleEqual(result.shape, (2, 2)) def test_single_input(self): @@ -48,7 +48,7 @@ def test_single_input(self): if isinstance(test_data, bool): self.assertFalse(result) else: - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) self.assertEqual(result.ndim, 0) def test_string(self): diff --git a/tests/test_flip.py b/tests/test_flip.py index 404a3def7d..8547f8aeb4 100644 --- a/tests/test_flip.py +++ b/tests/test_flip.py @@ -34,12 +34,10 @@ def test_correct_results(self, _, spatial_axis): for p in TEST_NDARRAYS: im = p(self.imt[0]) flip = Flip(spatial_axis=spatial_axis) - expected = [] - for channel in self.imt[0]: - expected.append(np.flip(channel, spatial_axis)) + expected = [np.flip(channel, spatial_axis) for channel in self.imt[0]] expected = np.stack(expected) result = flip(im) - assert_allclose(expected, result) + assert_allclose(result, p(expected)) if __name__ == "__main__": diff --git a/tests/test_flipd.py b/tests/test_flipd.py index 1676723800..2fa783f8ad 100644 --- a/tests/test_flipd.py +++ b/tests/test_flipd.py @@ -33,12 +33,10 @@ def test_invalid_cases(self, _, spatial_axis, raises): def test_correct_results(self, _, spatial_axis): for p in TEST_NDARRAYS: flip = Flipd(keys="img", spatial_axis=spatial_axis) - expected = [] - for channel in self.imt[0]: - expected.append(np.flip(channel, spatial_axis)) + expected = [np.flip(channel, spatial_axis) for channel in self.imt[0]] expected = np.stack(expected) result = flip({"img": p(self.imt[0])})["img"] - assert_allclose(expected, result) + assert_allclose(result, p(expected)) if __name__ == "__main__": diff --git a/tests/test_inverse_collation.py b/tests/test_inverse_collation.py index fb6a3a1e80..bc0fc3ff1b 100644 --- a/tests/test_inverse_collation.py +++ b/tests/test_inverse_collation.py @@ -115,10 +115,7 @@ def tearDown(self): @parameterized.expand(TESTS_2D + TESTS_3D) def test_collation(self, _, transform, collate_fn, ndim): - if ndim == 3: - data = self.data_3d - else: - data = self.data_2d + data = self.data_3d if ndim == 3 else self.data_2d if collate_fn: modified_transform = transform else: diff --git a/tests/test_label_to_mask.py b/tests/test_label_to_mask.py index 9caa7252f3..6c8f935fbc 100644 --- a/tests/test_label_to_mask.py +++ b/tests/test_label_to_mask.py @@ -64,7 +64,7 @@ def test_value(self, argments, image, 
expected_data): self.assertEqual(type(result), type(image)) if isinstance(result, torch.Tensor): self.assertEqual(result.device, image.device) - assert_allclose(result, expected_data) + assert_allclose(result, expected_data, type_test=False) if __name__ == "__main__": diff --git a/tests/test_label_to_maskd.py b/tests/test_label_to_maskd.py index b8f0d3c171..b2073e8ac3 100644 --- a/tests/test_label_to_maskd.py +++ b/tests/test_label_to_maskd.py @@ -65,7 +65,7 @@ def test_value(self, argments, input_data, expected_data): self.assertEqual(type(r), type(i)) if isinstance(r, torch.Tensor): self.assertEqual(r.device, i.device) - assert_allclose(r, expected_data) + assert_allclose(r, expected_data, type_test=False) if __name__ == "__main__": diff --git a/tests/test_normalize_intensity.py b/tests/test_normalize_intensity.py index 2755eb4c25..41c6b053ec 100644 --- a/tests/test_normalize_intensity.py +++ b/tests/test_normalize_intensity.py @@ -31,51 +31,51 @@ "divisor": u(np.array([0.5, 0.5, 0.5, 0.5])), "nonzero": True, }, - np.array([0.0, 3.0, 0.0, 4.0]), - np.array([0.0, -1.0, 0.0, 1.0]), + p(np.array([0.0, 3.0, 0.0, 4.0])), + p(np.array([0.0, -1.0, 0.0, 1.0])), ] ) - TESTS.append([p, {"nonzero": True}, np.array([0.0, 0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 0.0])]) - TESTS.append([p, {"nonzero": False}, np.array([0.0, 0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 0.0])]) - TESTS.append([p, {"nonzero": False}, np.array([1, 1, 1, 1]), np.array([0.0, 0.0, 0.0, 0.0])]) + TESTS.append([p, {"nonzero": True}, p(np.array([0.0, 0.0, 0.0, 0.0])), p(np.array([0.0, 0.0, 0.0, 0.0]))]) + TESTS.append([p, {"nonzero": False}, p(np.array([0.0, 0.0, 0.0, 0.0])), p(np.array([0.0, 0.0, 0.0, 0.0]))]) + TESTS.append([p, {"nonzero": False}, p(np.array([1, 1, 1, 1])), p(np.array([0.0, 0.0, 0.0, 0.0]))]) TESTS.append( [ p, {"nonzero": False, "channel_wise": True, "subtrahend": [1, 2, 3]}, - np.ones((3, 2, 2)), - np.array([[[0.0, 0.0], [0.0, 0.0]], [[-1.0, -1.0], [-1.0, -1.0]], [[-2.0, -2.0], [-2.0, -2.0]]]), + p(np.ones((3, 2, 2))), + p(np.array([[[0.0, 0.0], [0.0, 0.0]], [[-1.0, -1.0], [-1.0, -1.0]], [[-2.0, -2.0], [-2.0, -2.0]]])), ] ) TESTS.append( [ p, {"nonzero": True, "channel_wise": True, "subtrahend": [1, 2, 3], "divisor": [0, 0, 2]}, - np.ones((3, 2, 2)), - np.array([[[0.0, 0.0], [0.0, 0.0]], [[-1.0, -1.0], [-1.0, -1.0]], [[-1.0, -1.0], [-1.0, -1.0]]]), + p(np.ones((3, 2, 2))), + p(np.array([[[0.0, 0.0], [0.0, 0.0]], [[-1.0, -1.0], [-1.0, -1.0]], [[-1.0, -1.0], [-1.0, -1.0]]])), ] ) TESTS.append( [ p, {"nonzero": True, "channel_wise": False, "subtrahend": 2, "divisor": 0}, - np.ones((3, 2, 2)), - np.ones((3, 2, 2)) * -1.0, + p(np.ones((3, 2, 2))), + p(np.ones((3, 2, 2)) * -1.0), ] ) TESTS.append( [ p, {"nonzero": True, "channel_wise": False, "subtrahend": np.ones((3, 2, 2)) * 0.5, "divisor": 0}, - np.ones((3, 2, 2)), - np.ones((3, 2, 2)) * 0.5, + p(np.ones((3, 2, 2))), + p(np.ones((3, 2, 2)) * 0.5), ] ) TESTS.append( [ p, {"nonzero": True, "channel_wise": True, "subtrahend": np.ones((3, 2, 2)) * 0.5, "divisor": [0, 1, 0]}, - np.ones((3, 2, 2)), - np.ones((3, 2, 2)) * 0.5, + p(np.ones((3, 2, 2))), + p(np.ones((3, 2, 2)) * 0.5), ] ) @@ -91,17 +91,14 @@ def test_default(self, im_type): self.assertEqual(im.device, normalized.device) self.assertTrue(normalized.dtype in (np.float32, torch.float32)) expected = (self.imt - np.mean(self.imt)) / np.std(self.imt) - assert_allclose(expected, normalized, rtol=1e-3) + assert_allclose(normalized, expected, type_test=False, rtol=1e-3) @parameterized.expand(TESTS) def 
test_nonzero(self, in_type, input_param, input_data, expected_data): normalizer = NormalizeIntensity(**input_param) im = in_type(input_data) normalized = normalizer(im) - self.assertEqual(type(im), type(normalized)) - if isinstance(normalized, torch.Tensor): - self.assertEqual(im.device, normalized.device) - assert_allclose(expected_data, normalized) + assert_allclose(normalized, in_type(expected_data)) @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_channel_wise(self, im_type): @@ -109,10 +106,7 @@ def test_channel_wise(self, im_type): input_data = im_type(np.array([[0.0, 3.0, 0.0, 4.0], [0.0, 4.0, 0.0, 5.0]])) expected = np.array([[0.0, -1.0, 0.0, 1.0], [0.0, -1.0, 0.0, 1.0]]) normalized = normalizer(input_data) - self.assertEqual(type(input_data), type(normalized)) - if isinstance(normalized, torch.Tensor): - self.assertEqual(input_data.device, normalized.device) - assert_allclose(expected, normalized) + assert_allclose(normalized, im_type(expected)) @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_value_errors(self, im_type): diff --git a/tests/test_normalize_intensityd.py b/tests/test_normalize_intensityd.py index e2cec5407a..60b1d05456 100644 --- a/tests/test_normalize_intensityd.py +++ b/tests/test_normalize_intensityd.py @@ -25,7 +25,7 @@ [ {"keys": ["img"], "nonzero": True}, {"img": p(np.array([0.0, 3.0, 0.0, 4.0]))}, - np.array([0.0, -1.0, 0.0, 1.0]), + p(np.array([0.0, -1.0, 0.0, 1.0])), ] ) TESTS.append( @@ -37,14 +37,14 @@ "nonzero": True, }, {"img": p(np.array([0.0, 3.0, 0.0, 4.0]))}, - np.array([0.0, -1.0, 0.0, 1.0]), + p(np.array([0.0, -1.0, 0.0, 1.0])), ] ) TESTS.append( [ {"keys": ["img"], "nonzero": True}, {"img": p(np.array([0.0, 0.0, 0.0, 0.0]))}, - np.array([0.0, 0.0, 0.0, 0.0]), + p(np.array([0.0, 0.0, 0.0, 0.0])), ] ) @@ -60,7 +60,7 @@ def test_image_normalize_intensityd(self, im_type): self.assertEqual(type(im), type(normalized)) if isinstance(normalized, torch.Tensor): self.assertEqual(im.device, normalized.device) - assert_allclose(normalized, expected, rtol=1e-3) + assert_allclose(normalized, im_type(expected), rtol=1e-3) @parameterized.expand(TESTS) def test_nonzero(self, input_param, input_data, expected_data): @@ -82,7 +82,7 @@ def test_channel_wise(self, im_type): if isinstance(normalized, torch.Tensor): self.assertEqual(input_data[key].device, normalized.device) expected = np.array([[0.0, -1.0, 0.0, 1.0], [0.0, -1.0, 0.0, 1.0]]) - assert_allclose(normalized, expected) + assert_allclose(normalized, im_type(expected)) if __name__ == "__main__": diff --git a/tests/test_rand_affine_grid.py b/tests/test_rand_affine_grid.py index 64c32c8d75..4fb534aba1 100644 --- a/tests/test_rand_affine_grid.py +++ b/tests/test_rand_affine_grid.py @@ -201,7 +201,7 @@ def test_rand_affine_grid(self, input_param, input_data, expected_val): result = g(**input_data) if "device" in input_data: self.assertEqual(result.device, input_data[device]) - assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) + assert_allclose(result, expected_val, type_test=False, rtol=1e-4, atol=1e-4) if __name__ == "__main__": diff --git a/tests/test_rand_axis_flip.py b/tests/test_rand_axis_flip.py index c05c3a1e0d..1772ef4987 100644 --- a/tests/test_rand_axis_flip.py +++ b/tests/test_rand_axis_flip.py @@ -22,10 +22,8 @@ def test_correct_results(self): for p in TEST_NDARRAYS: flip = RandAxisFlip(prob=1.0) result = flip(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(np.flip(channel, flip._axis)) - assert_allclose(np.stack(expected), result) + 
expected = [np.flip(channel, flip._axis) for channel in self.imt[0]] + assert_allclose(result, p(np.stack(expected))) if __name__ == "__main__": diff --git a/tests/test_rand_axis_flipd.py b/tests/test_rand_axis_flipd.py index 7bef0baa63..37a17db69f 100644 --- a/tests/test_rand_axis_flipd.py +++ b/tests/test_rand_axis_flipd.py @@ -23,10 +23,8 @@ def test_correct_results(self): flip = RandAxisFlipd(keys="img", prob=1.0) result = flip({"img": p(self.imt[0])})["img"] - expected = [] - for channel in self.imt[0]: - expected.append(np.flip(channel, flip._axis)) - assert_allclose(np.stack(expected), result) + expected = [np.flip(channel, flip._axis) for channel in self.imt[0]] + assert_allclose(result, p(np.stack(expected))) if __name__ == "__main__": diff --git a/tests/test_rand_flip.py b/tests/test_rand_flip.py index b3c514cb1f..df49d60861 100644 --- a/tests/test_rand_flip.py +++ b/tests/test_rand_flip.py @@ -34,12 +34,10 @@ def test_correct_results(self, _, spatial_axis): for p in TEST_NDARRAYS: im = p(self.imt[0]) flip = RandFlip(prob=1.0, spatial_axis=spatial_axis) - expected = [] - for channel in self.imt[0]: - expected.append(np.flip(channel, spatial_axis)) + expected = [np.flip(channel, spatial_axis) for channel in self.imt[0]] expected = np.stack(expected) result = flip(im) - assert_allclose(expected, result) + assert_allclose(result, p(expected)) if __name__ == "__main__": diff --git a/tests/test_rand_flipd.py b/tests/test_rand_flipd.py index 8972024fd8..c2869537cb 100644 --- a/tests/test_rand_flipd.py +++ b/tests/test_rand_flipd.py @@ -26,11 +26,9 @@ def test_correct_results(self, _, spatial_axis): for p in TEST_NDARRAYS: flip = RandFlipd(keys="img", prob=1.0, spatial_axis=spatial_axis) result = flip({"img": p(self.imt[0])})["img"] - expected = [] - for channel in self.imt[0]: - expected.append(np.flip(channel, spatial_axis)) + expected = [np.flip(channel, spatial_axis) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(expected, result) + assert_allclose(result, p(expected)) if __name__ == "__main__": diff --git a/tests/test_rand_rotate90.py b/tests/test_rand_rotate90.py index f339158f94..9fc025fbbe 100644 --- a/tests/test_rand_rotate90.py +++ b/tests/test_rand_rotate90.py @@ -23,44 +23,36 @@ def test_default(self): for p in TEST_NDARRAYS: rotate.set_random_state(123) rotated = rotate(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 0, (0, 1))) + expected = [np.rot90(channel, 0, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8) + assert_allclose(rotated, p(expected), rtol=1.0e-5, atol=1.0e-8) def test_k(self): rotate = RandRotate90(max_k=2) for p in TEST_NDARRAYS: rotate.set_random_state(234) rotated = rotate(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 0, (0, 1))) + expected = [np.rot90(channel, 0, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8) + assert_allclose(rotated, p(expected), rtol=1.0e-5, atol=1.0e-8) def test_spatial_axes(self): rotate = RandRotate90(spatial_axes=(0, 1)) for p in TEST_NDARRAYS: rotate.set_random_state(234) rotated = rotate(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 0, (0, 1))) + expected = [np.rot90(channel, 0, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated, expected, 
rtol=1.0e-5, atol=1.0e-8) + assert_allclose(rotated, p(expected), rtol=1.0e-5, atol=1.0e-8) def test_prob_k_spatial_axes(self): rotate = RandRotate90(prob=1.0, max_k=2, spatial_axes=(0, 1)) for p in TEST_NDARRAYS: rotate.set_random_state(234) rotated = rotate(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 1, (0, 1))) + expected = [np.rot90(channel, 1, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8) + assert_allclose(rotated, p(expected), rtol=1.0e-5, atol=1.0e-8) if __name__ == "__main__": diff --git a/tests/test_rand_rotate90d.py b/tests/test_rand_rotate90d.py index f9083afb0c..3071aa82c8 100644 --- a/tests/test_rand_rotate90d.py +++ b/tests/test_rand_rotate90d.py @@ -24,11 +24,9 @@ def test_default(self): for p in TEST_NDARRAYS: rotate.set_random_state(123) rotated = rotate({key: p(self.imt[0])}) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 0, (0, 1))) + expected = [np.rot90(channel, 0, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated[key], expected) + assert_allclose(rotated[key], p(expected)) def test_k(self): key = "test" @@ -36,11 +34,9 @@ def test_k(self): for p in TEST_NDARRAYS: rotate.set_random_state(234) rotated = rotate({key: p(self.imt[0])}) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 0, (0, 1))) + expected = [np.rot90(channel, 0, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated[key], expected) + assert_allclose(rotated[key], p(expected)) def test_spatial_axes(self): key = "test" @@ -48,11 +44,9 @@ def test_spatial_axes(self): for p in TEST_NDARRAYS: rotate.set_random_state(234) rotated = rotate({key: p(self.imt[0])}) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 0, (0, 1))) + expected = [np.rot90(channel, 0, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated[key], expected) + assert_allclose(rotated[key], p(expected)) def test_prob_k_spatial_axes(self): key = "test" @@ -60,11 +54,9 @@ def test_prob_k_spatial_axes(self): for p in TEST_NDARRAYS: rotate.set_random_state(234) rotated = rotate({key: p(self.imt[0])}) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 1, (0, 1))) + expected = [np.rot90(channel, 1, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated[key], expected) + assert_allclose(rotated[key], p(expected)) def test_no_key(self): key = "unknown" diff --git a/tests/test_rand_scale_intensity.py b/tests/test_rand_scale_intensity.py index 750d88bfad..b863e2f874 100644 --- a/tests/test_rand_scale_intensity.py +++ b/tests/test_rand_scale_intensity.py @@ -25,7 +25,7 @@ def test_value(self): result = scaler(p(self.imt)) np.random.seed(0) expected = p((self.imt * (1 + np.random.uniform(low=-0.5, high=0.5))).astype(np.float32)) - assert_allclose(result, expected, rtol=1e-7, atol=0) + assert_allclose(result, p(expected), rtol=1e-7, atol=0) if __name__ == "__main__": diff --git a/tests/test_rand_scale_intensityd.py b/tests/test_rand_scale_intensityd.py index a8d2e63f65..fdcbd7146a 100644 --- a/tests/test_rand_scale_intensityd.py +++ b/tests/test_rand_scale_intensityd.py @@ -19,14 +19,14 @@ class TestRandScaleIntensityd(NumpyImageTestCase2D): def test_value(self): + key = "img" for p in TEST_NDARRAYS: - key = "img" scaler = 
RandScaleIntensityd(keys=[key], factors=0.5, prob=1.0) scaler.set_random_state(seed=0) result = scaler({key: p(self.imt)}) np.random.seed(0) expected = (self.imt * (1 + np.random.uniform(low=-0.5, high=0.5))).astype(np.float32) - assert_allclose(result[key], expected) + assert_allclose(result[key], p(expected)) if __name__ == "__main__": diff --git a/tests/test_rand_shift_intensityd.py b/tests/test_rand_shift_intensityd.py index 6766236146..c5dfb66722 100644 --- a/tests/test_rand_shift_intensityd.py +++ b/tests/test_rand_shift_intensityd.py @@ -19,14 +19,14 @@ class TestRandShiftIntensityd(NumpyImageTestCase2D): def test_value(self): + key = "img" for p in TEST_NDARRAYS: - key = "img" shifter = RandShiftIntensityd(keys=[key], offsets=1.0, prob=1.0) shifter.set_random_state(seed=0) result = shifter({key: p(self.imt)}) np.random.seed(0) expected = self.imt + np.random.uniform(low=-1.0, high=1.0) - assert_allclose(result[key], expected) + assert_allclose(result[key], p(expected)) def test_factor(self): key = "img" diff --git a/tests/test_rand_zoom.py b/tests/test_rand_zoom.py index 0ac1b92c39..6ccb265cca 100644 --- a/tests/test_rand_zoom.py +++ b/tests/test_rand_zoom.py @@ -35,11 +35,13 @@ def test_correct_results(self, min_zoom, max_zoom, mode, keep_size): ) random_zoom.set_random_state(1234) zoomed = random_zoom(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode="nearest", order=0, prefilter=False)) + expected = [ + zoom_scipy(channel, zoom=random_zoom._zoom, mode="nearest", order=0, prefilter=False) + for channel in self.imt[0] + ] + expected = np.stack(expected).astype(np.float32) - assert_allclose(zoomed, expected, atol=1.0) + assert_allclose(zoomed, p(expected), atol=1.0) def test_keep_size(self): for p in TEST_NDARRAYS: diff --git a/tests/test_rand_zoomd.py b/tests/test_rand_zoomd.py index fafaf748bd..842d207ca6 100644 --- a/tests/test_rand_zoomd.py +++ b/tests/test_rand_zoomd.py @@ -38,11 +38,13 @@ def test_correct_results(self, min_zoom, max_zoom, mode, align_corners, keep_siz random_zoom.set_random_state(1234) zoomed = random_zoom({key: p(self.imt[0])}) - expected = [] - for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode="nearest", order=0, prefilter=False)) + expected = [ + zoom_scipy(channel, zoom=random_zoom._zoom, mode="nearest", order=0, prefilter=False) + for channel in self.imt[0] + ] + expected = np.stack(expected).astype(np.float32) - assert_allclose(expected, zoomed[key], atol=1.0) + assert_allclose(zoomed[key], p(expected), atol=1.0) def test_keep_size(self): key = "img" diff --git a/tests/test_rotate90.py b/tests/test_rotate90.py index 03a967a16b..9857b26fe8 100644 --- a/tests/test_rotate90.py +++ b/tests/test_rotate90.py @@ -22,41 +22,33 @@ def test_rotate90_default(self): rotate = Rotate90() for p in TEST_NDARRAYS: rotated = rotate(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 1, (0, 1))) + expected = [np.rot90(channel, 1, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8) + assert_allclose(rotated, p(expected), rtol=1.0e-5, atol=1.0e-8) def test_k(self): rotate = Rotate90(k=2) for p in TEST_NDARRAYS: rotated = rotate(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 2, (0, 1))) + expected = [np.rot90(channel, 2, (0, 1)) for channel in self.imt[0]] expected = 
np.stack(expected) - assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8) + assert_allclose(rotated, p(expected), rtol=1.0e-5, atol=1.0e-8) def test_spatial_axes(self): rotate = Rotate90(spatial_axes=(0, -1)) for p in TEST_NDARRAYS: rotated = rotate(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 1, (0, -1))) + expected = [np.rot90(channel, 1, (0, -1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8) + assert_allclose(rotated, p(expected), rtol=1.0e-5, atol=1.0e-8) def test_prob_k_spatial_axes(self): rotate = Rotate90(k=2, spatial_axes=(0, 1)) for p in TEST_NDARRAYS: rotated = rotate(p(self.imt[0])) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 2, (0, 1))) + expected = [np.rot90(channel, 2, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8) + assert_allclose(rotated, p(expected), rtol=1.0e-5, atol=1.0e-8) if __name__ == "__main__": diff --git a/tests/test_rotate90d.py b/tests/test_rotate90d.py index a1fa3c977c..a2a4a27521 100644 --- a/tests/test_rotate90d.py +++ b/tests/test_rotate90d.py @@ -23,44 +23,36 @@ def test_rotate90_default(self): rotate = Rotate90d(keys=key) for p in TEST_NDARRAYS: rotated = rotate({key: p(self.imt[0])}) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 1, (0, 1))) + expected = [np.rot90(channel, 1, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated[key], expected) + assert_allclose(rotated[key], p(expected)) def test_k(self): key = None rotate = Rotate90d(keys=key, k=2) for p in TEST_NDARRAYS: rotated = rotate({key: p(self.imt[0])}) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 2, (0, 1))) + expected = [np.rot90(channel, 2, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated[key], expected) + assert_allclose(rotated[key], p(expected)) def test_spatial_axes(self): key = "test" rotate = Rotate90d(keys=key, spatial_axes=(0, 1)) for p in TEST_NDARRAYS: rotated = rotate({key: p(self.imt[0])}) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 1, (0, 1))) + expected = [np.rot90(channel, 1, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated[key], expected) + assert_allclose(rotated[key], p(expected)) def test_prob_k_spatial_axes(self): key = "test" rotate = Rotate90d(keys=key, k=2, spatial_axes=(0, 1)) for p in TEST_NDARRAYS: rotated = rotate({key: p(self.imt[0])}) - expected = [] - for channel in self.imt[0]: - expected.append(np.rot90(channel, 2, (0, 1))) + expected = [np.rot90(channel, 2, (0, 1)) for channel in self.imt[0]] expected = np.stack(expected) - assert_allclose(rotated[key], expected) + assert_allclose(rotated[key], p(expected)) def test_no_key(self): key = "unknown" diff --git a/tests/test_scale_intensity.py b/tests/test_scale_intensity.py index c2485af616..24c6900ba5 100644 --- a/tests/test_scale_intensity.py +++ b/tests/test_scale_intensity.py @@ -26,14 +26,14 @@ def test_range_scale(self): maxa = self.imt.max() norm = (self.imt - mina) / (maxa - mina) expected = p((norm * (2.0 - 1.0)) + 1.0) - assert_allclose(result, expected, rtol=1e-7, atol=0) + assert_allclose(result, expected, type_test=False, rtol=1e-7, atol=0) def test_factor_scale(self): for p in TEST_NDARRAYS: scaler = 
ScaleIntensity(minv=None, maxv=None, factor=0.1) result = scaler(p(self.imt)) expected = p((self.imt * (1 + 0.1)).astype(np.float32)) - assert_allclose(result, expected, rtol=1e-7, atol=0) + assert_allclose(result, p(expected), rtol=1e-7, atol=0) if __name__ == "__main__": diff --git a/tests/test_scale_intensity_range.py b/tests/test_scale_intensity_range.py index d64f09ae82..d06bfd3596 100644 --- a/tests/test_scale_intensity_range.py +++ b/tests/test_scale_intensity_range.py @@ -22,7 +22,7 @@ def test_image_scale_intensity_range(self): scaled = scaler(p(self.imt)) expected = (self.imt - 20) / 88 expected = expected * 30 + 50 - assert_allclose(scaled, expected) + assert_allclose(scaled, p(expected)) if __name__ == "__main__": diff --git a/tests/test_scale_intensity_range_percentiles.py b/tests/test_scale_intensity_range_percentiles.py index 5cd19581b3..5ba3a1e1ee 100644 --- a/tests/test_scale_intensity_range_percentiles.py +++ b/tests/test_scale_intensity_range_percentiles.py @@ -32,7 +32,7 @@ def test_scaling(self): scaler = ScaleIntensityRangePercentiles(lower=lower, upper=upper, b_min=b_min, b_max=b_max) for p in TEST_NDARRAYS: result = scaler(p(img)) - assert_allclose(expected, result) + assert_allclose(result, p(expected)) def test_relative_scaling(self): img = self.imt @@ -51,7 +51,7 @@ def test_relative_scaling(self): for p in TEST_NDARRAYS: result = scaler(p(img)) - assert_allclose(expected_img, result) + assert_allclose(result, p(expected_img)) def test_invalid_instantiation(self): self.assertRaises(ValueError, ScaleIntensityRangePercentiles, lower=-10, upper=99, b_min=0, b_max=255) diff --git a/tests/test_scale_intensity_ranged.py b/tests/test_scale_intensity_ranged.py index b4d8cbf65a..dc064a7708 100644 --- a/tests/test_scale_intensity_ranged.py +++ b/tests/test_scale_intensity_ranged.py @@ -23,7 +23,7 @@ def test_image_scale_intensity_ranged(self): scaled = scaler({key: p(self.imt)}) expected = (self.imt - 20) / 88 expected = expected * 30 + 50 - assert_allclose(scaled[key], expected) + assert_allclose(scaled[key], p(expected)) if __name__ == "__main__": diff --git a/tests/test_scale_intensityd.py b/tests/test_scale_intensityd.py index 6e13dbc272..ce298f20af 100644 --- a/tests/test_scale_intensityd.py +++ b/tests/test_scale_intensityd.py @@ -19,23 +19,23 @@ class TestScaleIntensityd(NumpyImageTestCase2D): def test_range_scale(self): + key = "img" for p in TEST_NDARRAYS: - key = "img" scaler = ScaleIntensityd(keys=[key], minv=1.0, maxv=2.0) result = scaler({key: p(self.imt)}) mina = np.min(self.imt) maxa = np.max(self.imt) norm = (self.imt - mina) / (maxa - mina) expected = (norm * (2.0 - 1.0)) + 1.0 - assert_allclose(result[key], expected) + assert_allclose(result[key], p(expected)) def test_factor_scale(self): + key = "img" for p in TEST_NDARRAYS: - key = "img" scaler = ScaleIntensityd(keys=[key], minv=None, maxv=None, factor=0.1) result = scaler({key: p(self.imt)}) expected = (self.imt * (1 + 0.1)).astype(np.float32) - assert_allclose(result[key], expected) + assert_allclose(result[key], p(expected)) if __name__ == "__main__": diff --git a/tests/test_shift_intensityd.py b/tests/test_shift_intensityd.py index 0396857781..66aad23b1e 100644 --- a/tests/test_shift_intensityd.py +++ b/tests/test_shift_intensityd.py @@ -24,7 +24,7 @@ def test_value(self): shifter = ShiftIntensityd(keys=[key], offset=1.0) result = shifter({key: p(self.imt)}) expected = self.imt + 1.0 - assert_allclose(result[key], expected) + assert_allclose(result[key], p(expected)) def test_factor(self): key = 
"img" diff --git a/tests/test_threshold_intensity.py b/tests/test_threshold_intensity.py index 0614514456..075a650ec0 100644 --- a/tests/test_threshold_intensity.py +++ b/tests/test_threshold_intensity.py @@ -29,7 +29,7 @@ class TestThresholdIntensity(unittest.TestCase): def test_value(self, in_type, input_param, expected_value): test_data = in_type(np.arange(10)) result = ThresholdIntensity(**input_param)(test_data) - assert_allclose(result, expected_value) + assert_allclose(result, in_type(expected_value)) if __name__ == "__main__": diff --git a/tests/test_threshold_intensityd.py b/tests/test_threshold_intensityd.py index 398f9cfe91..a2a9fdcf2b 100644 --- a/tests/test_threshold_intensityd.py +++ b/tests/test_threshold_intensityd.py @@ -47,9 +47,9 @@ class TestThresholdIntensityd(unittest.TestCase): def test_value(self, in_type, input_param, expected_value): test_data = {"image": in_type(np.arange(10)), "label": in_type(np.arange(10)), "extra": in_type(np.arange(10))} result = ThresholdIntensityd(**input_param)(test_data) - assert_allclose(result["image"], expected_value) - assert_allclose(result["label"], expected_value) - assert_allclose(result["extra"], expected_value) + assert_allclose(result["image"], in_type(expected_value)) + assert_allclose(result["label"], in_type(expected_value)) + assert_allclose(result["extra"], in_type(expected_value)) if __name__ == "__main__": diff --git a/tests/test_to_numpy.py b/tests/test_to_numpy.py index 09940e33ba..c7631540b8 100644 --- a/tests/test_to_numpy.py +++ b/tests/test_to_numpy.py @@ -31,7 +31,7 @@ def test_cupy_input(self): result = ToNumpy()(test_data) self.assertTrue(isinstance(result, np.ndarray)) self.assertTrue(result.flags["C_CONTIGUOUS"]) - assert_allclose(result, test_data.get()) + assert_allclose(result, test_data.get(), type_test=False) def test_numpy_input(self): test_data = np.array([[1, 2], [3, 4]]) @@ -41,7 +41,7 @@ def test_numpy_input(self): self.assertTrue(isinstance(result, np.ndarray)) self.assertTrue(result.dtype == np.float32) self.assertTrue(result.flags["C_CONTIGUOUS"]) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) def test_tensor_input(self): test_data = torch.tensor([[1, 2], [3, 4]]) @@ -50,7 +50,7 @@ def test_tensor_input(self): result = ToNumpy()(test_data) self.assertTrue(isinstance(result, np.ndarray)) self.assertTrue(result.flags["C_CONTIGUOUS"]) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) @skip_if_no_cuda def test_tensor_cuda_input(self): @@ -60,21 +60,21 @@ def test_tensor_cuda_input(self): result = ToNumpy()(test_data) self.assertTrue(isinstance(result, np.ndarray)) self.assertTrue(result.flags["C_CONTIGUOUS"]) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) def test_list_tuple(self): test_data = [[1, 2], [3, 4]] result = ToNumpy()(test_data) - assert_allclose(result, np.asarray(test_data)) + assert_allclose(result, np.asarray(test_data), type_test=False) test_data = ((1, 2), (3, 4)) result = ToNumpy()(test_data) - assert_allclose(result, np.asarray(test_data)) + assert_allclose(result, np.asarray(test_data), type_test=False) def test_single_value(self): for test_data in [5, np.array(5), torch.tensor(5)]: result = ToNumpy()(test_data) self.assertTrue(isinstance(result, np.ndarray)) - assert_allclose(result, np.asarray(test_data)) + assert_allclose(result, np.asarray(test_data), type_test=False) self.assertEqual(result.ndim, 0) diff --git a/tests/test_to_numpyd.py 
b/tests/test_to_numpyd.py index 5acaef39c7..0b0b032ef2 100644 --- a/tests/test_to_numpyd.py +++ b/tests/test_to_numpyd.py @@ -31,7 +31,7 @@ def test_cupy_input(self): result = ToNumpyd(keys="img")({"img": test_data})["img"] self.assertTrue(isinstance(result, np.ndarray)) self.assertTrue(result.flags["C_CONTIGUOUS"]) - assert_allclose(result, test_data.get()) + assert_allclose(result, test_data.get(), type_test=False) def test_numpy_input(self): test_data = np.array([[1, 2], [3, 4]]) @@ -40,7 +40,7 @@ def test_numpy_input(self): result = ToNumpyd(keys="img")({"img": test_data})["img"] self.assertTrue(isinstance(result, np.ndarray)) self.assertTrue(result.flags["C_CONTIGUOUS"]) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) def test_tensor_input(self): test_data = torch.tensor([[1, 2], [3, 4]]) @@ -49,7 +49,7 @@ def test_tensor_input(self): result = ToNumpyd(keys="img")({"img": test_data})["img"] self.assertTrue(isinstance(result, np.ndarray)) self.assertTrue(result.flags["C_CONTIGUOUS"]) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) @skip_if_no_cuda def test_tensor_cuda_input(self): @@ -59,7 +59,7 @@ def test_tensor_cuda_input(self): result = ToNumpyd(keys="img")({"img": test_data})["img"] self.assertTrue(isinstance(result, np.ndarray)) self.assertTrue(result.flags["C_CONTIGUOUS"]) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) if __name__ == "__main__": diff --git a/tests/test_to_pil.py b/tests/test_to_pil.py index 5690645dd8..b4581053c0 100644 --- a/tests/test_to_pil.py +++ b/tests/test_to_pil.py @@ -43,7 +43,7 @@ class TestToPIL(unittest.TestCase): def test_value(self, test_data): result = ToPIL()(test_data) self.assertTrue(isinstance(result, PILImageImage)) - assert_allclose(np.array(result), test_data) + assert_allclose(np.array(result), test_data, type_test=False) if __name__ == "__main__": diff --git a/tests/test_to_pild.py b/tests/test_to_pild.py index 3a15b1e507..3b83fa5258 100644 --- a/tests/test_to_pild.py +++ b/tests/test_to_pild.py @@ -30,9 +30,7 @@ PILImageImage, _ = optional_import("PIL.Image", name="Image") im = [[1.0, 2.0], [3.0, 4.0]] -TESTS = [] -for p in TEST_NDARRAYS: - TESTS.append([{"keys": "image"}, {"image": p(im)}]) +TESTS = [[{"keys": "image"}, {"image": p(im)}] for p in TEST_NDARRAYS] if has_pil: TESTS.append([{"keys": "image"}, {"image": pil_image_fromarray(np.array(im))}]) @@ -43,7 +41,7 @@ class TestToPIL(unittest.TestCase): def test_values(self, input_param, test_data): result = ToPILd(**input_param)(test_data)[input_param["keys"]] self.assertTrue(isinstance(result, PILImageImage)) - assert_allclose(np.array(result), test_data[input_param["keys"]]) + assert_allclose(np.array(result), test_data[input_param["keys"]], type_test=False) if __name__ == "__main__": diff --git a/tests/test_to_tensor.py b/tests/test_to_tensor.py index 74acb1016c..b065595e89 100644 --- a/tests/test_to_tensor.py +++ b/tests/test_to_tensor.py @@ -37,14 +37,14 @@ class TestToTensor(unittest.TestCase): def test_array_input(self, test_data, expected_shape): result = ToTensor(dtype=torch.float32, device="cpu")(test_data) self.assertTrue(isinstance(result, torch.Tensor)) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) self.assertTupleEqual(result.shape, expected_shape) @parameterized.expand(TESTS_SINGLE) def test_single_input(self, test_data): result = ToTensor()(test_data) self.assertTrue(isinstance(result, 
torch.Tensor)) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) self.assertEqual(result.ndim, 0) @unittest.skipUnless(has_cp, "CuPy is required.") @@ -53,7 +53,7 @@ def test_cupy(self): cupy_array = cp.ascontiguousarray(cp.asarray(test_data)) result = ToTensor()(cupy_array) self.assertTrue(isinstance(result, torch.Tensor)) - assert_allclose(result, test_data) + assert_allclose(result, test_data, type_test=False) if __name__ == "__main__": diff --git a/tests/test_transpose.py b/tests/test_transpose.py index 10882c9dd8..16cca49e1c 100644 --- a/tests/test_transpose.py +++ b/tests/test_transpose.py @@ -42,7 +42,7 @@ def test_transpose(self, im, indices): if isinstance(im, torch.Tensor): im = im.cpu().numpy() out2 = np.transpose(im, indices) - assert_allclose(out1, out2) + assert_allclose(out1, out2, type_test=False) if __name__ == "__main__": diff --git a/tests/test_transposed.py b/tests/test_transposed.py index 88ecd0c872..2f9558b74e 100644 --- a/tests/test_transposed.py +++ b/tests/test_transposed.py @@ -57,13 +57,13 @@ def test_transpose(self, im, indices): if isinstance(im, torch.Tensor): im = im.cpu().numpy() out_gt = np.transpose(im, indices) - assert_allclose(out_im1, out_gt) - assert_allclose(out_im2, out_gt) + assert_allclose(out_im1, out_gt, type_test=False) + assert_allclose(out_im2, out_gt, type_test=False) # test inverse fwd_inv_data = tr.inverse(out_data) for i, j in zip(data.values(), fwd_inv_data.values()): - assert_allclose(i, j) + assert_allclose(i, j, type_test=False) if __name__ == "__main__": diff --git a/tests/test_utils_pytorch_numpy_unification.py b/tests/test_utils_pytorch_numpy_unification.py index f05235187c..c8e0a35c92 100644 --- a/tests/test_utils_pytorch_numpy_unification.py +++ b/tests/test_utils_pytorch_numpy_unification.py @@ -32,7 +32,7 @@ def test_percentile(self): # pre torch 1.7, no `quantile`. 
Our own method doesn't interpolate, # so we can only be accurate to 0.5 atol = 0.5 if not hasattr(torch, "quantile") else 1e-4 - assert_allclose(results[0], results[-1], atol=atol) + assert_allclose(results[0], results[-1], type_test=False, atol=atol) def test_fails(self): for p in TEST_NDARRAYS: diff --git a/tests/test_zoom.py b/tests/test_zoom.py index a99e110052..9411988a7e 100644 --- a/tests/test_zoom.py +++ b/tests/test_zoom.py @@ -37,7 +37,7 @@ def test_correct_results(self, zoom, mode): for channel in self.imt[0]: expected.append(zoom_scipy(channel, zoom=zoom, mode="nearest", order=_order, prefilter=False)) expected = np.stack(expected).astype(np.float32) - assert_allclose(zoomed, expected, atol=1.0) + assert_allclose(zoomed, p(expected), atol=1.0) def test_keep_size(self): for p in TEST_NDARRAYS: diff --git a/tests/test_zoomd.py b/tests/test_zoomd.py index 1ebd7d2d08..6231978ca7 100644 --- a/tests/test_zoomd.py +++ b/tests/test_zoomd.py @@ -27,22 +27,18 @@ class TestZoomd(NumpyImageTestCase2D): @parameterized.expand(VALID_CASES) def test_correct_results(self, zoom, mode, keep_size): key = "img" - zoom_fn = Zoomd( - key, - zoom=zoom, - mode=mode, - keep_size=keep_size, - ) + zoom_fn = Zoomd(key, zoom=zoom, mode=mode, keep_size=keep_size) for p in TEST_NDARRAYS: zoomed = zoom_fn({key: p(self.imt[0])}) _order = 0 if mode.endswith("linear"): _order = 1 - expected = [] - for channel in self.imt[0]: - expected.append(zoom_scipy(channel, zoom=zoom, mode="nearest", order=_order, prefilter=False)) + expected = [ + zoom_scipy(channel, zoom=zoom, mode="nearest", order=_order, prefilter=False) for channel in self.imt[0] + ] + expected = np.stack(expected).astype(np.float32) - assert_allclose(expected, zoomed[key], atol=1.0) + assert_allclose(zoomed[key], p(expected), atol=1.0) def test_keep_size(self): key = "img" diff --git a/tests/utils.py b/tests/utils.py index 94f7f55fe0..6b7f6c4c16 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -57,19 +57,38 @@ def clone(data: NdarrayTensor) -> NdarrayTensor: return copy.deepcopy(data) -def assert_allclose(a: NdarrayOrTensor, b: NdarrayOrTensor, *args, **kwargs): +def assert_allclose( + actual: NdarrayOrTensor, + desired: NdarrayOrTensor, + type_test: bool = True, + device_test: bool = False, + *args, + **kwargs, +): """ - Assert that all values of two data objects are close. + Assert that types and all values of two data objects are close. Args: - a (NdarrayOrTensor): Pytorch Tensor or numpy array for comparison - b (NdarrayOrTensor): Pytorch Tensor or numpy array to compare against - args: extra arguments to pass on to `np.testing.assert_allclose` - kwargs: extra arguments to pass on to `np.testing.assert_allclose` + actual: Pytorch Tensor or numpy array for comparison. + desired: Pytorch Tensor or numpy array to compare against. + type_test: whether to test that `actual` and `desired` are both numpy arrays or torch tensors. + device_test: whether to test the device property. + args: extra arguments to pass on to `np.testing.assert_allclose`. + kwargs: extra arguments to pass on to `np.testing.assert_allclose`. 
+ + """ - a = a.cpu().numpy() if isinstance(a, torch.Tensor) else a - b = b.cpu().numpy() if isinstance(b, torch.Tensor) else b - np.testing.assert_allclose(a, b, *args, **kwargs) + if type_test: + # check both actual and desired are of the same type + np.testing.assert_equal(isinstance(actual, np.ndarray), isinstance(desired, np.ndarray), "numpy type") + np.testing.assert_equal(isinstance(actual, torch.Tensor), isinstance(desired, torch.Tensor), "torch type") + + if isinstance(desired, torch.Tensor) or isinstance(actual, torch.Tensor): + if device_test: + np.testing.assert_equal(str(actual.device), str(desired.device), "torch device check") # type: ignore + actual = actual.cpu().numpy() if isinstance(actual, torch.Tensor) else actual + desired = desired.cpu().numpy() if isinstance(desired, torch.Tensor) else desired + np.testing.assert_allclose(actual, desired, *args, **kwargs) def test_pretrained_networks(network, input_param, device):
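
The hunks above change `convert_to_dst_type` to accept an optional `dtype` argument and make the test helper `assert_allclose` type-aware. A minimal usage sketch follows, assuming a MONAI checkout where `monai.utils.type_conversion` and `tests.utils` are importable from the repository root; the values and variable names are illustrative only, not part of the diff:

import numpy as np
import torch

from monai.utils.type_conversion import convert_to_dst_type
from tests.utils import assert_allclose

src = np.array([0.0, 1.0, 2.0])            # numpy source data
dst = torch.zeros(3, dtype=torch.float16)  # destination sets the container type and device

# Without `dtype`, the converted output follows dst's container type and dtype (torch float16 here).
out, *_ = convert_to_dst_type(src, dst)
assert isinstance(out, torch.Tensor) and out.dtype == torch.float16

# With the new `dtype` argument, the container still follows dst but the requested dtype is kept.
out32, *_ = convert_to_dst_type(src, dst, dtype=torch.float32)
assert isinstance(out32, torch.Tensor) and out32.dtype == torch.float32

# assert_allclose now checks container types by default; pass type_test=False when
# comparing a tensor result against a plain numpy reference, as the updated tests do.
assert_allclose(out32, torch.as_tensor(src, dtype=torch.float32))
assert_allclose(out32, src, type_test=False)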