Skip to content

Commit 513af39

Browse files
committed
Apply black formatting to pass CI code style checks
1 parent de0b762 commit 513af39

File tree

3 files changed

+10
-15
lines changed

3 files changed

+10
-15
lines changed

monai/transforms/croppad/dictionary.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -549,9 +549,12 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor], lazy: bool | None = No
549549

550550
lazy_ = self.lazy if lazy is None else lazy
551551
self.cropper = SpatialCrop(
552-
roi_center=roi_center, roi_size=roi_size,
553-
roi_start=roi_start, roi_end=roi_end,
554-
roi_slices=self._roi_slices, lazy=lazy_,
552+
roi_center=roi_center,
553+
roi_size=roi_size,
554+
roi_start=roi_start,
555+
roi_end=roi_end,
556+
roi_slices=self._roi_slices,
557+
lazy=lazy_,
555558
)
556559
for key in self.key_iterator(d):
557560
d[key] = self.cropper(d[key], lazy=lazy_)

tests/transforms/test_spatial_cropd.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -191,9 +191,7 @@ def test_inverse_with_string_keys(self):
191191
def test_pipeline_world_to_image_crop(self):
192192
"""Integration test: TransformPointsWorldToImaged -> SpatialCropd with string keys."""
193193
# Create image with a 2x scaling affine: world coords = 2 * voxel coords
194-
affine = torch.tensor(
195-
[[2.0, 0, 0, 0], [0, 2.0, 0, 0], [0, 0, 2.0, 0], [0, 0, 0, 1.0]], dtype=torch.float64
196-
)
194+
affine = torch.tensor([[2.0, 0, 0, 0], [0, 2.0, 0, 0], [0, 0, 2.0, 0], [0, 0, 0, 1.0]], dtype=torch.float64)
197195
img = MetaTensor(torch.rand(1, 32, 32, 32), affine=affine)
198196

199197
# World-space ROI: [4, 6, 8] to [20, 24, 28] -> voxel-space: [2, 3, 4] to [10, 12, 14]

tests/transforms/utility/test_transform_points_image_to_worldd.py

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -53,22 +53,16 @@ def test_matches_base_class(self, image, points, expected_output):
5353
"""Verify that TransformPointsImageToWorldd produces the same result as
5454
ApplyTransformToPointsd with invert_affine=False."""
5555
data = {"image": image, "point": points}
56-
convenience = TransformPointsImageToWorldd(
57-
keys="point", refer_keys="image", dtype=torch.int64
58-
)
59-
base = ApplyTransformToPointsd(
60-
keys="point", refer_keys="image", dtype=torch.int64, invert_affine=False
61-
)
56+
convenience = TransformPointsImageToWorldd(keys="point", refer_keys="image", dtype=torch.int64)
57+
base = ApplyTransformToPointsd(keys="point", refer_keys="image", dtype=torch.int64, invert_affine=False)
6258
out_convenience = convenience(dict(data))
6359
out_base = base(dict(data))
6460
self.assertTrue(torch.allclose(out_convenience["point"], out_base["point"]))
6561

6662
@parameterized.expand(TEST_CASES)
6763
def test_inverse(self, image, points, _expected_output):
6864
data = {"image": image, "point": points}
69-
transform = TransformPointsImageToWorldd(
70-
keys="point", refer_keys="image", dtype=torch.int64
71-
)
65+
transform = TransformPointsImageToWorldd(keys="point", refer_keys="image", dtype=torch.int64)
7266
output = transform(data)
7367
inverted = transform.inverse(output)
7468
self.assertTrue(torch.allclose(inverted["point"], points))

0 commit comments

Comments (0)