
Commit f11f271

ProGamerGov authored and committed

Formatting changes for black 22.3.0

1 parent 167dbc9

5 files changed: +10 -10 lines changed
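
All five diffs below come from the same rule: black's stable 22.x releases (including 22.3.0) write the `**` operator without surrounding spaces when both operands are simple (a name, a numeric literal, or an attribute chain, optionally with a unary operator), and keep the spaces otherwise. A minimal sketch of the rule; the variables are illustrative, not from this commit:

```python
x, power = 2.0, 3

y = x**power          # name ** name: both operands simple, so no spaces
z = x**-power         # a unary minus still counts as simple
w = (x + 1) ** power  # parenthesized expression is not simple: spaces kept
v = abs(x) ** 2       # a call is not simple either: spaces kept
```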

captum/optim/_core/loss.py

Lines changed: 2 additions & 2 deletions

@@ -267,7 +267,7 @@ class DeepDream(BaseLoss):
     def __call__(self, targets_to_values: ModuleOutputMapping) -> torch.Tensor:
         activations = targets_to_values[self.target]
         activations = activations[self.batch_index[0] : self.batch_index[1]]
-        return activations ** 2
+        return activations**2
 
 
 @loss_wrapper

@@ -587,7 +587,7 @@ def __call__(self, targets_to_values: ModuleOutputMapping) -> torch.Tensor:
             return activations * vec
 
         dot = torch.mean(activations * vec)
-        cossims = dot / (self.eps + torch.sqrt(torch.sum(activations ** 2)))
+        cossims = dot / (self.eps + torch.sqrt(torch.sum(activations**2)))
         return dot * torch.clamp(cossims, min=0.1) ** self.cossim_pow
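
Aside from the spacing, the second hunk's arithmetic is unchanged: it scales a mean dot product by a clamped cosine-similarity term raised to `cossim_pow`. A standalone sketch of that computation, using stand-in tensors and values for `activations`, `vec`, `eps`, and `cossim_pow`:

```python
import torch

# Stand-in inputs; the real ones come from the loss instance and the model.
activations = torch.randn(1, 8, 4, 4)
vec = torch.randn(1, 8, 1, 1)
eps = 1e-4
cossim_pow = 2.0

# Same computation as the hunk above, outside the class.
dot = torch.mean(activations * vec)
cossims = dot / (eps + torch.sqrt(torch.sum(activations**2)))
loss = dot * torch.clamp(cossims, min=0.1) ** cossim_pow
```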

captum/optim/_param/image/images.py

Lines changed: 1 addition & 1 deletion

@@ -385,7 +385,7 @@ def _setup_input(
                 x = F.interpolate(init.clone(), size=(h, w), mode="bilinear")
                 x = x / 6  # Prevents output from being all white
             upsample = torch.nn.Upsample(scale_factor=scale, mode="nearest")
-            x = x * (scale ** power) / (32 ** power)
+            x = x * (scale**power) / (32**power)
             x = torch.nn.Parameter(x)
             tensor_params.append(x)
             scaler.append(upsample)
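
The changed line here weights each level of an image pyramid by `(scale**power) / (32**power)`, i.e. relative to the largest scale factor. A minimal sketch of that setup loop, assuming hypothetical sizes, scale factors, and `power`:

```python
import torch

power = 0.1  # hypothetical; the real value is a parameter of the image class
tensor_params, scaler = [], []
for scale in [1, 2, 4, 8, 16, 32]:  # hypothetical pyramid scale factors
    h, w = 224 // scale, 224 // scale
    x = torch.randn(1, 3, h, w) / 6  # stand-in init for this level
    upsample = torch.nn.Upsample(scale_factor=scale, mode="nearest")
    x = x * (scale**power) / (32**power)  # weight relative to the largest scale
    tensor_params.append(torch.nn.Parameter(x))
    scaler.append(upsample)
```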

captum/optim/_param/image/transforms.py

Lines changed: 2 additions & 2 deletions

@@ -1217,9 +1217,9 @@ def __init__(
         padding_transform: Optional[nn.Module] = nn.ConstantPad2d(2, value=0.5),
         translate: Optional[Union[int, List[int]]] = [4] * 10,
         scale: Optional[NumSeqOrTensorOrProbDistType] = [
-            0.995 ** n for n in range(-5, 80)
+            0.995**n for n in range(-5, 80)
         ]
-        + [0.998 ** n for n in 2 * list(range(20, 40))],
+        + [0.998**n for n in 2 * list(range(20, 40))],
         degrees: Optional[NumSeqOrTensorOrProbDistType] = list(range(-20, 20))
         + list(range(-10, 10))
         + list(range(-5, 5))
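
The reformatted default for `scale` concatenates two geometric sequences; evaluating it directly shows what gets sampled:

```python
scale = [0.995**n for n in range(-5, 80)] + [
    0.998**n for n in 2 * list(range(20, 40))
]
print(len(scale))  # 125 candidate scale factors (85 + 40)
print(round(min(scale), 3), round(max(scale), 3))  # about 0.673 and 1.025
```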

tests/optim/core/test_loss.py

Lines changed: 3 additions & 3 deletions

@@ -30,7 +30,7 @@ def test_channel_deepdream(self) -> None:
         model = BasicModel_ConvNet_Optim()
         loss = opt_loss.DeepDream(model.layer)
         expected = torch.as_tensor(
-            [[[CHANNEL_ACTIVATION_0_LOSS ** 2]], [[CHANNEL_ACTIVATION_1_LOSS ** 2]]]
+            [[[CHANNEL_ACTIVATION_0_LOSS**2]], [[CHANNEL_ACTIVATION_1_LOSS**2]]]
         )[None, :]
         assertTensorAlmostEqual(self, get_loss_value(model, loss), expected, mode="max")
 

@@ -84,7 +84,7 @@ def test_l2(self) -> None:
         loss = opt_loss.L2(model.layer)
         self.assertAlmostEqual(
             get_loss_value(model, loss),
-            (CHANNEL_ACTIVATION_0_LOSS ** 2 + CHANNEL_ACTIVATION_1_LOSS ** 2) ** 0.5,
+            (CHANNEL_ACTIVATION_0_LOSS**2 + CHANNEL_ACTIVATION_1_LOSS**2) ** 0.5,
             places=5,
         )
 

@@ -267,7 +267,7 @@ def test_pow(self) -> None:
         loss = opt_loss.ChannelActivation(model.layer, 0) ** 2
         self.assertAlmostEqual(
             get_loss_value(model, loss),
-            CHANNEL_ACTIVATION_0_LOSS ** 2,
+            CHANNEL_ACTIVATION_0_LOSS**2,
             places=6,
         )
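
The middle hunk's expected value is the Euclidean norm of the two per-channel activations. A quick check with hypothetical constants (the real ones are defined elsewhere in the test module):

```python
import math

CHANNEL_ACTIVATION_0_LOSS = 0.5   # hypothetical stand-in
CHANNEL_ACTIVATION_1_LOSS = 0.25  # hypothetical stand-in

expected = (CHANNEL_ACTIVATION_0_LOSS**2 + CHANNEL_ACTIVATION_1_LOSS**2) ** 0.5
assert math.isclose(expected, math.hypot(0.5, 0.25))
```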

tests/optim/param/test_transforms.py

Lines changed: 2 additions & 2 deletions

@@ -1912,8 +1912,8 @@ def test_transform_robustness_init_transform_values(self) -> None:
         for module in transform_robustness.jitter_transforms:
             self.assertEqual(module.pad_range, 2 * 4)
 
-        expected_scale = [0.995 ** n for n in range(-5, 80)] + [
-            0.998 ** n for n in 2 * list(range(20, 40))
+        expected_scale = [0.995**n for n in range(-5, 80)] + [
+            0.998**n for n in 2 * list(range(20, 40))
         ]
         self.assertEqual(transform_robustness.random_scale.scale, expected_scale)
         expected_degrees = (
