
Commit 9bb7d47

Merge updates from master branch

2 parents: dc3a07b + f11f271

File tree: 13 files changed, +24 −24 lines

captum/attr/_core/lime.py

Lines changed: 1 addition & 1 deletion

@@ -632,7 +632,7 @@ def default_exp_kernel(original_inp, perturbed_inp, __, **kwargs):
             distance = torch.norm(flattened_original_inp - flattened_perturbed_inp)
         else:
             raise ValueError("distance_mode must be either cosine or euclidean.")
-        return math.exp(-1 * (distance ** 2) / (2 * (kernel_width ** 2)))
+        return math.exp(-1 * (distance**2) / (2 * (kernel_width**2)))
 
     return default_exp_kernel
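Nearly every hunk in this commit makes the same whitespace-only change around the `**` operator, matching the style Black 22.x adopted of hugging `**` when both operands are simple (the one exception is the IPython import fix further down). A quick sanity check (a standalone sketch, not part of the commit) confirms the two spellings parse identically, so behavior cannot change:

    import ast

    # Whitespace around ** is not recorded in the syntax tree, so the
    # reformatted line compiles to exactly the same code as the original.
    old = ast.parse("math.exp(-1 * (distance ** 2) / (2 * (kernel_width ** 2)))")
    new = ast.parse("math.exp(-1 * (distance**2) / (2 * (kernel_width**2)))")
    assert ast.dump(old) == ast.dump(new)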

captum/attr/_core/noise_tunnel.py

Lines changed: 1 addition & 1 deletion

@@ -229,7 +229,7 @@ def update_sum_attribution_and_sq(
 
             attribution = attribution.view(attribution_shape)
             current_attribution_sum = attribution.sum(dim=1, keepdim=False)
-            current_attribution_sq = torch.sum(attribution ** 2, dim=1, keepdim=False)
+            current_attribution_sq = torch.sum(attribution**2, dim=1, keepdim=False)
 
             sum_attribution[i] = (
                 current_attribution_sum
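For context on the touched line: NoiseTunnel accumulates both a running sum and a running sum of squares of the noisy attributions, which is enough to recover the mean and the variance in a single pass via Var[x] = E[x**2] - E[x]**2. A minimal sketch of that identity (illustrative shapes and names, not Captum's API):

    import torch

    # Eight noisy attribution draws for 100 features; NoiseTunnel reduces
    # its samples dimension the same way, just with dim=1.
    samples = torch.randn(8, 100)
    n = samples.shape[0]
    attribution_sum = samples.sum(dim=0)           # running sum
    attribution_sq = torch.sum(samples**2, dim=0)  # running sum of squares
    mean = attribution_sum / n
    var = attribution_sq / n - mean**2             # E[x**2] - E[x]**2
    assert torch.allclose(var, samples.var(dim=0, unbiased=False), atol=1e-5)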

captum/attr/_utils/stat.py

Lines changed: 1 addition & 1 deletion

@@ -223,7 +223,7 @@ def update(self, x: Tensor):
 
     def get(self) -> Optional[Tensor]:
         var = self.var.get()
-        return var ** 0.5 if var is not None else None
+        return var**0.5 if var is not None else None
 
 
 class GeneralAccumFn(Stat):

captum/attr/_utils/visualization.py

Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@
 from numpy import ndarray
 
 try:
-    from IPython.core.display import display, HTML
+    from IPython.display import display, HTML
 
     HAS_IPYTHON = True
 except ImportError:
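This is the one non-cosmetic change in the commit: importing `display` from `IPython.core.display` has emitted a DeprecationWarning since IPython 7.14, and `IPython.display` is the supported public path. The guarded import keeps the module usable without IPython installed; downstream use looks roughly like this (the `show_html` helper is hypothetical, not Captum code):

    try:
        from IPython.display import HTML, display  # public, non-deprecated path

        HAS_IPYTHON = True
    except ImportError:
        HAS_IPYTHON = False


    def show_html(markup: str) -> None:
        # Hypothetical helper: render in a notebook when IPython is
        # available, otherwise fall back to plain output.
        if HAS_IPYTHON:
            display(HTML(markup))
        else:
            print(markup)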

captum/influence/_core/tracincp_fast_rand_proj.py

Lines changed: 7 additions & 7 deletions

@@ -494,8 +494,8 @@ def get_checkpoint_contribution(checkpoint):
             )
 
             return (
-                torch.sum(batch_jacobian ** 2, dim=1)
-                * torch.sum(batch_layer_input ** 2, dim=1)
+                torch.sum(batch_jacobian**2, dim=1)
+                * torch.sum(batch_layer_input**2, dim=1)
                 * learning_rate
             )

@@ -1063,17 +1063,17 @@ def _set_projections_tracincp_fast_rand_proj(
         # `projection_dim` corresponds to the variable d in the top of page 15 of
         # the TracIn paper: https://arxiv.org/pdf/2002.08484.pdf.
         if jacobian_dim * layer_input_dim > projection_dim:
-            jacobian_projection_dim = min(int(projection_dim ** 0.5), jacobian_dim)
+            jacobian_projection_dim = min(int(projection_dim**0.5), jacobian_dim)
             layer_input_projection_dim = min(
-                int(projection_dim ** 0.5), layer_input_dim
+                int(projection_dim**0.5), layer_input_dim
             )
             jacobian_projection = torch.normal(
                 torch.zeros(jacobian_dim, jacobian_projection_dim),
-                1.0 / jacobian_projection_dim ** 0.5,
+                1.0 / jacobian_projection_dim**0.5,
             )
             layer_input_projection = torch.normal(
                 torch.zeros(layer_input_dim, layer_input_projection_dim),
-                1.0 / layer_input_projection_dim ** 0.5,
+                1.0 / layer_input_projection_dim**0.5,
             )
 
         projection_quantities = jacobian_projection, layer_input_projection

@@ -1157,7 +1157,7 @@ def _get_intermediate_quantities_tracincp_fast_rand_proj(
             ), "None returned from `checkpoints`, cannot load."
 
             learning_rate = self.checkpoints_load_func(self.model, checkpoint)
-            learning_rate_root = learning_rate ** 0.5
+            learning_rate_root = learning_rate**0.5
 
             for batch in dataloader:
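For orientation on the second hunk (a reading of the surrounding code, with illustrative numbers): when the joint dimension jacobian_dim * layer_input_dim exceeds projection_dim, each factor is projected down to about projection_dim**0.5 so the implied joint dimension stays near the target d from the TracIn paper, and the 1.0 / dim**0.5 standard deviation keeps the Gaussian projections roughly norm-preserving, in the spirit of Johnson-Lindenstrauss. Worked numbers:

    # Illustrative values, not from the commit.
    projection_dim, jacobian_dim, layer_input_dim = 400, 30, 100

    # 30 * 100 = 3000 > 400, so both factors are capped near sqrt(400) = 20
    # (each further clipped by its own dimension, which doesn't bind here).
    jacobian_projection_dim = min(int(projection_dim**0.5), jacobian_dim)
    layer_input_projection_dim = min(int(projection_dim**0.5), layer_input_dim)
    assert (jacobian_projection_dim, layer_input_projection_dim) == (20, 20)
    # Implied joint dimension: 20 * 20 = 400 == projection_dim.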

captum/metrics/_core/infidelity.py

Lines changed: 1 addition & 1 deletion

@@ -570,7 +570,7 @@ def _sum_infidelity_tensors(agg_tensors, tensors):
         beta = safe_div(beta_num, beta_denorm)
 
         infidelity_values = (
-            beta ** 2 * agg_tensors[0] - 2 * beta * agg_tensors[1] + agg_tensors[2]
+            beta**2 * agg_tensors[0] - 2 * beta * agg_tensors[1] + agg_tensors[2]
         )
     else:
         infidelity_values = agg_tensors[0]
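The touched expression has the shape of an expanded least-squares optimum (an algebraic reading with made-up data, not Captum's tensors): if a scalar beta minimizes sum((beta * s - t)**2), then beta = sum(s * t) / sum(s**2), and the minimized value expands to beta**2 * sum(s**2) - 2 * beta * sum(s * t) + sum(t**2), matching the three accumulated terms:

    import torch

    # Made-up stand-ins for the accumulated quantities.
    s = torch.randn(1000, dtype=torch.float64)
    t = 3 * s + 0.1 * torch.randn(1000, dtype=torch.float64)

    beta = torch.sum(s * t) / torch.sum(s**2)  # least-squares scale
    direct = torch.sum((beta * s - t)**2)      # objective at the optimum
    expanded = (
        beta**2 * torch.sum(s**2) - 2 * beta * torch.sum(s * t) + torch.sum(t**2)
    )
    assert torch.allclose(direct, expanded)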

captum/optim/_core/loss.py

Lines changed: 2 additions & 2 deletions

@@ -267,7 +267,7 @@ class DeepDream(BaseLoss):
     def __call__(self, targets_to_values: ModuleOutputMapping) -> torch.Tensor:
         activations = targets_to_values[self.target]
         activations = activations[self.batch_index[0] : self.batch_index[1]]
-        return activations ** 2
+        return activations**2
 
 
 @loss_wrapper

@@ -587,7 +587,7 @@ def __call__(self, targets_to_values: ModuleOutputMapping) -> torch.Tensor:
             return activations * vec
 
         dot = torch.mean(activations * vec)
-        cossims = dot / (self.eps + torch.sqrt(torch.sum(activations ** 2)))
+        cossims = dot / (self.eps + torch.sqrt(torch.sum(activations**2)))
         return dot * torch.clamp(cossims, min=0.1) ** self.cossim_pow
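On the second hunk's weighting scheme (toy numbers, not from the commit): the dot-product reward is scaled by the clamped cosine similarity raised to `cossim_pow`, which leaves well-aligned activations nearly untouched while sharply suppressing misaligned ones; the 0.1 floor keeps the weight strictly positive:

    import torch

    cossim_pow = 4.0
    for cossims in (1.0, 0.5, 0.1):
        weight = torch.clamp(torch.tensor(cossims), min=0.1) ** cossim_pow
        print(cossims, round(float(weight), 4))  # 1.0 -> 1.0, 0.5 -> 0.0625, 0.1 -> 0.0001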

captum/optim/_param/image/images.py

Lines changed: 1 addition & 1 deletion

@@ -385,7 +385,7 @@ def _setup_input(
             x = F.interpolate(init.clone(), size=(h, w), mode="bilinear")
             x = x / 6  # Prevents output from being all white
             upsample = torch.nn.Upsample(scale_factor=scale, mode="nearest")
-            x = x * (scale ** power) / (32 ** power)
+            x = x * (scale**power) / (32**power)
             x = torch.nn.Parameter(x)
             tensor_params.append(x)
             scaler.append(upsample)

captum/optim/_param/image/transforms.py

Lines changed: 2 additions & 2 deletions

@@ -1217,9 +1217,9 @@ def __init__(
         padding_transform: Optional[nn.Module] = nn.ConstantPad2d(2, value=0.5),
         translate: Optional[Union[int, List[int]]] = [4] * 10,
         scale: Optional[NumSeqOrTensorOrProbDistType] = [
-            0.995 ** n for n in range(-5, 80)
+            0.995**n for n in range(-5, 80)
         ]
-        + [0.998 ** n for n in 2 * list(range(20, 40))],
+        + [0.998**n for n in 2 * list(range(20, 40))],
         degrees: Optional[NumSeqOrTensorOrProbDistType] = list(range(-20, 20))
         + list(range(-10, 10))
         + list(range(-5, 5))
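Back-of-envelope range for that default scale list (derived here, not stated in the commit): 0.995**n over n = -5..79 sweeps from about 1.025 down to 0.673, and the 0.998**n terms over n = 20..39, included twice, add values between roughly 0.961 and 0.925 that would be drawn with double weight if the transform samples uniformly from the list (an assumption about the sampler):

    # Recomputing the default list to see its spread.
    scale = [0.995**n for n in range(-5, 80)] + [
        0.998**n for n in 2 * list(range(20, 40))
    ]
    print(round(max(scale), 3), round(min(scale), 3))  # 1.025 0.673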

captum/robust/_core/fgsm.py

Lines changed: 1 addition & 1 deletion

@@ -64,7 +64,7 @@ def __init__(
         self.forward_func = forward_func
         self.loss_func = loss_func
         self.bound = lambda x: torch.clamp(x, min=lower_bound, max=upper_bound)
-        self.zero_thresh = 10 ** -6
+        self.zero_thresh = 10**-6
 
     def perturb(
         self,
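The 10**-6 threshold reads as a numerical guard rather than a tunable knob. A minimal FGSM-style sketch of what such a guard buys (a standalone illustration under that reading, not Captum's perturb implementation): entries whose gradient magnitude falls below the threshold are left unperturbed instead of being pushed a full epsilon by the sign of floating-point noise:

    import torch

    def fgsm_step(
        x: torch.Tensor, grad: torch.Tensor, epsilon: float, zero_thresh: float = 10**-6
    ) -> torch.Tensor:
        # Standard FGSM move, masked where the gradient is effectively zero.
        perturbed = x + epsilon * torch.sign(grad)
        return torch.where(torch.abs(grad) > zero_thresh, perturbed, x)

    x = torch.tensor([0.5, 0.5, 0.5])
    grad = torch.tensor([0.2, -0.3, 1e-9])  # the last entry is pure noise
    print(fgsm_step(x, grad, epsilon=0.1))  # tensor([0.6000, 0.4000, 0.5000])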
