
Commit 4bcbdf1 (parent: 7bfe923)

Fix unused variable warnings and other ruff warnings

Signed-off-by: cyy <[email protected]>

10 files changed (+9, -31 lines)


bitsandbytes/backends/utils.py

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@
     import triton.language as tl # noqa: F401

    triton_available = True
-except ImportError as e:
+except ImportError:
    triton_available = False

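The unused binding `as e` is exactly the kind of local that F841 flags; for optional-dependency detection the exception object is never needed. A minimal sketch of the pattern, using a hypothetical module name:

try:
    import some_optional_dep  # noqa: F401 (hypothetical optional dependency)

    feature_available = True
except ImportError:  # no `as e`: the exception object is never used
    feature_available = False

print(feature_available)  # False unless some_optional_dep is installed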
bitsandbytes/functional.py

Lines changed: 2 additions & 13 deletions

@@ -242,7 +242,6 @@ def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8):
     assert e + p == total_bits - has_sign
     # the exponent is biased to 2^(e-1) -1 == 0
     evalues = []
-    pvalues = []
     for i, val in enumerate(range(-(2 ** (exponent_bits - has_sign)), 2 ** (exponent_bits - has_sign), 1)):
         evalues.append(2**val)

@@ -1365,8 +1364,6 @@ def optimizer_update_8bit_blockwise(
     gnorm_scale: float = 1.0,
     skip_zeros=False,
 ) -> None:
-    optim_func = None
-
     is_on_gpu([p, g, state1, state2, qmap1, qmap2, absmax1, absmax2])

     torch.ops.bitsandbytes.optimizer_update_8bit_blockwise(
@@ -2116,7 +2113,7 @@ def spmm_coo(
     assert cooA.values.numel() == nnz
     assert cooA.cols == B.shape[0]

-    transposed_B = False if B.is_contiguous() else True
+    transposed_B = not B.is_contiguous()

     ldb = B.stride()[(1 if transposed_B else 0)]
     ldc = B.shape[1]
@@ -2165,12 +2162,7 @@ def spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):
     assert cooA.values.numel() == nnz
     assert cooA.cols == B.shape[0], f"{cooA.cols} vs {B.shape}"

-    transposed_B = False if B.is_contiguous() else True
-
-    ldb = B.stride()[(1 if transposed_B else 0)]
-    ldc = B.shape[1]
-
-    values, counts = torch.unique(cooA.rowidx, return_counts=True)
+    _, counts = torch.unique(cooA.rowidx, return_counts=True)
     offset = counts.cumsum(0).int()
     max_count, max_idx = torch.sort(counts, descending=True)
     max_idx = max_idx.int()
@@ -2190,11 +2182,8 @@ def spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):
     cnnz_rows = ct.c_int32(counts.numel())
     cnnz = ct.c_int32(cooA.nnz)
     crowsA = ct.c_int32(cooA.rows)
-    ccolsA = ct.c_int32(cooA.cols)
     crowsB = ct.c_int32(B.shape[1])
     ccolsB = ct.c_int32(B.shape[1])
-    cldb = ct.c_int32(ldb)
-    cldc = ct.c_int32(ldc)

     with _cuda_device_of(B):
         is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out, dequant_stats])
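Two small idioms recur in these hunks: `False if cond else True` collapses to `not cond`, and an unpacked value that is never read can be named `_`, which ruff's default dummy-variable pattern exempts from F841. A standalone sketch (made-up tensors, not the library's data):

import torch

B = torch.arange(12).reshape(3, 4).t()  # a transposed view is not contiguous

# Before: transposed_B = False if B.is_contiguous() else True
transposed_B = not B.is_contiguous()  # same truth value, no redundant ternary

# Before: values, counts = torch.unique(...) with `values` never read.
# `_` matches ruff's dummy-variable regex, so F841 leaves it alone.
_, counts = torch.unique(torch.tensor([0, 0, 1, 2, 2, 2]), return_counts=True)

print(transposed_B, counts)  # True tensor([2, 1, 3])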

bitsandbytes/nn/modules.py

Lines changed: 2 additions & 2 deletions

@@ -480,7 +480,7 @@ def __init__(
         )
         # self.persistent_buffers = [] # TODO consider as way to save quant state
         self.compute_dtype = compute_dtype
-        self.compute_type_is_set = False if compute_dtype is None else True
+        self.compute_type_is_set = compute_dtype is not None
         self.quant_state = None
         self.quant_storage = quant_storage
         self.ipex_linear_is_set = False
@@ -1150,4 +1150,4 @@ def forward(self, x):
         if self.weight.CB is not None:
             self.init_8bit_state()

-        out = bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias
+        return bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias
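The first hunk is the same ternary cleanup (`x is not None` already yields the bool). The second hunk fixes a real bug that the unused-variable warning surfaced: the result was bound to a local `out` that was never returned, so `forward` silently returned None. A stripped-down illustration with hypothetical functions:

def forward_buggy(x):
    out = x * 2  # F841: assigned but never used, and the caller gets None

def forward_fixed(x):
    return x * 2  # return the result directly

print(forward_buggy(3), forward_fixed(3))  # None 6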

bitsandbytes/optim/lars.py

Lines changed: 0 additions & 3 deletions

@@ -231,9 +231,6 @@ def step(self, closure=None):
             loss = closure()

         for group in self.param_groups:
-            params_with_grad = []
-            d_p_list = []
-            momentum_buffer_list = []
             weight_decay = group["weight_decay"]
             momentum = group["momentum"]
             dampening = group["dampening"]
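The three removed lists were scaffolding that was never appended to or read, the sort of dead local F841 exists to catch; the same cleanup happens in optim/optimizer.py below. A runnable reduction of the pattern, with invented parameter groups:

def step(param_groups):
    updated = []
    for group in param_groups:
        params_with_grad = []  # F841: assigned here, never touched again
        for p in group["params"]:
            updated.append(p - group["lr"])  # the update never uses the list
    return updated

print(step([{"lr": 0.1, "params": [1.0, 2.0]}]))  # [0.9, 1.9]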

bitsandbytes/optim/optimizer.py

Lines changed: 0 additions & 2 deletions

@@ -272,8 +272,6 @@ def step(self, closure=None):
             with torch.enable_grad():
                 loss = closure()

-        overflows = []
-
         if not self.initialized:
             self.check_overrides()
             self.to_gpu()  # needed for fairseq pure fp16 training

bitsandbytes/research/autograd/_functions.py

Lines changed: 1 addition & 1 deletion

@@ -235,7 +235,7 @@ def forward(ctx, A, B, out=None, bias=None, state: Optional[MatmulLtState] = None):
         # 2. Quantize B
         if state.has_fp16_weights:
             # print('B shape', B.shape)
-            has_grad = True if (getattr(B, "grad", None) is not None) else False
+            has_grad = getattr(B, "grad", None) is not None
             is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)
             if is_transposed:
                 B = B.contiguous()
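`True if cond else False` is simply `cond` when the condition is already boolean (ruff's flake8-simplify plugin reports this as SIM210). Recreated with a plain object standing in for a tensor:

class Weight:  # stand-in for a torch.nn.Parameter
    grad = None

B = Weight()

# Before: has_grad = True if (getattr(B, "grad", None) is not None) else False
has_grad = getattr(B, "grad", None) is not None  # `is not None` is already a bool
print(has_grad)  # False

B.grad = [0.1, -0.2]
print(getattr(B, "grad", None) is not None)  # True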

bitsandbytes/utils.py

Lines changed: 0 additions & 5 deletions

@@ -92,11 +92,6 @@ def find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False):
     if rdm:
         return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long()

-    m = weight.mean(reduction_dim)
-    mm = m.mean()
-    mstd = m.std()
-    zm = (m - mm) / mstd
-
     std = weight.std(reduction_dim)
     stdm = std.mean()
     stdstd = std.std()
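The deleted block computed mean-based z-scores (`zm`) that nothing downstream ever read; only the std-based statistics feed the outlier test. A self-contained sketch of the surviving logic, run on random data rather than real weights:

import torch

torch.manual_seed(0)
weight = torch.randn(64, 128)
reduction_dim, zscore = 0, 4.0

# Per-column std, then a z-score over those stds: a column counts as an
# outlier dimension when its spread is extreme relative to the other columns.
std = weight.std(reduction_dim)
zstd = (std - std.mean()) / std.std()
print(torch.where(zstd > zscore)[0])  # usually empty at a 4-sigma threshold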

install_cuda.py

Lines changed: 1 addition & 1 deletion

@@ -87,7 +87,7 @@ def main():

     # Install CUDA version(s)
     if version == "all":
-        for ver in cuda_versions.keys():
+        for ver in cuda_versions:
             install_cuda(ver, base_path, download_path)
     elif version in cuda_versions:
         install_cuda(version, base_path, download_path)
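Iterating a dict already yields its keys, so `.keys()` is redundant (ruff flags the membership form of this as SIM118). Illustrated with an invented version table:

cuda_versions = {"118": "11.8.0", "124": "12.4.1"}  # hypothetical tag -> version map

# `for ver in cuda_versions.keys()` and `for ver in cuda_versions` are equivalent.
for ver in cuda_versions:
    print(f"would install CUDA {cuda_versions[ver]} for tag {ver}")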

pyproject.toml

Lines changed: 1 addition & 2 deletions

@@ -123,11 +123,10 @@ select = [
 ignore = [
     "B007", # Loop control variable not used within the loop body (TODO: enable)
     "B028", # Warning without stacklevel (TODO: enable)
-    "E501", # Supress line-too-long warnings: trust yapf's judgement on this one.
+    "E501", # Suppress line-too-long warnings: trust yapf's judgement on this one.
     "E701", # Multiple statements on one line (TODO: enable)
     "E712", # Allow using if x == False, as it's not always equivalent to if x.
     "E731", # Do not use lambda
-    "F841", # Local assigned but not used (TODO: enable, these are likely bugs)
     "RUF012", # Mutable class attribute annotations
     "RUF034", # Useless if-else (TODO: enable)
     "ISC001", # single-line-implicit-string-concatenation incompatible with formatter

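With the last in-tree violations fixed in the files above, dropping F841 from the ignore list turns the rule on for the whole repository, so new code like this hypothetical snippet would now fail `ruff check`:

def parse(path):
    raw = open(path).read()
    header = raw[:16]  # F841: `header` is assigned but never used
    return len(raw)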
tests/test_generation.py

Lines changed: 1 addition & 1 deletion

@@ -112,7 +112,7 @@ def test_pi(requires_cuda, model_and_tokenizer, inference_kernel, DQ, dtype):
     assert len(outputs) == n_cases
     failure_count = 0
     for i in range(n_cases):
-        if not outputs[i][: len(str(math.pi))] == str(math.pi):
+        if outputs[i][: len(str(math.pi))] != str(math.pi):
             failure_count += 1
     failure_max = 2 if fixture_config[0] == "huggyllama/llama-7b" else 4
     if failure_count > failure_max:
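`not a == b` parses as `not (a == b)`, which for well-behaved types is just `a != b` (ruff's SIM201). The comparison recreated with plain strings:

import math

output = "3.14159 is pi"
prefix = str(math.pi)

# Before: if not output[: len(prefix)] == prefix:
if output[: len(prefix)] != prefix:  # direct inequality, same meaning
    print("generation did not start with pi")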
