
Commit 1f9cff4

Merge branch 'main' into bdellabe/processor-init
Merge: 2 parents 26a4066 + 5b3ddff

File tree

1 file changed (+4, -8 lines)


tests/llmcompressor/transformers/kv_cache/test_kv_cache.py

Lines changed: 4 additions & 8 deletions
@@ -231,14 +231,10 @@ def test_kv_cache_gptq_model_state_dict_attr(kv_cache_fixture, tmp_path):
 
     output_dir, _ = next(kv_cache_fixture(recipe, tmp_path))
 
-    with init_empty_weights():
-        # TODO: There is a bug in `apply_quantization_config` which means that, if using
-        # CompressedLinears, the compression status is inferred to `compressed` and
-        # therefore the attention kvcache parameters never undergo initializations
-        model = AutoModelForCausalLM.from_pretrained(
-            output_dir,
-            quantization_config=CompressedTensorsConfig(run_compressed=False),
-        )
+    model = AutoModelForCausalLM.from_pretrained(
+        output_dir,
+        quantization_config=CompressedTensorsConfig(run_compressed=False),
+    )
 
     counts = 0
     for name, submodule in model.named_modules():
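
For context, here is a minimal sketch of the loading pattern the test relies on after this change, assuming transformers and compressed-tensors are installed. The placeholder path and the module-counting predicate (checking for a k_scale attribute) are assumptions for illustration, since only this hunk of the test is shown:

    # Sketch of the post-change loading pattern (not the full test).
    from transformers import AutoModelForCausalLM
    from transformers.utils.quantization_config import CompressedTensorsConfig

    # Placeholder: in the test this path comes from the kv_cache_fixture.
    output_dir = "path/to/saved/quantized-model"

    # Without accelerate's init_empty_weights(), weights are actually
    # materialized, so the attention kv-cache quantization parameters are
    # initialized on load. run_compressed=False asks compressed-tensors to
    # decompress weights instead of running with CompressedLinear modules.
    model = AutoModelForCausalLM.from_pretrained(
        output_dir,
        quantization_config=CompressedTensorsConfig(run_compressed=False),
    )

    # The test then walks named_modules() and counts kv-cache entries; the
    # exact predicate below is an assumption, as it lies outside this hunk.
    counts = 0
    for name, submodule in model.named_modules():
        if hasattr(submodule, "k_scale"):
            counts += 1

Dropping the init_empty_weights() context manager avoids the bug described in the removed TODO: with meta-device weights and CompressedLinears, the compression status was inferred as `compressed`, so the kv-cache parameters were never initialized.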
