We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 24e1dc9 · commit 9804ed4 — Copy full SHA for 9804ed4
examples/transform/quip_example.py
@@ -14,7 +14,7 @@
14
# NOTE: because the datafree pipeline is being used in this
15
# example, you can use additional GPUs to support larger models
16
MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct"
17
-model = AutoModelForCausalLM.from_pretrained(MODEL_ID, dtype="auto")
+model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")
18
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
19
20
# Configure the quantization algorithm to run.
0 commit comments