[Quantization] Support mixed-precision compression #1713
base: main
@@ -1,22 +1,71 @@
-from typing import Optional
+from typing import List, Optional
 
 from compressed_tensors import CompressionFormat
 from compressed_tensors.config import SparsityStructure
-from compressed_tensors.quantization import QuantizationStrategy, QuantizationType
+from compressed_tensors.quantization import (
+    QuantizationArgs,
+    QuantizationStrategy,
+    QuantizationType,
+)
 from compressed_tensors.quantization.utils import is_module_quantized
+from loguru import logger
 
 
-__all__ = ["infer_quantization_format"]
+__all__ = ["infer_and_set_per_module_quantization_format"]
 
 
-def infer_quantization_format(
+def _get_quant_compression_format(
+    input_args: QuantizationArgs,
+    weight_args: QuantizationArgs,
+    sparsity_structure: Optional[str] = None,
+):
+    is_24_structure = (
+        SparsityStructure(sparsity_structure) == SparsityStructure.TWO_FOUR
+    )
+    is_weight_only = weight_args is not None and input_args is None
+
+    if weight_args.num_bits == 4 and weight_args.type == QuantizationType.FLOAT.value:
+        return CompressionFormat.nvfp4_pack_quantized
+
+    if is_weight_only:  # w4a16 and w8a16
+        is_valid_pack = (
+            weight_args.num_bits in [4, 8]
+            and weight_args.type == QuantizationType.INT.value
+        )
+        if not is_valid_pack:  # packing only valid for int4 and int 8
+            return CompressionFormat.naive_quantized
+        if is_24_structure:
+            if (
+                weight_args.strategy is not QuantizationStrategy.CHANNEL.value
+                and weight_args.strategy is not QuantizationStrategy.GROUP.value
+            ):
+                # marlin24 kernel only applicable for channel/group quantization
+                return CompressionFormat.pack_quantized
+            return CompressionFormat.marlin_24
+        return CompressionFormat.pack_quantized
+
+    else:  # w8a8 float and int
+        if (
+            weight_args.type == QuantizationType.FLOAT.value
+            and weight_args.num_bits == 8
+        ):
+            return CompressionFormat.float_quantized
+        if weight_args.type == QuantizationType.INT.value:
+            return CompressionFormat.int_quantized
+
+    return CompressionFormat.naive_quantized
+
+
+def infer_and_set_per_module_quantization_format(

Review thread on this line:
It seems like this function only sets the scheme on the modules if a
Yeah in the current lifecycle, this
We can keep it so that the global format would also override any per-module defined format, which I think would make sense.

     model,
     quantization_format: Optional[str] = None,
     save_compressed: bool = False,
     sparsity_structure: Optional[str] = None,
-) -> str:
+) -> Optional[List[str]]:
     """
     Infers the quantization format for a model based on its state and provided
-    compression arguments.
+    compression arguments. Also updates the quantization_scheme.format value
+    based on the inferred format. Returns the unique list of formats in the model
+    or None if the list is empty.
     For a summary of the formats, see `docs/guides/compression_formats.md`.
 
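Not part of the diff: a rough sketch of how the new `_get_quant_compression_format` helper above maps per-module quantization args to a format. It assumes the usual compressed-tensors `QuantizationArgs` fields (`num_bits`, `type`, `strategy`, `group_size`); the exact constructor arguments are illustrative, and the helper is called from inside this module.

    from compressed_tensors import CompressionFormat
    from compressed_tensors.quantization import QuantizationArgs

    # Weight-only int4 (w4a16): no input activation args -> packed weight format
    w4a16_weights = QuantizationArgs(
        num_bits=4, type="int", strategy="group", group_size=128
    )
    fmt = _get_quant_compression_format(input_args=None, weight_args=w4a16_weights)
    # -> CompressionFormat.pack_quantized

    # fp8 weights and activations (w8a8 float) -> float-quantized format
    fp8_args = QuantizationArgs(num_bits=8, type="float", strategy="tensor")
    fmt = _get_quant_compression_format(input_args=fp8_args, weight_args=fp8_args)
    # -> CompressionFormat.float_quantized
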
@@ -27,74 +76,39 @@ def infer_quantization_format(
     :param save_compressed: used to infer a quantization format if None is provided
     :return compression format appropriate for model
     """
-    if quantization_format is not None:
-        return quantization_format
-
-    weight_args, input_args = _get_unique_quant_args(model)
-    if len(weight_args) <= 0:
+    if not save_compressed:
         return None
 
-    if save_compressed:
-        is_24_structure = (
-            SparsityStructure(sparsity_structure) == SparsityStructure.TWO_FOUR
-        )
-        is_weight_only = len(input_args) == 0 and len(weight_args) > 0
+    if quantization_format:
+        return [quantization_format]
 
-        if (
-            weight_args[0].num_bits == 4
-            and weight_args[0].type == QuantizationType.FLOAT.value
-        ):
-            return CompressionFormat.nvfp4_pack_quantized
-
-        if is_weight_only:  # w4a16 and w8a16
-            is_valid_pack = all(
-                weight_arg.num_bits in [4, 8]
-                and weight_arg.type == QuantizationType.INT.value
-                for weight_arg in weight_args
-            )
-            if not is_valid_pack:  # packing only valid for int4 and int 8
-                return CompressionFormat.naive_quantized
-            if is_24_structure:
-                for arg in weight_args:
-                    if (
-                        arg.strategy is not QuantizationStrategy.CHANNEL.value
-                        and arg.strategy is not QuantizationStrategy.GROUP.value
-                    ):
-                        # marlin24 kernel only applicable for channel/group quantization
-                        return CompressionFormat.pack_quantized
-                return CompressionFormat.marlin_24
-            return CompressionFormat.pack_quantized
-        else:  # w8a8 float and int
-            if len(weight_args) == 1:
-                if (
-                    weight_args[0].type == QuantizationType.FLOAT.value
-                    and weight_args[0].num_bits == 8
-                ):
-                    return CompressionFormat.float_quantized
-                if weight_args[0].type == QuantizationType.INT.value:
-                    return CompressionFormat.int_quantized
-
-            return CompressionFormat.naive_quantized
-    else:
-        # format will be inferred from config
-        return None
-
-
-def _get_unique_quant_args(model):
-    """
-    Gets a list of all the unique quantization settings present in model
-    """
-    quant_info_weight = []
-    quant_info_inputs = []
+    unique_formats = []
     for submodule in model.modules():
         if is_module_quantized(submodule):
             weight_scheme = submodule.quantization_scheme.weights
             input_scheme = submodule.quantization_scheme.input_activations
-            if weight_scheme is not None:
-                if weight_scheme not in quant_info_weight:
-                    quant_info_weight.append(weight_scheme)
-            if input_scheme is not None:
-                if input_scheme not in quant_info_inputs:
-                    quant_info_inputs.append(input_scheme)
-
-    return quant_info_weight, quant_info_inputs
+            if weight_scheme is None:
+                continue  # no weight quant - nothing to compress
+            compression_format = _get_quant_compression_format(
+                input_scheme, weight_scheme, sparsity_structure
+            )
+
+            # If set, we check if it matches our inferred one
+            if submodule.quantization_scheme.format is not None:
+                # If it does not, warn the user
+                if submodule.quantization_scheme.format != compression_format.value:
+                    logger.warning(
+                        "The provided format for the module does not match the "
+                        "inferred format. Compression may fail "
+                    )
+            else:
+                # If not set, we set ours
+                submodule.quantization_scheme.format = compression_format.value
+
+            if submodule.quantization_scheme.format not in unique_formats:
+                unique_formats.append(submodule.quantization_scheme.format)
+
+    if len(unique_formats) > 0:
+        return unique_formats
+    return None
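
Not part of the diff: a minimal usage sketch of the new entry point, assuming `model` is a torch module whose submodules already carry `quantization_scheme` attributes from an earlier quantization step.

    formats = infer_and_set_per_module_quantization_format(
        model,
        quantization_format=None,   # no global override; infer per module
        save_compressed=True,
        sparsity_structure=None,
    )
    # Every weight-quantized submodule now has quantization_scheme.format populated,
    # and `formats` holds the unique format strings found in the model, e.g. a
    # two-element list for a model mixing w4a16 and fp8 layers (mixed precision).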
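
One more illustrative note (not part of the diff), related to the review thread above: when a global `quantization_format` is passed, the function returns it immediately, so the per-module `quantization_scheme.format` values are not updated on that path. A minimal sketch, assuming `CompressionFormat` is imported as in the diff:

    formats = infer_and_set_per_module_quantization_format(
        model,
        quantization_format=CompressionFormat.pack_quantized.value,
        save_compressed=True,
    )
    # -> [CompressionFormat.pack_quantized.value]; submodule scheme formats are
    #    left untouched, since the per-module loop is never reached.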