# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD 3-Clause license found in the
# LICENSE file in the root directory of this source tree.

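"""Tests for the int4 groupwise preshuffled weight tensor subclass that
quantize_ installs when FbgemmConfig is used with preshuffle=True:
linear and bmm numerics, slicing, in-place copy_, device movement, and
the public module path of the subclass."""
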
import unittest

import torch
from torch.testing._internal.common_utils import (
    TestCase,
    run_tests,
)

from torchao.quantization import (
    FbgemmConfig,
    quantize_,
)
from torchao.quantization.utils import compute_error
from torchao.utils import (
    TORCH_VERSION_AT_LEAST_2_8,
    _is_fbgemm_genai_gpu_available,
    is_sm_at_least_90,
)


@unittest.skipIf(not TORCH_VERSION_AT_LEAST_2_8, "Need pytorch 2.8+")
@unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
@unittest.skipIf(not is_sm_at_least_90(), "Need sm90+")
@unittest.skipIf(
    not _is_fbgemm_genai_gpu_available(), "Requires fbgemm-gpu-genai >= 1.2.0"
)
class TestInt4GroupwisePreshuffleTensor(TestCase):
    def setUp(self):
        self.config = FbgemmConfig(
            input_dtype=torch.bfloat16,
            weight_dtype=torch.int4,
            output_dtype=torch.bfloat16,
            block_size=[1, 128],
            preshuffle=True,
        )
        self.bmm_config = FbgemmConfig(
            input_dtype=torch.bfloat16,
            weight_dtype=torch.int4,
            output_dtype=torch.bfloat16,
            block_size=[1, 1, 128],
            preshuffle=True,
        )
        self.GPU_DEVICES = ["cuda"] if torch.cuda.is_available() else []
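        # Config notes (our reading of FbgemmConfig): block_size=[1, 128]
        # requests groupwise int4 quantization with groups of 128 along
        # the input-channel dim of a 2D linear weight, and [1, 1, 128] is
        # the 3D analogue for batched (bmm) weights; preshuffle=True
        # selects the preshuffled packing layout consumed by the
        # fbgemm-gpu-genai kernels.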

    def test_linear(self):
        dtype = torch.bfloat16
        device = "cuda"
        input = torch.randn(1, 128, dtype=dtype, device=device)
        linear = torch.nn.Linear(128, 256, dtype=dtype, device=device)
        original = linear(input)
        quantize_(linear, self.config)
        quantized = linear(input)
        self.assertTrue(compute_error(original, quantized) > 20)
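
    # For context: compute_error returns SQNR in dB, so `> 20` above asserts
    # that the quantization error norm is below roughly 10% of the reference
    # norm. A minimal sketch of the quantity, assuming it matches
    # torchao.quantization.utils.compute_error:
    #
    #   def sqnr(ref: torch.Tensor, quant: torch.Tensor) -> torch.Tensor:
    #       return 20 * torch.log10(
    #           torch.linalg.norm(ref) / torch.linalg.norm(ref - quant)
    #       )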

    @unittest.skip("WIP: this doesn't work yet")
    def test_slice(self):
        dtype = torch.bfloat16
        device = "cuda"
        dummy = torch.nn.Linear(256, 256, bias=False, dtype=dtype, device=device)
        dummy1 = torch.nn.Linear(256, 64, bias=False, dtype=dtype, device=device)
        dummy1.weight = torch.nn.Parameter(
            dummy.weight.narrow(0, 0, 64), requires_grad=False
        )
        dummy2 = torch.nn.Linear(128, 256, dtype=dtype, device=device)
        dummy2.weight = torch.nn.Parameter(
            dummy.weight.narrow(1, 0, 128), requires_grad=False
        )

        quantize_(dummy, self.config)
        weight1 = dummy.weight.narrow(0, 0, 64)
        weight2 = dummy.weight.narrow(1, 0, 128)
        # check that the slicing operation is performed correctly on the
        # constituent tensors; narrowing 128 bf16 input channels maps to
        # 64 packed columns because int4 packs two values per byte
        self.assertEqual(
            weight1.packed_weight, dummy.weight.packed_weight.narrow(0, 0, 64)
        )
        self.assertEqual(weight1.group_scale, dummy.weight.group_scale.narrow(1, 0, 64))
        self.assertEqual(
            weight2.packed_weight, dummy.weight.packed_weight.narrow(1, 0, 64)
        )
        self.assertEqual(weight2.group_scale, dummy.weight.group_scale.narrow(0, 0, 1))

        # check that 1. the sliced bf16 weight and 2. the sliced quantized
        # weight produce similar results when multiplied with the same input

        input = torch.randn(2, 256, dtype=dtype, device=device)
        res_ref = dummy1(input)
        dummy.weight = torch.nn.Parameter(weight1, requires_grad=False)
        res = dummy(input)
        sqnr = compute_error(res, res_ref)
        assert sqnr > 20, f"Got: {sqnr}"

        input = torch.randn(2, 128, dtype=dtype, device=device)
        res_ref = dummy2(input)
        dummy.weight = torch.nn.Parameter(weight2, requires_grad=False)
        res = dummy(input)
        sqnr = compute_error(res, res_ref)
        assert sqnr > 15, f"Got: {sqnr}"

    def test_slice_and_copy_(self):
        linear = torch.nn.Linear(1024, 1024).to("cuda").to(torch.bfloat16)
        linear.weight = torch.nn.Parameter(
            torch.zeros(1024, 1024, dtype=torch.bfloat16, device="cuda")
        )
        quantize_(linear, self.config)
        param = linear.weight
        param_data = param.data
        param_data = param_data.narrow(0, 0, 512)
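        # narrow() should return views: the constituent tensors of the slice
        # must share storage (same data_ptr) with the originals, so the
        # in-place copy_ below writes through to `param`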
        assert (
            param.data.packed_weight.data_ptr() == param_data.packed_weight.data_ptr()
        )
        assert param.data.group_scale.data_ptr() == param_data.group_scale.data_ptr()
        assert param.data.group_zero.data_ptr() == param_data.group_zero.data_ptr()
        orig_value = param.data.packed_weight[0][0].item()

        # dummy_l has random weights (shouldn't be all zeros)
        dummy_l = torch.nn.Linear(1024, 1024).to("cuda").to(torch.bfloat16)
        quantize_(dummy_l, self.config)
        quantized = dummy_l.weight
        quantized = quantized.narrow(0, 0, 512)

        param_data.copy_(quantized)

        # make sure param.data is updated
        assert param.data.packed_weight[0][0] != orig_value

    def test_bmm(self):
        class M(torch.nn.Module):
            def __init__(self, weight):
                super().__init__()
                self.weight = weight

            def forward(self, x):
                return torch.bmm(x, self.weight)

        dtype = torch.bfloat16
        device = "cuda"
        input = torch.randn(10, 32, 128, dtype=dtype, device=device)
        weight = torch.randn(10, 128, 256, dtype=dtype, device=device)
        m = M(weight).eval()
        original = m(input)
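        # Transpose the weight to (B, N, K) and make it contiguous before
        # quantization; as we understand it, the preshuffled int4 bmm kernel
        # consumes weights in this pre-transposed layout. quantize_'s default
        # filter only matches nn.Linear, so filter_fn is overridden here to
        # reach the plain module M.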
        m.weight = torch.nn.Parameter(m.weight.transpose(1, 2).contiguous())
        quantize_(m, self.bmm_config, filter_fn=lambda x, fqn: True)
        quantized = m(input)
        self.assertTrue(compute_error(original, quantized) > 18)

    def test_to_device(self):
        for device in self.GPU_DEVICES:
            # to() with a positional device argument
            linear = torch.nn.Linear(128, 256, dtype=torch.bfloat16)
            quantize_(linear, self.config)
            linear.to(device)

            # to() with a device keyword argument
            linear = torch.nn.Linear(128, 256, dtype=torch.bfloat16)
            quantize_(linear, self.config)
            linear.to(device=device)

            # the module-level cuda() shorthand
            linear = torch.nn.Linear(128, 256, dtype=torch.bfloat16)
            quantize_(linear, self.config)
            linear.cuda()

    def test_module_path(self):
        linear = torch.nn.Linear(128, 256, dtype=torch.bfloat16)
        quantize_(linear, self.config)
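        # check that the subclass reports the public torchao.quantization
        # path; presumably this keeps serialized checkpoints, which record
        # the qualified type name, stable across internal module moves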
        self.assertEqual(
            str(type(linear.weight)),
            "<class 'torchao.quantization.Int4GroupwisePreshuffleTensor'>",
        )


if __name__ == "__main__":
    run_tests()