Skip to content

Commit a9d6274

Browse files
committed
Fix ruff after #2724
1 parent f01c956 commit a9d6274

File tree

7 files changed

+0
-9
lines changed

7 files changed

+0
-9
lines changed

test/quantization/pt2e/test_duplicate_dq.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
 from typing import Any

 import torch
-from torch.export import export_for_training
 from torch.testing._internal.common_quantization import QuantizationTestCase
 from torch.testing._internal.common_utils import IS_WINDOWS, run_tests

test/quantization/pt2e/test_quantize_pt2e.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
     per_channel_weight_observer_range_neg_127_to_127,
     weight_observer_range_neg_127_to_127,
 )
-from torch.export import export_for_training
 from torch.fx import Node
 from torch.testing._internal.common_quantization import (
     NodeSpec as ns,

test/quantization/pt2e/test_quantize_pt2e_qat.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@
     default_symmetric_qnnpack_qat_qconfig,
 )
 from torch.ao.quantization.quantize_fx import prepare_qat_fx
-from torch.export import export_for_training
 from torch.testing._internal.common_cuda import TEST_CUDA
 from torch.testing._internal.common_quantization import (
     NodeSpec as ns,

test/quantization/pt2e/test_representation.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@

 import torch
 from torch._higher_order_ops.out_dtype import out_dtype  # noqa: F401
-from torch.export import export_for_training
 from torch.testing._internal.common_quantization import (
     NodeSpec as ns,
 )

test/quantization/pt2e/test_x86inductor_quantizer.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@

 import torch
 import torch.nn as nn
-from torch.export import export_for_training
 from torch.testing._internal.common_quantization import (
     NodeSpec as ns,
 )

test/quantization/test_qat.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1805,9 +1805,6 @@ def test_qat_api_deprecation(self):
             str(w.message),
         )

-    @unittest.skipIf(
-        not TORCH_VERSION_AT_LEAST_2_4, "skipping when torch version is 2.4 or lower"
-    )
     def test_qat_api_convert_no_quantization(self):
         """
         Test that `QATConfig(step="convert")` swaps back to nn modules without quantization.

torchao/testing/pt2e/utils.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
     _convert_to_reference_decomposed_fx,
     prepare_fx,
 )
-from torch.export import export_for_training
 from torch.testing._internal.common_quantization import (
     NodeSpec,
     QuantizationTestCase,

0 commit comments

Comments (0)