diff --git a/backends/apple/coreml/test/tester.py b/backends/apple/coreml/test/tester.py
index f4a5f51ecbd..be424c8f811 100644
--- a/backends/apple/coreml/test/tester.py
+++ b/backends/apple/coreml/test/tester.py
@@ -4,23 +4,73 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-from typing import Any, List, Optional, Tuple
+import functools
+from typing import Any, List, Optional, Sequence, Tuple
+
+import coremltools as ct
 import executorch
 import executorch.backends.test.harness.stages as BaseStages
-
 import torch
+
+from executorch.backends.apple.coreml.compiler import CoreMLBackend
 from executorch.backends.apple.coreml.partition import CoreMLPartitioner
+from executorch.backends.apple.coreml.quantizer import CoreMLQuantizer
 from executorch.backends.test.harness import Tester as TesterBase
 from executorch.backends.test.harness.stages import StageType
 from executorch.exir import EdgeCompileConfig
 from executorch.exir.backend.partitioner import Partitioner
 
 
+def _create_default_partitioner(
+    minimum_deployment_target: Any = ct.target.iOS15,
+) -> CoreMLPartitioner:
+    return CoreMLPartitioner(
+        compile_specs=CoreMLBackend.generate_compile_specs(
+            minimum_deployment_target=minimum_deployment_target
+        )
+    )
+
+
+def _get_static_int8_linear_qconfig():
+    return ct.optimize.torch.quantization.LinearQuantizerConfig(
+        global_config=ct.optimize.torch.quantization.ModuleLinearQuantizerConfig(
+            quantization_scheme="symmetric",
+            activation_dtype=torch.quint8,
+            weight_dtype=torch.qint8,
+            weight_per_channel=True,
+        )
+    )
+
+
+class Quantize(BaseStages.Quantize):
+    def __init__(
+        self,
+        quantizer: Optional[CoreMLQuantizer] = None,
+        quantization_config: Optional[Any] = None,
+        calibrate: bool = True,
+        calibration_samples: Optional[Sequence[Any]] = None,
+        is_qat: Optional[bool] = False,
+    ):
+        super().__init__(
+            quantizer=quantizer
+            or CoreMLQuantizer(
+                quantization_config or _get_static_int8_linear_qconfig()
+            ),
+            calibrate=calibrate,
+            calibration_samples=calibration_samples,
+            is_qat=is_qat,
+        )
+
+
 class Partition(BaseStages.Partition):
-    def __init__(self, partitioner: Optional[Partitioner] = None):
+    def __init__(
+        self,
+        partitioner: Optional[Partitioner] = None,
+        minimum_deployment_target: Optional[Any] = ct.target.iOS15,
+    ):
         super().__init__(
-            partitioner=partitioner or CoreMLPartitioner,
+            partitioner=partitioner
+            or _create_default_partitioner(minimum_deployment_target),
         )
 
 
@@ -29,9 +79,12 @@ def __init__(
         self,
         partitioners: Optional[List[Partitioner]] = None,
         edge_compile_config: Optional[EdgeCompileConfig] = None,
+        minimum_deployment_target: Optional[Any] = ct.target.iOS15,
     ):
         super().__init__(
-            default_partitioner_cls=CoreMLPartitioner,
+            default_partitioner_cls=lambda: _create_default_partitioner(
+                minimum_deployment_target
+            ),
             partitioners=partitioners,
             edge_compile_config=edge_compile_config,
         )
 
 
@@ -43,13 +96,20 @@ def __init__(
         self,
         module: torch.nn.Module,
         example_inputs: Tuple[torch.Tensor],
         dynamic_shapes: Optional[Tuple[Any]] = None,
+        minimum_deployment_target: Optional[Any] = ct.target.iOS15,
     ):
-        # Specialize for XNNPACK
+        # Specialize for Core ML
         stage_classes = (
             executorch.backends.test.harness.Tester.default_stage_classes()
             | {
-                StageType.PARTITION: Partition,
-                StageType.TO_EDGE_TRANSFORM_AND_LOWER: ToEdgeTransformAndLower,
+                StageType.QUANTIZE: Quantize,
+                StageType.PARTITION: functools.partial(
+                    Partition, minimum_deployment_target=minimum_deployment_target
+                ),
+
StageType.TO_EDGE_TRANSFORM_AND_LOWER: functools.partial( + ToEdgeTransformAndLower, + minimum_deployment_target=minimum_deployment_target, + ), } ) diff --git a/backends/test/harness/stages/quantize.py b/backends/test/harness/stages/quantize.py index e03db058080..b98c4faa3dd 100644 --- a/backends/test/harness/stages/quantize.py +++ b/backends/test/harness/stages/quantize.py @@ -25,13 +25,15 @@ def __init__( calibrate: bool = True, calibration_samples: Optional[Sequence[Any]] = None, is_qat: Optional[bool] = False, + set_global: bool = True, ): self.quantizer = quantizer self.quantization_config = quantization_config self.calibrate = calibrate self.calibration_samples = calibration_samples - self.quantizer.set_global(self.quantization_config) + if self.quantization_config is not None and set_global: + self.quantizer.set_global(self.quantization_config) self.converted_graph = None self.is_qat = is_qat diff --git a/backends/test/harness/tester.py b/backends/test/harness/tester.py index e418f795b35..06db1aae13d 100644 --- a/backends/test/harness/tester.py +++ b/backends/test/harness/tester.py @@ -1,6 +1,6 @@ import random from collections import Counter, OrderedDict -from typing import Any, Dict, List, Optional, Tuple, Type +from typing import Any, Callable, Dict, List, Optional, Tuple import torch @@ -33,7 +33,7 @@ def __init__( self, module: torch.nn.Module, example_inputs: Tuple[torch.Tensor], - stage_classes: Dict[StageType, Type], + stage_classes: Dict[StageType, Callable], dynamic_shapes: Optional[Tuple[Any]] = None, ): module.eval() @@ -81,7 +81,7 @@ def __init__( self.stage_output = None @staticmethod - def default_stage_classes() -> Dict[StageType, Type]: + def default_stage_classes() -> Dict[StageType, Callable]: """ Returns a map of StageType to default Stage implementation. """ diff --git a/backends/test/suite/__init__.py b/backends/test/suite/__init__.py index 86cb5a5716f..7190da4e0fd 100644 --- a/backends/test/suite/__init__.py +++ b/backends/test/suite/__init__.py @@ -129,7 +129,7 @@ def _make_wrapped_test( def wrapped_test(self): with TestContext(test_name, flow.name, params): test_kwargs = params or {} - test_kwargs["tester_factory"] = flow.tester_factory + test_kwargs["flow"] = flow test_func(self, **test_kwargs) @@ -175,7 +175,7 @@ def load_tests(loader, suite, pattern): class OperatorTest(unittest.TestCase): - def _test_op(self, model, inputs, tester_factory): + def _test_op(self, model, inputs, flow: TestFlow): context = get_active_test_context() # This should be set in the wrapped test. See _make_wrapped_test above. @@ -184,9 +184,8 @@ def _test_op(self, model, inputs, tester_factory): run_summary = run_test( model, inputs, - tester_factory, + flow, context.test_name, - context.flow_name, context.params, ) diff --git a/backends/test/suite/flow.py b/backends/test/suite/flow.py index bda85a76ffa..2006ac9a485 100644 --- a/backends/test/suite/flow.py +++ b/backends/test/suite/flow.py @@ -1,9 +1,10 @@ import logging -from dataclasses import dataclass +from dataclasses import dataclass, field from typing import Callable from executorch.backends.test.harness import Tester +from executorch.backends.test.harness.stages import Quantize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) @@ -22,41 +23,43 @@ class TestFlow: backend: str """ The name of the target backend. """ - tester_factory: Callable[[], Tester] + tester_factory: Callable[..., Tester] """ A factory function that returns a Tester instance for this lowering flow. 
""" + quantize: bool = field(default=False) + """ Whether to tester should run the quantize stage on the model. """ -def create_xnnpack_flow() -> TestFlow | None: - try: - from executorch.backends.xnnpack.test.tester import Tester as XnnpackTester + quantize_stage_factory: Callable[..., Quantize] | None = None + """ A factory function which instantiates a Quantize stage. Can be None to use the tester's default. """ - return TestFlow( - name="xnnpack", - backend="xnnpack", - tester_factory=XnnpackTester, - ) - except Exception: - logger.info("Skipping XNNPACK flow registration due to import failure.") - return None +def all_flows() -> dict[str, TestFlow]: + flows = [] -def create_coreml_flow() -> TestFlow | None: try: - from executorch.backends.apple.coreml.test.tester import CoreMLTester + from executorch.backends.test.suite.flows.xnnpack import ( + XNNPACK_STATIC_INT8_PER_CHANNEL_TEST_FLOW, + XNNPACK_TEST_FLOW, + ) - return TestFlow( - name="coreml", - backend="coreml", - tester_factory=CoreMLTester, + flows += [ + XNNPACK_TEST_FLOW, + XNNPACK_STATIC_INT8_PER_CHANNEL_TEST_FLOW, + ] + except Exception as e: + logger.info(f"Skipping XNNPACK flow registration: {e}") + + try: + from executorch.backends.test.suite.flows.coreml import ( + COREML_STATIC_INT8_TEST_FLOW, + COREML_TEST_FLOW, ) - except Exception: - logger.info("Skipping Core ML flow registration due to import failure.") - return None + flows += [ + COREML_TEST_FLOW, + COREML_STATIC_INT8_TEST_FLOW, + ] + except Exception as e: + logger.info(f"Skipping Core ML flow registration: {e}") -def all_flows() -> dict[str, TestFlow]: - flows = [ - create_xnnpack_flow(), - create_coreml_flow(), - ] return {f.name: f for f in flows if f is not None} diff --git a/backends/test/suite/flows/__init__.py b/backends/test/suite/flows/__init__.py new file mode 100644 index 00000000000..6ac1a72bde6 --- /dev/null +++ b/backends/test/suite/flows/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe diff --git a/backends/test/suite/flows/coreml.py b/backends/test/suite/flows/coreml.py new file mode 100644 index 00000000000..fd956b64f05 --- /dev/null +++ b/backends/test/suite/flows/coreml.py @@ -0,0 +1,30 @@ +import functools +from typing import Any + +import coremltools + +from executorch.backends.apple.coreml.test.tester import CoreMLTester +from executorch.backends.test.suite.flow import TestFlow + + +def _create_coreml_flow( + name: str, + quantize: bool = False, + minimum_deployment_target: Any = coremltools.target.iOS15, +) -> TestFlow: + return TestFlow( + name, + backend="coreml", + tester_factory=functools.partial( + CoreMLTester, minimum_deployment_target=minimum_deployment_target + ), + quantize=quantize, + ) + + +COREML_TEST_FLOW = _create_coreml_flow("coreml") +COREML_STATIC_INT8_TEST_FLOW = _create_coreml_flow( + "coreml_static_int8", + quantize=True, + minimum_deployment_target=coremltools.target.iOS17, +) diff --git a/backends/test/suite/flows/xnnpack.py b/backends/test/suite/flows/xnnpack.py new file mode 100644 index 00000000000..9de071377ff --- /dev/null +++ b/backends/test/suite/flows/xnnpack.py @@ -0,0 +1,49 @@ +import logging +from typing import Callable + +from executorch.backends.test.harness.stages import Quantize +from executorch.backends.test.suite.flow import TestFlow +from executorch.backends.xnnpack.quantizer.xnnpack_quantizer import ( + get_symmetric_quantization_config, +) +from executorch.backends.xnnpack.test.tester import ( + Quantize as XnnpackQuantize, + Tester as XnnpackTester, +) + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + + +def _create_xnnpack_flow_base( + name: str, quantize_stage_factory: Callable[..., Quantize] | None = None +) -> TestFlow: + return TestFlow( + name, + backend="xnnpack", + tester_factory=XnnpackTester, + quantize=quantize_stage_factory is not None, + quantize_stage_factory=quantize_stage_factory, + ) + + +def _create_xnnpack_flow() -> TestFlow: + return _create_xnnpack_flow_base("xnnpack") + + +def _create_xnnpack_static_int8_per_channel_flow() -> TestFlow: + def create_quantize_stage() -> Quantize: + qparams = get_symmetric_quantization_config(is_per_channel=True) + return XnnpackQuantize( + quantization_config=qparams, + ) + + return _create_xnnpack_flow_base( + "xnnpack_static_int8_per_channel", create_quantize_stage + ) + + +XNNPACK_TEST_FLOW = _create_xnnpack_flow() +XNNPACK_STATIC_INT8_PER_CHANNEL_TEST_FLOW = ( + _create_xnnpack_static_int8_per_channel_flow() +) diff --git a/backends/test/suite/models/__init__.py b/backends/test/suite/models/__init__.py index cb89aa816fa..e155e3382c5 100644 --- a/backends/test/suite/models/__init__.py +++ b/backends/test/suite/models/__init__.py @@ -12,7 +12,6 @@ from typing import Any, Callable import torch -from executorch.backends.test.harness import Tester from executorch.backends.test.suite import get_test_flows from executorch.backends.test.suite.context import get_active_test_context, TestContext from executorch.backends.test.suite.flow import TestFlow @@ -49,7 +48,7 @@ def wrapped_test(self): "use_dynamic_shapes": use_dynamic_shapes, } with TestContext(test_name, flow.name, params): - test_func(self, dtype, use_dynamic_shapes, flow.tester_factory) + test_func(self, flow, dtype, use_dynamic_shapes) dtype_name = str(dtype)[6:] # strip "torch." 
test_name = f"{test_func.__name__}_{flow.name}_{dtype_name}" @@ -104,9 +103,9 @@ def inner_decorator(func: Callable) -> Callable: def run_model_test( model: torch.nn.Module, inputs: tuple[Any], + flow: TestFlow, dtype: torch.dtype, dynamic_shapes: Any | None, - tester_factory: Callable[[], Tester], ): model = model.to(dtype) context = get_active_test_context() @@ -117,9 +116,8 @@ def run_model_test( run_summary = run_test( model, inputs, - tester_factory, + flow, context.test_name, - context.flow_name, context.params, dynamic_shapes=dynamic_shapes, ) diff --git a/backends/test/suite/models/test_torchaudio.py b/backends/test/suite/models/test_torchaudio.py index 5d526fe708e..69f6de4684f 100644 --- a/backends/test/suite/models/test_torchaudio.py +++ b/backends/test/suite/models/test_torchaudio.py @@ -7,11 +7,12 @@ # pyre-unsafe import unittest -from typing import Callable, Tuple +from typing import Tuple import torch import torchaudio +from executorch.backends.test.suite.flow import TestFlow from executorch.backends.test.suite.models import ( model_test_cls, model_test_params, @@ -50,7 +51,7 @@ def forward( class TorchAudio(unittest.TestCase): @model_test_params(dtypes=[torch.float32], supports_dynamic_shapes=False) def test_conformer( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): inner_model = torchaudio.models.Conformer( input_dim=80, @@ -70,11 +71,11 @@ def test_conformer( encoder_padding_mask, ) - run_model_test(model, inputs, dtype, None, tester_factory) + run_model_test(model, inputs, flow, dtype, None) @model_test_params(dtypes=[torch.float32]) def test_wav2letter( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchaudio.models.Wav2Letter() inputs = (torch.randn(1, 1, 1024, dtype=dtype),) @@ -87,11 +88,14 @@ def test_wav2letter( if use_dynamic_shapes else None ) - run_model_test(model, inputs, dtype, dynamic_shapes, tester_factory) + run_model_test(model, inputs, flow, dtype, dynamic_shapes) @unittest.skip("This model times out on all backends.") def test_wavernn( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, + flow: TestFlow, + dtype: torch.dtype, + use_dynamic_shapes: bool, ): model = torchaudio.models.WaveRNN( upsample_scales=[5, 5, 8], n_classes=512, hop_length=200 @@ -103,4 +107,4 @@ def test_wavernn( torch.randn(1, 1, 128, 64), # specgram ) - run_model_test(model, inputs, dtype, None, tester_factory) + run_model_test(model, inputs, flow, dtype, None) diff --git a/backends/test/suite/models/test_torchvision.py b/backends/test/suite/models/test_torchvision.py index 2ef864ef42c..e69de80a871 100644 --- a/backends/test/suite/models/test_torchvision.py +++ b/backends/test/suite/models/test_torchvision.py @@ -7,11 +7,11 @@ # pyre-unsafe import unittest -from typing import Callable import torch import torchvision +from executorch.backends.test.suite.flow import TestFlow from executorch.backends.test.suite.models import ( model_test_cls, model_test_params, @@ -31,9 +31,9 @@ class TorchVision(unittest.TestCase): def _test_cv_model( self, model: torch.nn.Module, + flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool, - tester_factory: Callable, ): # Test a CV model that follows the standard conventions. 
inputs = (torch.randn(1, 3, 224, 224, dtype=dtype),) @@ -49,126 +49,124 @@ def _test_cv_model( else None ) - run_model_test(model, inputs, dtype, dynamic_shapes, tester_factory) + run_model_test(model, inputs, flow, dtype, dynamic_shapes) def test_alexnet( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.alexnet() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_convnext_small( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.convnext_small() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_densenet161( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.densenet161() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_efficientnet_b4( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.efficientnet_b4() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_efficientnet_v2_s( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.efficientnet_v2_s() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_googlenet( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.googlenet() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_inception_v3( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.inception_v3() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) @model_test_params(supports_dynamic_shapes=False) def test_maxvit_t( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.maxvit_t() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_mnasnet1_0( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.mnasnet1_0() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_mobilenet_v2( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, 
dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.mobilenet_v2() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_mobilenet_v3_small( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.mobilenet_v3_small() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_regnet_y_1_6gf( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.regnet_y_1_6gf() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_resnet50( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.resnet50() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_resnext50_32x4d( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.resnext50_32x4d() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_shufflenet_v2_x1_0( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.shufflenet_v2_x1_0() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_squeezenet1_1( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.squeezenet1_1() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_swin_v2_t( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.swin_v2_t() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - def test_vgg11( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable - ): + def test_vgg11(self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool): model = torchvision.models.vgg11() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) @model_test_params(supports_dynamic_shapes=False) def test_vit_b_16( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.vit_b_16() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) def test_wide_resnet50_2( - self, dtype: torch.dtype, use_dynamic_shapes: bool, tester_factory: Callable + 
self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool ): model = torchvision.models.wide_resnet50_2() - self._test_cv_model(model, dtype, use_dynamic_shapes, tester_factory) + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) diff --git a/backends/test/suite/operators/test_add.py b/backends/test/suite/operators/test_add.py index 970a4babbf0..2ff1644d672 100644 --- a/backends/test/suite/operators/test_add.py +++ b/backends/test/suite/operators/test_add.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -31,52 +30,52 @@ def forward(self, x, y): @operator_test class Add(OperatorTest): @dtype_test - def test_add_dtype(self, dtype, tester_factory: Callable) -> None: + def test_add_dtype(self, flow: TestFlow, dtype) -> None: self._test_op( Model(), ( (torch.rand(2, 10) * 100).to(dtype), (torch.rand(2, 10) * 100).to(dtype), ), - tester_factory, + flow, ) - def test_add_f32_bcast_first(self, tester_factory: Callable) -> None: + def test_add_f32_bcast_first(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 5, 1, 5), ), - tester_factory, + flow, ) - def test_add_f32_bcast_second(self, tester_factory: Callable) -> None: + def test_add_f32_bcast_second(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(4, 4, 2, 7), torch.randn(2, 7), ), - tester_factory, + flow, ) - def test_add_f32_bcast_unary(self, tester_factory: Callable) -> None: + def test_add_f32_bcast_unary(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 1, 5), ), - tester_factory, + flow, ) - def test_add_f32_alpha(self, tester_factory: Callable) -> None: + def test_add_f32_alpha(self, flow: TestFlow) -> None: self._test_op( ModelAlpha(alpha=2), ( torch.randn(1, 25), torch.randn(1, 25), ), - tester_factory, + flow, ) diff --git a/backends/test/suite/operators/test_div.py b/backends/test/suite/operators/test_div.py index 9e98775e855..1367a4bc8f7 100644 --- a/backends/test/suite/operators/test_div.py +++ b/backends/test/suite/operators/test_div.py @@ -7,11 +7,12 @@ # pyre-unsafe -from typing import Callable, Optional +from typing import Optional import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -31,7 +32,7 @@ def forward(self, x, y): @operator_test class Divide(OperatorTest): @dtype_test - def test_divide_dtype(self, dtype, tester_factory: Callable) -> None: + def test_divide_dtype(self, flow: TestFlow, dtype) -> None: self._test_op( Model(), ( @@ -40,10 +41,10 @@ def test_divide_dtype(self, dtype, tester_factory: Callable) -> None: dtype ), # Adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_bcast_first(self, tester_factory: Callable) -> None: + def test_divide_f32_bcast_first(self, flow: TestFlow) -> None: self._test_op( Model(), ( @@ -51,10 +52,10 @@ def test_divide_f32_bcast_first(self, tester_factory: Callable) -> None: torch.randn(1, 5, 1, 5).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_bcast_second(self, tester_factory: Callable) -> None: + def test_divide_f32_bcast_second(self, flow: TestFlow) -> None: self._test_op( Model(), ( @@ -62,10 +63,10 @@ def 
test_divide_f32_bcast_second(self, tester_factory: Callable) -> None: torch.randn(2, 7).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_bcast_unary(self, tester_factory: Callable) -> None: + def test_divide_f32_bcast_unary(self, flow: TestFlow) -> None: self._test_op( Model(), ( @@ -73,10 +74,10 @@ def test_divide_f32_bcast_unary(self, tester_factory: Callable) -> None: torch.randn(1, 1, 5).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_trunc(self, tester_factory: Callable) -> None: + def test_divide_f32_trunc(self, flow: TestFlow) -> None: self._test_op( ModelWithRounding(rounding_mode="trunc"), ( @@ -84,10 +85,10 @@ def test_divide_f32_trunc(self, tester_factory: Callable) -> None: torch.randn(3, 4).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_floor(self, tester_factory: Callable) -> None: + def test_divide_f32_floor(self, flow: TestFlow) -> None: self._test_op( ModelWithRounding(rounding_mode="floor"), ( @@ -95,5 +96,5 @@ def test_divide_f32_floor(self, tester_factory: Callable) -> None: torch.randn(3, 4).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) diff --git a/backends/test/suite/operators/test_elu.py b/backends/test/suite/operators/test_elu.py index 371a13aa26c..be4bb99bba0 100644 --- a/backends/test/suite/operators/test_elu.py +++ b/backends/test/suite/operators/test_elu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -27,17 +26,17 @@ def forward(self, x): @operator_test class TestELU(OperatorTest): @dtype_test - def test_elu_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), tester_factory) + def test_elu_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), flow) - def test_elu_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_elu_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_elu_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_elu_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_elu_f32_alpha(self, tester_factory: Callable) -> None: - self._test_op(Model(alpha=0.5), (torch.randn(3, 4, 5),), tester_factory) + def test_elu_f32_alpha(self, flow: TestFlow) -> None: + self._test_op(Model(alpha=0.5), (torch.randn(3, 4, 5),), flow) - def test_elu_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_elu_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) diff --git a/backends/test/suite/operators/test_gelu.py b/backends/test/suite/operators/test_gelu.py index 639b2fbb9b1..948947907d9 100644 --- a/backends/test/suite/operators/test_gelu.py +++ b/backends/test/suite/operators/test_gelu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from 
executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,28 +25,24 @@ def forward(self, x): @operator_test class TestGELU(OperatorTest): @dtype_test - def test_gelu_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op( - Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory - ) + def test_gelu_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) - def test_gelu_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_gelu_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_gelu_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_gelu_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_gelu_f32_tanh_approximation(self, tester_factory: Callable) -> None: - self._test_op( - Model(approximate="tanh"), (torch.randn(3, 4, 5),), tester_factory - ) + def test_gelu_f32_tanh_approximation(self, flow: TestFlow) -> None: + self._test_op(Model(approximate="tanh"), (torch.randn(3, 4, 5),), flow) - def test_gelu_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_gelu_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific values spanning negative and positive ranges x = torch.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) - def test_gelu_f32_tanh_boundary_values(self, tester_factory: Callable) -> None: + def test_gelu_f32_tanh_boundary_values(self, flow: TestFlow) -> None: # Test tanh approximation with specific values x = torch.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) - self._test_op(Model(approximate="tanh"), (x,), tester_factory) + self._test_op(Model(approximate="tanh"), (x,), flow) diff --git a/backends/test/suite/operators/test_glu.py b/backends/test/suite/operators/test_glu.py index 74f46bb9532..b7126d5fdf5 100644 --- a/backends/test/suite/operators/test_glu.py +++ b/backends/test/suite/operators/test_glu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,26 +25,24 @@ def forward(self, x): @operator_test class TestGLU(OperatorTest): @dtype_test - def test_glu_dtype(self, dtype, tester_factory: Callable) -> None: + def test_glu_dtype(self, flow: TestFlow, dtype) -> None: # Input must have even number of elements in the specified dimension - self._test_op( - Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory - ) + self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) - def test_glu_f32_dim_last(self, tester_factory: Callable) -> None: + def test_glu_f32_dim_last(self, flow: TestFlow) -> None: # Default dim is -1 (last dimension) - self._test_op(Model(), (torch.randn(3, 4, 6),), tester_factory) + self._test_op(Model(), (torch.randn(3, 4, 6),), flow) - def test_glu_f32_dim_first(self, tester_factory: Callable) -> None: + def test_glu_f32_dim_first(self, flow: TestFlow) -> None: # Test with dim=0 (first dimension) - self._test_op(Model(dim=0), (torch.randn(4, 3, 5),), 
tester_factory) + self._test_op(Model(dim=0), (torch.randn(4, 3, 5),), flow) - def test_glu_f32_dim_middle(self, tester_factory: Callable) -> None: + def test_glu_f32_dim_middle(self, flow: TestFlow) -> None: # Test with dim=1 (middle dimension) - self._test_op(Model(dim=1), (torch.randn(3, 8, 5),), tester_factory) + self._test_op(Model(dim=1), (torch.randn(3, 8, 5),), flow) - def test_glu_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_glu_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific values spanning negative and positive ranges # Input must have even number of elements in the specified dimension x = torch.tensor([[-10.0, -5.0, -1.0, 0.0], [1.0, 5.0, 10.0, -2.0]]) - self._test_op(Model(dim=1), (x,), tester_factory) + self._test_op(Model(dim=1), (x,), flow) diff --git a/backends/test/suite/operators/test_hardsigmoid.py b/backends/test/suite/operators/test_hardsigmoid.py index f26877782db..7ad92819506 100644 --- a/backends/test/suite/operators/test_hardsigmoid.py +++ b/backends/test/suite/operators/test_hardsigmoid.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,19 +25,19 @@ def forward(self, x): @operator_test class TestHardsigmoid(OperatorTest): @dtype_test - def test_hardsigmoid_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), tester_factory) + def test_hardsigmoid_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), flow) - def test_hardsigmoid_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_hardsigmoid_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_hardsigmoid_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_hardsigmoid_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_hardsigmoid_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_hardsigmoid_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_hardsigmoid_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_hardsigmoid_f32_boundary_values(self, flow: TestFlow) -> None: # Test with values that span the hardsigmoid's piecewise regions x = torch.tensor([-5.0, -3.0, -1.0, 0.0, 1.0, 3.0, 5.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_hardswish.py b/backends/test/suite/operators/test_hardswish.py index 0c2c6915760..e8d25266af5 100644 --- a/backends/test/suite/operators/test_hardswish.py +++ b/backends/test/suite/operators/test_hardswish.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,19 +25,19 @@ def forward(self, x): @operator_test class TestHardswish(OperatorTest): @dtype_test - def test_hardswish_dtype(self, dtype, 
tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), tester_factory) + def test_hardswish_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), flow) - def test_hardswish_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_hardswish_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_hardswish_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_hardswish_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_hardswish_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_hardswish_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_hardswish_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_hardswish_f32_boundary_values(self, flow: TestFlow) -> None: # Test with values that span the hardswish's piecewise regions x = torch.tensor([-5.0, -3.0, -1.0, 0.0, 1.0, 3.0, 5.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_hardtanh.py b/backends/test/suite/operators/test_hardtanh.py index f74c52e93db..ffef9977e01 100644 --- a/backends/test/suite/operators/test_hardtanh.py +++ b/backends/test/suite/operators/test_hardtanh.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -30,24 +29,22 @@ def forward(self, x): @operator_test class TestHardtanh(OperatorTest): @dtype_test - def test_hardtanh_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 4 - 2).to(dtype),), tester_factory) + def test_hardtanh_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 4 - 2).to(dtype),), flow) - def test_hardtanh_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_hardtanh_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_hardtanh_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_hardtanh_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_hardtanh_f32_custom_range(self, tester_factory: Callable) -> None: - self._test_op( - Model(min_val=-2.0, max_val=2.0), (torch.randn(3, 4, 5),), tester_factory - ) + def test_hardtanh_f32_custom_range(self, flow: TestFlow) -> None: + self._test_op(Model(min_val=-2.0, max_val=2.0), (torch.randn(3, 4, 5),), flow) - def test_hardtanh_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_hardtanh_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_hardtanh_f32_boundary_values(self, tester_factory: Callable) -> None: + def 
test_hardtanh_f32_boundary_values(self, flow: TestFlow) -> None: # Test with values that span the hardtanh's piecewise regions x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_leaky_relu.py b/backends/test/suite/operators/test_leaky_relu.py index 01d30e9c682..e753abf8bb6 100644 --- a/backends/test/suite/operators/test_leaky_relu.py +++ b/backends/test/suite/operators/test_leaky_relu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -29,24 +28,22 @@ def forward(self, x): @operator_test class TestLeakyReLU(OperatorTest): @dtype_test - def test_leaky_relu_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 2 - 1).to(dtype),), tester_factory) + def test_leaky_relu_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 2 - 1).to(dtype),), flow) - def test_leaky_relu_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_leaky_relu_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_leaky_relu_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_leaky_relu_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_leaky_relu_f32_custom_slope(self, tester_factory: Callable) -> None: - self._test_op( - Model(negative_slope=0.1), (torch.randn(3, 4, 5),), tester_factory - ) + def test_leaky_relu_f32_custom_slope(self, flow: TestFlow) -> None: + self._test_op(Model(negative_slope=0.1), (torch.randn(3, 4, 5),), flow) - def test_leaky_relu_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_leaky_relu_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_leaky_relu_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_leaky_relu_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific positive and negative values x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_logsigmoid.py b/backends/test/suite/operators/test_logsigmoid.py index ff6a2df83ae..ff62358a98e 100644 --- a/backends/test/suite/operators/test_logsigmoid.py +++ b/backends/test/suite/operators/test_logsigmoid.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -22,18 +21,16 @@ def forward(self, x): @operator_test class TestLogSigmoid(OperatorTest): @dtype_test - def test_logsigmoid_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op( - Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory - ) + def test_logsigmoid_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 10 - 
5).to(dtype),), flow) - def test_logsigmoid_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_logsigmoid_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_logsigmoid_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_logsigmoid_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_logsigmoid_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_logsigmoid_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific values spanning negative and positive ranges x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_mul.py b/backends/test/suite/operators/test_mul.py index 19d1c8e939d..5914b455762 100644 --- a/backends/test/suite/operators/test_mul.py +++ b/backends/test/suite/operators/test_mul.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -22,42 +21,42 @@ def forward(self, x, y): @operator_test class Multiply(OperatorTest): @dtype_test - def test_multiply_dtype(self, dtype, tester_factory: Callable) -> None: + def test_multiply_dtype(self, flow: TestFlow, dtype) -> None: self._test_op( Model(), ( (torch.rand(2, 10) * 100).to(dtype), (torch.rand(2, 10) * 100).to(dtype), ), - tester_factory, + flow, ) - def test_multiply_f32_bcast_first(self, tester_factory: Callable) -> None: + def test_multiply_f32_bcast_first(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 5, 1, 5), ), - tester_factory, + flow, ) - def test_multiply_f32_bcast_second(self, tester_factory: Callable) -> None: + def test_multiply_f32_bcast_second(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(4, 4, 2, 7), torch.randn(2, 7), ), - tester_factory, + flow, ) - def test_multiply_f32_bcast_unary(self, tester_factory: Callable) -> None: + def test_multiply_f32_bcast_unary(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 1, 5), ), - tester_factory, + flow, ) diff --git a/backends/test/suite/operators/test_prelu.py b/backends/test/suite/operators/test_prelu.py index a9aee50bc18..5987f6bd75b 100644 --- a/backends/test/suite/operators/test_prelu.py +++ b/backends/test/suite/operators/test_prelu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,33 +25,27 @@ def forward(self, x): @operator_test class TestPReLU(OperatorTest): @dtype_test - def test_prelu_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op( - Model().to(dtype), ((torch.rand(2, 10) * 2 - 1).to(dtype),), tester_factory - ) + def test_prelu_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model().to(dtype), ((torch.rand(2, 10) * 2 - 1).to(dtype),), flow) - def test_prelu_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def 
test_prelu_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_prelu_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_prelu_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_prelu_f32_custom_init(self, tester_factory: Callable) -> None: - self._test_op(Model(init=0.1), (torch.randn(3, 4, 5),), tester_factory) + def test_prelu_f32_custom_init(self, flow: TestFlow) -> None: + self._test_op(Model(init=0.1), (torch.randn(3, 4, 5),), flow) - def test_prelu_f32_channel_shared(self, tester_factory: Callable) -> None: + def test_prelu_f32_channel_shared(self, flow: TestFlow) -> None: # Default num_parameters=1 means the parameter is shared across all channels - self._test_op( - Model(num_parameters=1), (torch.randn(2, 3, 4, 5),), tester_factory - ) + self._test_op(Model(num_parameters=1), (torch.randn(2, 3, 4, 5),), flow) - def test_prelu_f32_per_channel_parameter(self, tester_factory: Callable) -> None: + def test_prelu_f32_per_channel_parameter(self, flow: TestFlow) -> None: # num_parameters=3 means each channel has its own parameter (for dim=1) - self._test_op( - Model(num_parameters=3), (torch.randn(2, 3, 4, 5),), tester_factory - ) + self._test_op(Model(num_parameters=3), (torch.randn(2, 3, 4, 5),), flow) - def test_prelu_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_prelu_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific positive and negative values x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_relu.py b/backends/test/suite/operators/test_relu.py index ab6d93d6279..d90a7c6f04e 100644 --- a/backends/test/suite/operators/test_relu.py +++ b/backends/test/suite/operators/test_relu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,14 +25,14 @@ def forward(self, x): @operator_test class TestReLU(OperatorTest): @dtype_test - def test_relu_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), tester_factory) + def test_relu_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), flow) - def test_relu_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_relu_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_relu_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_relu_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_relu_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_relu_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) diff --git a/backends/test/suite/operators/test_sigmoid.py b/backends/test/suite/operators/test_sigmoid.py index 7e70b30ff19..2a2c8c0539e 
100644 --- a/backends/test/suite/operators/test_sigmoid.py +++ b/backends/test/suite/operators/test_sigmoid.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -22,18 +21,16 @@ def forward(self, x): @operator_test class TestSigmoid(OperatorTest): @dtype_test - def test_sigmoid_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op( - Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory - ) + def test_sigmoid_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) - def test_sigmoid_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_sigmoid_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_sigmoid_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_sigmoid_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_sigmoid_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_sigmoid_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific values spanning negative and positive ranges x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_silu.py b/backends/test/suite/operators/test_silu.py index a30b47a1c57..9d8afbaa716 100644 --- a/backends/test/suite/operators/test_silu.py +++ b/backends/test/suite/operators/test_silu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,19 +25,19 @@ def forward(self, x): @operator_test class TestSiLU(OperatorTest): @dtype_test - def test_silu_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.randn(2, 10) * 100).to(dtype),), tester_factory) + def test_silu_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.randn(2, 10) * 100).to(dtype),), flow) - def test_silu_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_silu_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_silu_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_silu_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_silu_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_silu_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_silu_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_silu_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific values spanning negative and positive ranges x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) - 
self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_sub.py b/backends/test/suite/operators/test_sub.py index 19884419637..30c0db5878c 100644 --- a/backends/test/suite/operators/test_sub.py +++ b/backends/test/suite/operators/test_sub.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -31,52 +30,52 @@ def forward(self, x, y): @operator_test class Subtract(OperatorTest): @dtype_test - def test_subtract_dtype(self, dtype, tester_factory: Callable) -> None: + def test_subtract_dtype(self, flow: TestFlow, dtype) -> None: self._test_op( Model(), ( (torch.rand(2, 10) * 100).to(dtype), (torch.rand(2, 10) * 100).to(dtype), ), - tester_factory, + flow, ) - def test_subtract_f32_bcast_first(self, tester_factory: Callable) -> None: + def test_subtract_f32_bcast_first(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 5, 1, 5), ), - tester_factory, + flow, ) - def test_subtract_f32_bcast_second(self, tester_factory: Callable) -> None: + def test_subtract_f32_bcast_second(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(4, 4, 2, 7), torch.randn(2, 7), ), - tester_factory, + flow, ) - def test_subtract_f32_bcast_unary(self, tester_factory: Callable) -> None: + def test_subtract_f32_bcast_unary(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 1, 5), ), - tester_factory, + flow, ) - def test_subtract_f32_alpha(self, tester_factory: Callable) -> None: + def test_subtract_f32_alpha(self, flow: TestFlow) -> None: self._test_op( ModelAlpha(alpha=2), ( torch.randn(1, 25), torch.randn(1, 25), ), - tester_factory, + flow, ) diff --git a/backends/test/suite/operators/test_tanh.py b/backends/test/suite/operators/test_tanh.py index 1d7889a95da..b7e4ce7166b 100644 --- a/backends/test/suite/operators/test_tanh.py +++ b/backends/test/suite/operators/test_tanh.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -22,18 +21,16 @@ def forward(self, x): @operator_test class TestTanh(OperatorTest): @dtype_test - def test_tanh_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op( - Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory - ) + def test_tanh_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) - def test_tanh_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_tanh_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_tanh_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_tanh_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_tanh_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_tanh_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific values spanning negative and positive ranges x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) 
-        self._test_op(Model(), (x,), tester_factory)
+        self._test_op(Model(), (x,), flow)
diff --git a/backends/test/suite/operators/test_threshold.py b/backends/test/suite/operators/test_threshold.py
index 97c84c58404..1dfac7dd007 100644
--- a/backends/test/suite/operators/test_threshold.py
+++ b/backends/test/suite/operators/test_threshold.py
@@ -7,11 +7,10 @@
 
 # pyre-unsafe
 
-from typing import Callable
-
 import torch
 
 from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.flow import TestFlow
 
 
 class Model(torch.nn.Module):
@@ -30,42 +29,36 @@ def forward(self, x):
 @operator_test
 class TestThreshold(OperatorTest):
     @dtype_test
-    def test_threshold_dtype(self, dtype, tester_factory: Callable) -> None:
-        self._test_op(
-            Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory
-        )
+    def test_threshold_dtype(self, flow: TestFlow, dtype) -> None:
+        self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow)
 
-    def test_threshold_f32_single_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(20),), tester_factory)
+    def test_threshold_f32_single_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(20),), flow)
 
-    def test_threshold_f32_multi_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory)
+    def test_threshold_f32_multi_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow)
 
-    def test_threshold_f32_custom_threshold(self, tester_factory: Callable) -> None:
-        self._test_op(Model(threshold=1.0), (torch.randn(3, 4, 5),), tester_factory)
+    def test_threshold_f32_custom_threshold(self, flow: TestFlow) -> None:
+        self._test_op(Model(threshold=1.0), (torch.randn(3, 4, 5),), flow)
 
-    def test_threshold_f32_custom_value(self, tester_factory: Callable) -> None:
-        self._test_op(Model(value=2.0), (torch.randn(3, 4, 5),), tester_factory)
+    def test_threshold_f32_custom_value(self, flow: TestFlow) -> None:
+        self._test_op(Model(value=2.0), (torch.randn(3, 4, 5),), flow)
 
-    def test_threshold_f32_custom_threshold_value(
-        self, tester_factory: Callable
-    ) -> None:
-        self._test_op(
-            Model(threshold=0.5, value=1.0), (torch.randn(3, 4, 5),), tester_factory
-        )
+    def test_threshold_f32_custom_threshold_value(self, flow: TestFlow) -> None:
+        self._test_op(Model(threshold=0.5, value=1.0), (torch.randn(3, 4, 5),), flow)
 
-    def test_threshold_f32_inplace(self, tester_factory: Callable) -> None:
-        self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory)
+    def test_threshold_f32_inplace(self, flow: TestFlow) -> None:
+        self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow)
 
-    def test_threshold_f32_boundary_values(self, tester_factory: Callable) -> None:
+    def test_threshold_f32_boundary_values(self, flow: TestFlow) -> None:
         # Test with specific values around the threshold
         x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0])
-        self._test_op(Model(), (x,), tester_factory)
+        self._test_op(Model(), (x,), flow)
 
-    def test_threshold_f32_all_params(self, tester_factory: Callable) -> None:
+    def test_threshold_f32_all_params(self, flow: TestFlow) -> None:
         # Test with all parameters customized
         self._test_op(
             Model(threshold=0.5, value=3.0, inplace=True),
             (torch.randn(3, 4, 5),),
-            tester_factory,
+            flow,
         )
diff --git a/backends/test/suite/reporting.py b/backends/test/suite/reporting.py
index d7181300873..ad32a8c74c9 100644
--- a/backends/test/suite/reporting.py
+++ b/backends/test/suite/reporting.py
@@ -15,22 +15,25 @@ class TestResult(IntEnum):
     EAGER_FAIL = 2
     """ The test failed due to the model failing to run in eager mode. """
 
-    EXPORT_FAIL = 3
+    QUANTIZE_FAIL = 3
+    """ The test failed due to the quantization stage failing. """
+
+    EXPORT_FAIL = 4
     """ The test failed due to the model failing to export. """
 
-    LOWER_FAIL = 4
+    LOWER_FAIL = 5
     """ The test failed due to a failure in partitioning or lowering. """
 
-    PTE_LOAD_FAIL = 5
+    PTE_LOAD_FAIL = 6
     """ The test failed due to the resulting PTE failing to load. """
 
-    PTE_RUN_FAIL = 6
+    PTE_RUN_FAIL = 7
     """ The test failed due to the resulting PTE failing to run. """
 
-    OUTPUT_MISMATCH_FAIL = 7
+    OUTPUT_MISMATCH_FAIL = 8
     """ The test failed due to a mismatch between runtime and reference outputs. """
 
-    UNKNOWN_FAIL = 8
+    UNKNOWN_FAIL = 9
     """ The test failed in an unknown or unexpected manner. """
 
     def is_success(self):
@@ -49,6 +52,8 @@ def display_name(self):
             return "Success (Undelegated)"
         elif self == TestResult.EAGER_FAIL:
             return "Fail (Eager)"
+        elif self == TestResult.QUANTIZE_FAIL:
+            return "Fail (Quantize)"
         elif self == TestResult.EXPORT_FAIL:
             return "Fail (Export)"
         elif self == TestResult.LOWER_FAIL:
diff --git a/backends/test/suite/runner.py b/backends/test/suite/runner.py
index 5c70ac415f3..3fe9084548c 100644
--- a/backends/test/suite/runner.py
+++ b/backends/test/suite/runner.py
@@ -3,13 +3,13 @@
 import re
 import unittest
 
-from typing import Any, Callable
+from typing import Any
 
 import torch
 
-from executorch.backends.test.harness import Tester
 from executorch.backends.test.harness.stages import StageType
 from executorch.backends.test.suite.discovery import discover_tests, TestFilter
+from executorch.backends.test.suite.flow import TestFlow
 from executorch.backends.test.suite.reporting import (
     begin_test_session,
     complete_test_session,
@@ -29,9 +29,8 @@
 def run_test(  # noqa: C901
     model: torch.nn.Module,
     inputs: Any,
-    tester_factory: Callable[[], Tester],
+    flow: TestFlow,
     test_name: str,
-    flow_name: str,
     params: dict | None,
     dynamic_shapes: Any | None = None,
 ) -> TestCaseSummary:
@@ -46,14 +45,12 @@ def build_result(
     ) -> TestCaseSummary:
         return TestCaseSummary(
             name=test_name,
-            flow=flow_name,
+            flow=flow.name,
             params=params,
             result=result,
             error=error,
         )
 
-    model.eval()
-
     # Ensure the model can run in eager mode.
     try:
         model(*inputs)
@@ -61,10 +58,18 @@
         return build_result(TestResult.EAGER_FAIL, e)
 
     try:
-        tester = tester_factory(model, inputs)
+        tester = flow.tester_factory(model, inputs)
     except Exception as e:
         return build_result(TestResult.UNKNOWN_FAIL, e)
 
+    if flow.quantize:
+        try:
+            tester.quantize(
+                flow.quantize_stage_factory() if flow.quantize_stage_factory else None
+            )
+        except Exception as e:
+            return build_result(TestResult.QUANTIZE_FAIL, e)
+
     try:
         # TODO Use Tester dynamic_shapes parameter once input generation can properly handle derived dims.
         tester.export(
@@ -126,6 +131,9 @@ def print_summary(summary: RunSummary):
     print()
     print("[Failure]")
+    print(
+        f"{summary.aggregated_results.get(TestResult.QUANTIZE_FAIL, 0):>5} Quantization Fail"
+    )
     print(
         f"{summary.aggregated_results.get(TestResult.LOWER_FAIL, 0):>5} Lowering Fail"
     )
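
For reference, the run_test() changes above assume a TestFlow that carries its own quantization configuration. Below is a minimal sketch of a flow definition that would exercise the new quantize branch; the CoreMLTester class name and the iOS17 deployment target are illustrative assumptions, not part of this diff, while the TestFlow attributes the runner reads (name, backend, tester_factory, quantize, quantize_stage_factory) are taken from the changes above.

# Hypothetical example (sketch only): a quantized CoreML flow under the new
# TestFlow contract. run_test() calls flow.tester_factory(model, inputs),
# then, because quantize=True, tester.quantize(flow.quantize_stage_factory()).
import functools

import coremltools as ct

# "CoreMLTester" is an assumed name for the Tester subclass defined in
# backends/apple/coreml/test/tester.py; Quantize defaults to the static
# int8 linear quantization config.
from executorch.backends.apple.coreml.test.tester import CoreMLTester, Quantize
from executorch.backends.test.suite.flow import TestFlow


def _create_coreml_static_int8_flow() -> TestFlow:
    return TestFlow(
        name="coreml_static_int8",
        backend="coreml",
        tester_factory=functools.partial(
            # iOS17 is an assumption here; quantized models generally need a
            # newer deployment target than the iOS15 default.
            CoreMLTester, minimum_deployment_target=ct.target.iOS17
        ),
        quantize=True,
        quantize_stage_factory=Quantize,
    )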