diff --git a/pytext/config/pytext_config.py b/pytext/config/pytext_config.py
index 22f8d1668..65724e9f9 100644
--- a/pytext/config/pytext_config.py
+++ b/pytext/config/pytext_config.py
@@ -91,8 +91,6 @@ class ExportConfig(ConfigBase):
     export_onnx_path: str = "/tmp/model.onnx"
     # Exported torchscript model will be stored here
     export_torchscript_path: Optional[str] = None
-    # Exported jit lite model will be stored here
-    export_lite_path: Optional[str] = None
     # Export quantized torchscript model
     torchscript_quantize: Optional[bool] = False
     # Accelerator options.
diff --git a/pytext/workflow.py b/pytext/workflow.py
index dbf3a9271..d31ff1037 100644
--- a/pytext/workflow.py
+++ b/pytext/workflow.py
@@ -210,16 +210,6 @@ def save_and_export(
         seq_padding_control=export_config.seq_padding_control,
         batch_padding_control=export_config.batch_padding_control,
     )
-    if export_config.export_lite_path:
-        task.lite_export(
-            model=task.model,
-            export_path=export_config.export_lite_path,
-            quantize=export_config.torchscript_quantize,
-            inference_interface=export_config.inference_interface,
-            accelerate=export_config.accelerate,
-            seq_padding_control=export_config.seq_padding_control,
-            batch_padding_control=export_config.batch_padding_control,
-        )
 
 
 def export_saved_model_to_caffe2(