Skip to content

Commit b440e82

Browse files
Author: sidart
Committed: Remove softmax
Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags:
1 parent eb253fd commit b440e82

File tree

3 files changed

+0
-38
lines changed

3 files changed

+0
-38
lines changed

backends/cortex_m/ops/operators.py

Lines changed: 0 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -13,9 +13,6 @@
1313
# New operator library with a custom namespace to allow fusion etc.
1414
lib = Library("cortex_m", "DEF")
1515

16-
# Import these for the cadence function signatures.
17-
import executorch.backends.cortex_m.cortex_m_ops_lib # noqa: F401
18-
1916
###
2017
# add.Tensor
2118
###
@@ -148,19 +145,3 @@ def dequantize_per_tensor_impl(
148145
return exir_ops.edge.quantized_decomposed.dequantize_per_tensor.default(
149146
input, scale, zero_point, quant_min, quant_max, dtype
150147
)
151-
152-
lib.define(
153-
"softmax(Tensor self, int dim, bool half_to_float) -> Tensor"
154-
)
155-
lib.define(
156-
"softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)"
157-
)
158-
@impl(lib, "softmax", "CompositeExplicitAutograd")
159-
def softmax_impl(self: torch.Tensor, dim: int, half_to_float: bool) -> torch.Tensor:
160-
# Call your custom edge op or fallback
161-
# return exir_ops.edge.cortex_m.softmax(self, dim, half_to_float)
162-
# ctx = get_kernel_ctx() # gets KernelRuntimeContext*
163-
return {}
164-
@impl(lib, "softmax.out", "CompositeExplicitAutograd")
165-
def softmax_out_impl(self: torch.Tensor, dim: int, half_to_float: bool, out: torch.Tensor) -> torch.Tensor:
166-
return exir_ops.edge.cortex_m.softmax_out(self, dim, half_to_float, out)

backends/cortex_m/ops/operators.yaml

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -28,14 +28,3 @@
2828
- arg_meta: null
2929
kernel_name: cortex_m::aten_add_tensor
3030

31-
- func: cortex_m::softmax(Tensor self, int dim, bool half_to_float) -> Tensor
32-
variants: function
33-
kernels:
34-
- arg_meta: null
35-
kernel_name: cortex_m::softmax
36-
37-
- func: cortex_m::softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
38-
variants: function
39-
kernels:
40-
- arg_meta: null
41-
kernel_name: cortex_m::softmax_out

backends/cortex_m/passes/replace_quant_nodes_pass.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -39,14 +39,6 @@ def __init__(self):
3939
"new_target": exir_ops.edge.cortex_m.add.Tensor,
4040
"qualifier": lambda args: True,
4141
},
42-
exir_ops.edge.aten._softmax.out: {
43-
"new_target": exir_ops.edge.cortex_m.softmax.out,
44-
"qualifier": lambda args: True,
45-
},
46-
exir_ops.edge.aten._softmax.default: {
47-
"new_target": exir_ops.edge.cortex_m.softmax, # or .softmax if you have an out variant
48-
"qualifier": lambda args: True,
49-
},
5042
exir_ops.edge.quantized_decomposed.quantize_per_tensor.default: {
5143
"new_target": exir_ops.edge.cortex_m.quantize_per_tensor.default,
5244
"qualifier": self._is_qualified_int8_node,

0 commit comments

Comments (0)