diff --git a/exir/tensor.py b/exir/tensor.py
index 1345067354f..e11797927ec 100644
--- a/exir/tensor.py
+++ b/exir/tensor.py
@@ -67,12 +67,12 @@ def dim_order_from_stride(stride: Tuple[int]) -> Tuple[bytes]:
     Another example is: sizes = (1, 3, 1, 1) with strides = (3, 1, 3, 3),
     returned value is (0, 2, 3, 1)
     """
+    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious, guard_or_false
+
     for _, s in enumerate(stride):
-        if s == 0:
+        if guard_or_false(s == 0):
             raise ValueError("0 in strides is not supported for ExecuTorch.")
 
-    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
-
     class K(NamedTuple):
         stride: int
 
diff --git a/exir/tests/test_serde.py b/exir/tests/test_serde.py
index 67821d0bffb..139c2ee77cf 100644
--- a/exir/tests/test_serde.py
+++ b/exir/tests/test_serde.py
@@ -237,6 +237,31 @@ def forward(self, x):
             == edge_deserialized.to_executorch().buffer
         )
 
+    def test_dim_order_from_stride(self) -> None:
+        from executorch.exir import EdgeCompileConfig
+
+        class Test(torch.nn.Module):
+            def forward(self, t1, t2):
+                # nonzero() has a data-dependent output shape, so the strides
+                # of the selected tensor are unbacked SymInts at export time.
+                idx = torch.nonzero(t1).reshape(-1)
+                return torch.index_select(t2, 0, idx)
+
+        x = torch.tensor([0, 1, 1, 0, 1], dtype=torch.bool)
+        y = torch.randn(5, 6)
+
+        expo_prog = torch.export.export_for_training(Test(), (x, y))
+        edge_prog = to_edge_transform_and_lower(
+            expo_prog,
+            partitioner=[XnnpackFloatingPointPartitioner()],
+            compile_config=EdgeCompileConfig(
+                _check_ir_validity=False, _use_edge_ops=True
+            ),
+        )
+        # Serialization must not raise now that dim_order_from_stride accepts
+        # unbacked strides; sanity-check that a program buffer is produced.
+        self.assertIsNotNone(edge_prog.to_executorch().buffer)
+
     def test_meta_stack_trace_module_hierarchy(self) -> None:
         class Model(nn.Module):
             def __init__(self):