
Commit 0e08666

[mlir][linalg][nfc] Clean-up leftover code post #149156
In #149156, I ensured that we no longer generate spurious `tensor.empty` ops when vectorizing `linalg.unpack`. This follow-up removes leftover code that is now redundant but was missed in the original PR.
1 parent: 90de4a4
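
For context, a minimal illustrative MLIR sketch (not taken from this patch; the value names %dest and %vec and the tensor/vector shapes are hypothetical) of why the deleted block is dead code. Before #149156, vectorizing linalg.unpack reified the result shape and materialised a fresh tensor.empty as the destination of the final write; since that PR the write goes straight into the unpack op's existing destination tensor, so the reified shapes have no remaining users:

// Illustrative fragment only; names and shapes are made up.
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index

// Before #149156: the result shape was reified into tensor.dim ops feeding a
// spurious tensor.empty that served as the destination of the vectorized write.
%d0 = tensor.dim %dest, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %dest, %c1 : tensor<?x?xf32>
%empty = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%old = vector.transfer_write %vec, %empty[%c0, %c0] : vector<4x16xf32>, tensor<?x?xf32>

// After #149156: the write targets the unpack op's own destination tensor, so
// the reified shapes (computed by the C++ removed below) are never used.
%new = vector.transfer_write %vec, %dest[%c0, %c0] : vector<4x16xf32>, tensor<?x?xf32>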

2 files changed (+5, -16 lines)
mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp

Lines changed: 0 additions & 8 deletions
@@ -1913,14 +1913,6 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, linalg::UnPackOp unpackOp,
   readVectorSizes.append(sourceShape.begin() + vectorSizes.size(),
                          sourceShape.end());
 
-  ReifiedRankedShapedTypeDims reifiedRetShapes;
-  LogicalResult status =
-      cast<ReifyRankedShapedTypeOpInterface>(unpackOp.getOperation())
-          .reifyResultShapes(rewriter, reifiedRetShapes);
-  if (status.failed()) {
-    LDBG() << "Unable to reify result shapes of " << unpackOp;
-    return failure();
-  }
   Location loc = unpackOp->getLoc();
 
   auto padValue = arith::ConstantOp::create(

mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir

Lines changed: 5 additions & 8 deletions
@@ -941,20 +941,17 @@ module attributes {transform.with_named_sequence} {
 
 // CHECK-LABEL: func @test_vectorize_dynamic_shapes_unpack
 // CHECK-SAME: %[[ARG_0:.*]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[ARG_1:.*]]: tensor<?x?x16x2xf32>
 func.func @test_vectorize_dynamic_shapes_unpack(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?x16x2xf32>) -> tensor<?x?xf32> {
 // CHECK: %[[C0:.*]] = arith.constant 0
-// CHECK: %[[DIM:.*]] = tensor.dim %arg0, %[[C0]] : tensor<?x?xf32>
-// CHECK: %[[C1:.*]] = arith.constant 1 : index
-// CHECK: %[[DIM0:.*]] = tensor.dim %arg0, %[[C1]] : tensor<?x?xf32>
-// CHECK: %[[CST:.*]] = arith.constant 0.000000e+00
 // CHECK: %[[C01:.*]] = arith.constant 0
 // CHECK: %[[C02:.*]] = arith.constant 0
-// CHECK: %[[DIM4:.*]] = tensor.dim %arg1, %[[C02]] : tensor<?x?x16x2xf32>
-// CHECK: %[[CNST14:.*]] = arith.constant 1
-// CHECK: %[[DIM6:.*]] = tensor.dim %arg1, %[[CNST14]] : tensor<?x?x16x2xf32>
+// CHECK: %[[DIM_0:.*]] = tensor.dim %[[ARG_1]], %[[C02]] : tensor<?x?x16x2xf32>
+// CHECK: %[[C1:.*]] = arith.constant 1
+// CHECK: %[[DIM6:.*]] = tensor.dim %[[ARG_1]], %[[C1]] : tensor<?x?x16x2xf32>
 // CHECK: %[[CNST16:.*]] = arith.constant 16 : index
 // CHECK: %[[CNST2:.*]] = arith.constant 2 : index
-// CHECK: %[[readMsk0:.*]] = vector.create_mask %[[DIM4]], %[[DIM6]], %[[CNST16]], %[[CNST2]] : vector<2x1x16x2xi1>
+// CHECK: %[[readMsk0:.*]] = vector.create_mask %[[DIM_0]], %[[DIM6]], %[[CNST16]], %[[CNST2]] : vector<2x1x16x2xi1>
 // CHECK: %[[read0:.*]] = vector.mask %[[readMsk0]] {{.*}} vector.transfer_read %{{.*}} : tensor<?x?x16x2xf32>, vector<2x1x16x2xf32> } : vector<2x1x16x2xi1> -> vector<2x1x16x2xf32>
 // CHECK: %[[trans0:.*]] = vector.transpose %[[read0]], [0, 3, 1, 2] : vector<2x1x16x2xf32> to vector<2x2x1x16xf32>
 // CHECK: %[[sc0:.*]] = vector.shape_cast %[[trans0]] : vector<2x2x1x16xf32> to vector<4x16xf32>
