Skip to content

Commit aafbbfc

Browse files
committed
Update docs and verifiers
Signed-off-by: hanhanW <[email protected]>
1 parent 1e75325 commit aafbbfc

File tree

3 files changed

+20
-25
lines changed

3 files changed

+20
-25
lines changed

mlir/include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.td

Lines changed: 12 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -150,9 +150,10 @@ def Linalg_PackOp : Linalg_RelayoutOp<"pack", [
150150

151151
`padding_value` specifies a padding value at the boundary on non-perfectly
152152
divisible dimensions. Padding is optional:
153-
- If absent, it is UB if the tile does not perfectly divide the dimension.
153+
- If absent, it assumes the tile perfectly divides the dimension.
154154
- If present, it will pad along high dimensions (high-padding) to make the
155-
tile complete.
155+
tile complete. Note that it is not allowed to have artificial padding that
156+
is not strictly required by linalg.pack.
156157

157158
Example:
158159
```mlir
@@ -167,6 +168,15 @@ def Linalg_PackOp : Linalg_RelayoutOp<"pack", [
167168
//
168169
// Note: Only tiled dimensions can be padded.
169170
```
171+
172+
Invalid example that has artificial padding:
173+
```mlir
174+
%0 = linalg.pack %src padding_value(%cst : f32) inner_dims_pos = [0]
175+
inner_tiles = [8] into %dest
176+
: tensor<9xf32> -> tensor<3x8xf32>
177+
// \
178+
// expect tensor<2x8xf32> because CeilDiv(9, 8) = 2
179+
```
170180
}];
171181
let arguments = (ins AnyRankedTensor:$source,
172182
AnyRankedTensor:$dest,

mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp

Lines changed: 5 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -32,6 +32,7 @@
3232
#include "mlir/IR/OpImplementation.h"
3333
#include "mlir/IR/OperationSupport.h"
3434
#include "mlir/IR/PatternMatch.h"
35+
#include "mlir/IR/TypeUtilities.h"
3536
#include "mlir/Interfaces/InferTypeOpInterface.h"
3637
#include "mlir/Interfaces/SideEffectInterfaces.h"
3738

@@ -4599,22 +4600,6 @@ static bool isInvalidPackingPosSpecification(ArrayRef<int64_t> dimsPos,
45994600
});
46004601
}
46014602

4602-
/// Returns true if the dimension of `sourceShape` is smaller than the dimension
4603-
/// of the `limitShape`.
4604-
static bool isCompatibleShape(ArrayRef<int64_t> sourceShape,
4605-
ArrayRef<int64_t> limitShape) {
4606-
assert(
4607-
sourceShape.size() == limitShape.size() &&
4608-
"expected source shape rank, and limit of the shape to have same rank");
4609-
return llvm::all_of(
4610-
llvm::zip(sourceShape, limitShape), [](std::tuple<int64_t, int64_t> it) {
4611-
int64_t sourceExtent = std::get<0>(it);
4612-
int64_t limit = std::get<1>(it);
4613-
return ShapedType::isDynamic(sourceExtent) ||
4614-
ShapedType::isDynamic(limit) || sourceExtent == limit;
4615-
});
4616-
}
4617-
46184603
template <typename OpTy>
46194604
static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
46204605
static_assert(llvm::is_one_of<OpTy, PackOp, UnPackOp>::value,
@@ -4689,10 +4674,10 @@ static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
46894674
return op->emitError("mismatch in inner tile sizes specified and shaped of "
46904675
"tiled dimension in the packed type");
46914676
}
4692-
if (!isCompatibleShape(expectedPackedType.getShape(),
4693-
packedType.getShape())) {
4694-
return op->emitError("the shape of output is not large enough to hold the "
4695-
"packed data. Expected at least ")
4677+
if (failed(verifyCompatibleShape(expectedPackedType.getShape(),
4678+
packedType.getShape()))) {
4679+
return op->emitError("the shape of unpacked domain value is not large "
4680+
"enough to hold the packed data. Expected at least ")
46964681
<< expectedPackedType << ", got " << packedType;
46974682
}
46984683
return success();

mlir/test/Dialect/Linalg/invalid.mlir

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1828,23 +1828,23 @@ func.func @unpack_invalid_outer_dims_perm(%source: tensor<128x256xf32>, %dest: t
18281828
// The outer dims in the output tensor are incorrectly/unexpectedly transposed.
18291829
// This could be fixed by adding `outer_dims_perm = [1, 0]` (the default value assumes no transpose).
18301830
func.func @pack_invalid_result_shape(%input: tensor<256x128xf32>, %output: tensor<4x16x32x16xf32>) -> tensor<4x16x32x16xf32> {
1831-
// expected-error@+1 {{the shape of output is not large enough to hold the packed data. Expected at least 'tensor<16x4x32x16xf32>', got 'tensor<4x16x32x16xf32>'}}
1831+
// expected-error@+1 {{the shape of unpacked domain value is not large enough to hold the packed data. Expected at least 'tensor<16x4x32x16xf32>', got 'tensor<4x16x32x16xf32>'}}
18321832
%0 = linalg.pack %input inner_dims_pos = [1, 0] inner_tiles = [32, 16] into %output : tensor<256x128xf32> -> tensor<4x16x32x16xf32>
18331833
return %0 : tensor<4x16x32x16xf32>
18341834
}
18351835

18361836
// -----
18371837

18381838
func.func @pack_invalid(%input: tensor<256x128xf32>, %output: tensor<8x7x16x32xf32>) -> tensor<8x7x16x32xf32> {
1839-
// expected-error@+1 {{the shape of output is not large enough to hold the packed data. Expected at least 'tensor<8x8x16x32xf32>', got 'tensor<8x7x16x32xf32>'}}
1839+
// expected-error@+1 {{the shape of unpacked domain value is not large enough to hold the packed data. Expected at least 'tensor<8x8x16x32xf32>', got 'tensor<8x7x16x32xf32>'}}
18401840
%0 = linalg.pack %input inner_dims_pos = [1, 0] inner_tiles = [16, 32] into %output : tensor<256x128xf32> -> tensor<8x7x16x32xf32>
18411841
return %0 : tensor<8x7x16x32xf32>
18421842
}
18431843

18441844
// -----
18451845

18461846
func.func @unpack_invalid(%output: tensor<256x128xf32>, %input: tensor<8x8x4x32xf32>) -> tensor<256x128xf32> {
1847-
// expected-error@+1 {{the shape of output is not large enough to hold the packed data. Expected at least 'tensor<8x32x4x32xf32>', got 'tensor<8x8x4x32xf32>'}}
1847+
// expected-error@+1 {{the shape of unpacked domain value is not large enough to hold the packed data. Expected at least 'tensor<8x32x4x32xf32>', got 'tensor<8x8x4x32xf32>'}}
18481848
%0 = linalg.unpack %input inner_dims_pos = [1, 0] inner_tiles = [4, 32] into %output : tensor<8x8x4x32xf32> -> tensor<256x128xf32>
18491849
return %0 : tensor<256x128xf32>
18501850
}

0 commit comments

Comments (0)