|
1 | 1 | // RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s
|
2 | 2 |
|
| 3 | +///---------------------------------------------------------------------------------------- |
| 4 | +/// Tests for vectorizing operations implementing contraction op interface. |
| 5 | +/// Ops implementing the contraction interface are vectorized directly to their |
| 6 | +/// vector dialect named counterparts. |
| 7 | +///---------------------------------------------------------------------------------------- |
| 8 | + |
3 | 9 | func.func @matmul(%A: tensor<8x4xf32>, %B: tensor<4x16xf32>,
|
4 | 10 | %C: tensor<8x16xf32>) -> tensor<8x16xf32> {
|
5 | 11 | %0 = linalg.matmul
|
@@ -208,6 +214,12 @@ module attributes {transform.with_named_sequence} {
|
208 | 214 |
|
209 | 215 | // -----
|
210 | 216 |
|
| 217 | +/// Contractions' arbitrary broadcasts are not supported in contraction interface |
| 218 | +/// vectorization. |
| 219 | +/// Dimension broadcasts are expected to be decomposed first which removes ambiguity |
| 220 | +/// caused by possible variants of dimension materialization. |
| 221 | +/// For example, whether the below target LHS input layout is (m, k) or (k, m). |
| 222 | + |
211 | 223 | func.func @negative_matmul_broadcast(%A: tensor<4xf32>, %B: tensor<4x16xf32>,
|
212 | 224 | %C: tensor<8x16xf32>) -> tensor<8x16xf32> {
|
213 | 225 | %0 = linalg.matmul
|
@@ -368,7 +380,9 @@ module attributes {transform.with_named_sequence} {
|
368 | 380 |
|
369 | 381 | // -----
|
370 | 382 |
|
371 |
| -// Generic is currently ignored in direct lowering to a named contraction. |
| 383 | +/// Generic can represent contractions but it does not implement the contraction interface. |
| 384 | +/// Thus, direct lowering to vector.contract is not supported. |
| 385 | +/// Vectorization still works and applies generic rewrite logic. |
372 | 386 |
|
373 | 387 | func.func @negative_generic(%A: tensor<8x4xf32>, %B: tensor<4x16xf32>,
|
374 | 388 | %C: tensor<8x16xf32>) -> tensor<8x16xf32> {
|
|
0 commit comments