19 changes: 18 additions & 1 deletion lib/Conversion/TorchToTosa/TorchToTosa.cpp
@@ -13,6 +13,7 @@
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/Dialect/Tosa/Utils/ConversionUtils.h"
#include "mlir/IR/DialectResourceBlobManager.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Transforms/DialectConversion.h"
#include "torch-mlir/Conversion/TorchToTosa/TosaLegalizeCommon.h"
@@ -3000,7 +3001,23 @@ LogicalResult ConvertAtenOp<ValueTensorLiteralOp>::matchAndRewrite(
       return success();
     }
   }
-  rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, outputTy, adaptor.getValue());
+  ElementsAttr attr = cast<ElementsAttr>(adaptor.getValue());
+  if (auto res = dyn_cast<DenseResourceElementsAttr>(attr)) {
+    // Resource blobs preserve the producer's signedness, so retag them here to
+    // keep TOSA constants signless and avoid downstream type mismatches.
+    auto shapedAttrTy = cast<ShapedType>(res.getType());
+    if (auto intTy = dyn_cast<IntegerType>(shapedAttrTy.getElementType())) {
+      auto signlessTy =
+          IntegerType::get(rewriter.getContext(), intTy.getWidth());
+      if (intTy != signlessTy) {
+        auto newTy = RankedTensorType::get(shapedAttrTy.getShape(), signlessTy);
+        attr = DenseResourceElementsAttr::get(newTy, res.getRawHandle());
+      }
+    }
+    rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, outputTy, attr);
+    return success();
+  }
+  rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, outputTy, attr);
   return success();
 }
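The retag above rewrites only the attribute's element type while reusing the raw resource handle; no data is copied, because a signed and a signless integer of the same width share identical storage. A minimal, standalone Python sketch of that bit-level equivalence (illustrative only, not part of this patch):

```python
import struct

# -2 stored as a signed little-endian 32-bit integer.
raw = struct.pack("<i", -2)  # b'\xfe\xff\xff\xff'

# Re-reading the same four bytes signed vs. unsigned changes only the
# interpretation, never the bytes themselves -- which is why the conversion
# can keep res.getRawHandle() and merely swap in a signless tensor type.
assert struct.unpack("<i", raw)[0] == -2
assert struct.unpack("<I", raw)[0] == 0xFFFFFFFE
```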

5 changes: 4 additions & 1 deletion projects/pt1/e2e_testing/xfail_sets.py
@@ -679,6 +679,8 @@
"ChannelShuffleTrailingOnes_basic",
"ChannelShuffleDynamicDims_basic",
"ConstantBoolParameterModule_basic",
"ConstantInt32ParameterModule_basic",
"ConstantInt64ParameterModule_basic",
"ContainsIntList_False",
"ContainsIntList_True",
"Conv2dFP16NoBiasModule_basic",
@@ -2890,6 +2892,8 @@
"ColumnStack1dModule_basic",
"ColumnStack0dModule_basic",
"ConstantBoolParameterModule_basic",
"ConstantInt32ParameterModule_basic",
"ConstantInt64ParameterModule_basic",
"ContainsIntList_False",
"ContainsIntList_True",
"Conv1dModule_basic",
@@ -3691,7 +3695,6 @@
"BoolIntTrueModule_basic",
"BroadcastDynamicDimModule_basic",
"CeilFloatModule_basic",
"ConstantBoolParameterModule_basic",
"ContainsIntList_False",
"ContainsIntList_True",
"Conv1dModule_basic",
43 changes: 43 additions & 0 deletions projects/pt1/python/torch_mlir_e2e_test/test_suite/basic.py
@@ -2976,6 +2976,49 @@ def TensorIntModule_basic(module, tu: TestUtils):
 # ==============================================================================


+class ConstantInt32ParameterModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.tensor = torch.tensor([0, 10, 128, 17000], dtype=torch.int32)
+
+    @export
+    @annotate_args(
+        [
+            None,
+        ]
+    )
+    def forward(self):
+        return self.tensor
+
+
+@register_test_case(module_factory=lambda: ConstantInt32ParameterModule())
+def ConstantInt32ParameterModule_basic(module, tu: TestUtils):
+    module.forward()
+
+
+class ConstantInt64ParameterModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.tensor = torch.tensor([1, -2, 3, -4], dtype=torch.int64)
+
+    @export
+    @annotate_args(
+        [
+            None,
+        ]
+    )
+    def forward(self):
+        return self.tensor
+
+
+@register_test_case(module_factory=lambda: ConstantInt64ParameterModule())
+def ConstantInt64ParameterModule_basic(module, tu: TestUtils):
+    module.forward()
+
+
+# ==============================================================================
+
+
 class tensorFloatModule(torch.nn.Module):
     def __init__(self):
         super().__init__()
38 changes: 38 additions & 0 deletions test/Conversion/TorchToTosa/basic.mlir
@@ -1037,6 +1037,44 @@ func.func @torch.vtensor.literal_si32$basic() -> !torch.vtensor<[1,512],si32> {

 // -----

+// CHECK-LABEL: @torch.vtensor.literal_resource_si32$basic(
+// CHECK: %[[CST:.*]] = "tosa.const"() <{values = dense_resource<torch_resource_i32> : tensor<4xi32>}>
+// CHECK: %[[RET:.*]] = torch_c.from_builtin_tensor %[[CST]] : tensor<4xi32> -> !torch.vtensor<[4],si32>
+// CHECK: return %[[RET]] : !torch.vtensor<[4],si32>
+func.func @torch.vtensor.literal_resource_si32$basic() -> !torch.vtensor<[4],si32> {
+  %0 = torch.vtensor.literal(dense_resource<torch_resource_i32> : tensor<4xsi32>) : !torch.vtensor<[4],si32>
+  return %0 : !torch.vtensor<[4],si32>
+}
+
+{-#
+  dialect_resources: {
+    builtin: {
+      torch_resource_i32: "0x08000000000000000a0000008000000068420000"
+    }
+  }
+#-}
+
+// -----
+
+// CHECK-LABEL: @torch.vtensor.literal_resource_si64$basic(
+// CHECK: %[[CST:.*]] = "tosa.const"() <{values = dense_resource<torch_resource_i64> : tensor<3xi64>}>
+// CHECK: %[[RET:.*]] = torch_c.from_builtin_tensor %[[CST]] : tensor<3xi64> -> !torch.vtensor<[3],si64>
+// CHECK: return %[[RET]] : !torch.vtensor<[3],si64>
+func.func @torch.vtensor.literal_resource_si64$basic() -> !torch.vtensor<[3],si64> {
+  %0 = torch.vtensor.literal(dense_resource<torch_resource_i64> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
+  return %0 : !torch.vtensor<[3],si64>
+}
+
+{-#
+  dialect_resources: {
+    builtin: {
+      torch_resource_i64: "0x08000000010000000000000002000000000000000300000000000000"
+    }
+  }
+#-}
+
+// -----
+
 // CHECK-LABEL: func.func @torch.aten.arange.start_step() -> !torch.vtensor<[5],si64> {
 // CHECK: %[[VAL_0:.*]] = torch.constant.none
 // CHECK: %[[VAL_1:.*]] = torch.constant.int 0
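For readers decoding the `dialect_resources` payloads in the lit tests above: a printed MLIR resource blob is a hex string whose first four bytes are a little-endian alignment word, followed by the raw little-endian element data. A small standalone Python sketch (the `decode_blob` helper is made up for illustration; it assumes only that blob layout, which both strings above follow):

```python
import struct

def decode_blob(hex_str: str, fmt: str):
    """Split a printed MLIR resource blob into (alignment, elements).

    Assumes a 4-byte little-endian alignment word followed by raw
    little-endian element data; fmt is a struct code like "i" or "q".
    """
    data = bytes.fromhex(hex_str.removeprefix("0x"))
    (alignment,) = struct.unpack("<I", data[:4])
    count = len(data[4:]) // struct.calcsize(fmt)
    return alignment, struct.unpack(f"<{count}{fmt}", data[4:])

# The i32 blob decodes to the same values ConstantInt32ParameterModule stores.
print(decode_blob("0x08000000000000000a0000008000000068420000", "i"))
# -> (8, (0, 10, 128, 17000))

print(decode_blob("0x08000000010000000000000002000000000000000300000000000000", "q"))
# -> (8, (1, 2, 3))
```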