diff --git a/lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp b/lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp
index d2e3c94733e9..e25d068f2ed8 100644
--- a/lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp
+++ b/lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp
@@ -1548,8 +1548,8 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
           auto dty = dataTy.getDtype();
           Value scalar;
           if (FloatType fpTy = dyn_cast<FloatType>(dty)) {
-            auto inf =
-                APFloat::getInf(fpTy.getFloatSemantics(), /*Negative=*/true);
+            auto inf = APFloat::getLargest(fpTy.getFloatSemantics(),
+                                           /*Negative=*/true);
             scalar = rewriter.create<Torch::ConstantFloatOp>(
                 binder.getLoc(), rewriter.getType<Torch::FloatType>(),
                 rewriter.getFloatAttr(rewriter.getF64Type(),
diff --git a/lib/Conversion/TorchToLinalg/Pooling.cpp b/lib/Conversion/TorchToLinalg/Pooling.cpp
index 3d5ddf91ff40..37fccb026110 100644
--- a/lib/Conversion/TorchToLinalg/Pooling.cpp
+++ b/lib/Conversion/TorchToLinalg/Pooling.cpp
@@ -441,9 +441,9 @@ class ConvertAtenMaxPoolOp : public OpConversionPattern<OpTy> {
     Value self = adaptor.getSelf();
     Type elementType = cast<RankedTensorType>(self.getType()).getElementType();
     TypedAttr smallestFPValueAttr = rewriter.getFloatAttr(
-        elementType,
-        APFloat::getInf(cast<mlir::FloatType>(elementType).getFloatSemantics(),
-                        /*Negative=*/true));
+        elementType, APFloat::getLargest(
+                         cast<mlir::FloatType>(elementType).getFloatSemantics(),
+                         /*Negative=*/true));
     Value initValue =
         rewriter.create<arith::ConstantOp>(op->getLoc(), smallestFPValueAttr);
 
@@ -693,7 +693,7 @@ class ConvertAtenMaxPoolOp : public OpConversionPattern<OpTy> {
     if (auto fpty = dyn_cast<mlir::FloatType>(elementType)) {
       smallestValueAttr = rewriter.getFloatAttr(
           elementType,
-          APFloat::getInf(fpty.getFloatSemantics(), /*Negative=*/true));
+          APFloat::getLargest(fpty.getFloatSemantics(), /*Negative=*/true));
     } else if (auto intTy = dyn_cast<mlir::IntegerType>(elementType)) {
       int64_t bw = intTy.getIntOrFloatBitWidth();
       smallestValueAttr = rewriter.getIntegerAttr(
@@ -1379,9 +1379,9 @@ class AdaptiveMaxPoolingHelper : public AdaptivePoolingHelper {
         typeConverter->convertType(op.getResult1().getType()));
     Type auxTensorElementType = auxTensorType.getElementType();
     auto smallestFPValueAttr = rewriter.getFloatAttr(
-        elementType,
-        APFloat::getInf(cast<mlir::FloatType>(elementType).getFloatSemantics(),
-                        /*Negative=*/true));
+        elementType, APFloat::getLargest(
+                         cast<mlir::FloatType>(elementType).getFloatSemantics(),
+                         /*Negative=*/true));
     buffVal = rewriter.create<arith::ConstantOp>(loc, elementType,
                                                  smallestFPValueAttr);
     auxTensor = rewriter.create<tensor::EmptyOp>(
diff --git a/lib/Conversion/TorchToLinalg/Reduction.cpp b/lib/Conversion/TorchToLinalg/Reduction.cpp
index e3635fbbd095..e0c4e218a00f 100644
--- a/lib/Conversion/TorchToLinalg/Reduction.cpp
+++ b/lib/Conversion/TorchToLinalg/Reduction.cpp
@@ -117,7 +117,7 @@ class ConvertAtenMinMaxDimOp : public OpConversionPattern<OpTy> {
       fillValue = rewriter.create<arith::ConstantOp>(
           loc, rewriter.getFloatAttr(
                    inElementType,
-                   APFloat::getInf(
+                   APFloat::getLargest(
                        cast<mlir::FloatType>(inElementType).getFloatSemantics(),
                        /*Negative=*/isMax)));
     } else if (!isUnsigned) {
@@ -302,7 +302,7 @@ static Value createInitElementForReduceOp(OpBuilder &b, Location loc,
     return b.create<arith::ConstantOp>(
         loc, b.getFloatAttr(
                  elementType,
-                 APFloat::getInf(
+                 APFloat::getLargest(
                      cast<mlir::FloatType>(elementType).getFloatSemantics(),
                      /*Negative=*/true)));
   else if (isa<mlir::IntegerType>(elementType) &&
@@ -318,7 +318,7 @@ static Value createInitElementForReduceOp(OpBuilder &b, Location loc,
     return b.create<arith::ConstantOp>(
         loc, b.getFloatAttr(
                  elementType,
-                 APFloat::getInf(
+                 APFloat::getLargest(
                      cast<mlir::FloatType>(elementType).getFloatSemantics(),
                      /*Negative=*/false)));
   else if (isa<mlir::IntegerType>(elementType) &&
diff --git a/lib/Conversion/TorchToStablehlo/Reduction.cpp b/lib/Conversion/TorchToStablehlo/Reduction.cpp
index c007ea7a69f5..1cfdf7f7acfb 100644
--- a/lib/Conversion/TorchToStablehlo/Reduction.cpp
+++ b/lib/Conversion/TorchToStablehlo/Reduction.cpp
@@ -62,9 +62,9 @@ static Value createInitialValueForReduceOp(Operation *op, Type elementTy,
   if (isa(op)) {
     if (isa<mlir::FloatType>(elementTy)) {
       constAttr = DenseElementsAttr::get(
-          constType,
-          {APFloat::getInf(cast<mlir::FloatType>(elementTy).getFloatSemantics(),
-                           /*negative=*/true)});
+          constType, {APFloat::getLargest(
+                         cast<mlir::FloatType>(elementTy).getFloatSemantics(),
+                         /*negative=*/true)});
     } else if (isa<mlir::IntegerType>(elementTy)) {
       constAttr = DenseElementsAttr::get(
           constType,
@@ -75,9 +75,9 @@ static Value createInitialValueForReduceOp(Operation *op, Type elementTy,
   if (isa(op)) {
     if (isa<mlir::FloatType>(elementTy)) {
       constAttr = DenseElementsAttr::get(
-          constType,
-          {APFloat::getInf(cast<mlir::FloatType>(elementTy).getFloatSemantics(),
-                           /*negative=*/false)});
+          constType, {APFloat::getLargest(
+                         cast<mlir::FloatType>(elementTy).getFloatSemantics(),
+                         /*negative=*/false)});
     } else if (isa<mlir::IntegerType>(elementTy)) {
       constAttr = DenseElementsAttr::get(
           constType,
diff --git a/lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp b/lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp
index 8796fd249e9e..492366d43756 100644
--- a/lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp
+++ b/lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp
@@ -2072,7 +2072,7 @@ class ConvertAtenKthvalueOp : public OpConversionPattern<AtenKthvalueOp> {
           loc,
           rewriter.getFloatAttr(
               inputElementType,
-              APFloat::getInf(
+              APFloat::getLargest(
                   cast<mlir::FloatType>(inputElementType).getFloatSemantics(),
                   /*Negative=*/false)));
       // min float for linalg generic op tensor
@@ -2080,7 +2080,7 @@ class ConvertAtenKthvalueOp : public OpConversionPattern<AtenKthvalueOp> {
           loc,
           rewriter.getFloatAttr(
               inputElementType,
-              APFloat::getInf(
+              APFloat::getLargest(
                  cast<mlir::FloatType>(inputElementType).getFloatSemantics(),
                  /*Negative=*/true)));
     } else if (!isUnsigned) {
diff --git a/test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir b/test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir
index da88f8782ede..f495a9212ed0 100644
--- a/test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir
+++ b/test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir
@@ -805,13 +805,13 @@ func.func @test_selu(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,
 
 // CHECK-LABEL: func.func @test_reduce_max_empty_set_fp
 func.func @test_reduce_max_empty_set_fp(%arg0: !torch.vtensor<[2,0,4],f32>, %arg1: !torch.vtensor<[1],si64>) -> !torch.vtensor<[2,1,4],f32> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 20 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  // CHECK-DAG: %[[INF:.+]] = torch.constant.float 0xFFF0000000000000
+  // CHECK-DAG: %[[NEGMAX:.+]] = torch.constant.float -3.4028234663852886E+38
   // CHECK-DAG: %[[INT2:.+]] = torch.constant.int 2
   // CHECK-DAG: %[[INT1:.+]] = torch.constant.int 1
   // CHECK-DAG: %[[INT4:.+]] = torch.constant.int 4
   // CHECK-DAG: %[[NONE:.+]] = torch.constant.none
   // CHECK-DAG: %[[LIST:.+]] = torch.prim.ListConstruct %[[INT2]], %[[INT1]], %[[INT4]]
-  // CHECK-DAG: %[[FULL:.+]] = torch.aten.full %[[LIST]], %[[INF]], %[[NONE]], %[[NONE]], %[[NONE]]
+  // CHECK-DAG: %[[FULL:.+]] = torch.aten.full %[[LIST]], %[[NEGMAX]], %[[NONE]], %[[NONE]], %[[NONE]]
   // CHECK: return %[[FULL]]
   %0 = torch.operator "onnx.ReduceMax"(%arg0, %arg1) {torch.onnx.keepdims = 1 : si64} : (!torch.vtensor<[2,0,4],f32>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[2,1,4],f32>
   return %0 : !torch.vtensor<[2,1,4],f32>
diff --git a/test/Conversion/TorchToLinalg/pooling.mlir b/test/Conversion/TorchToLinalg/pooling.mlir
index b95e96c4a461..840f27e7fb70 100644
--- a/test/Conversion/TorchToLinalg/pooling.mlir
+++ b/test/Conversion/TorchToLinalg/pooling.mlir
@@ -7,7 +7,7 @@ func.func @forward_max_pool1d(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vten
   %int3 = torch.constant.int 3
   %int4 = torch.constant.int 4
   %false = torch.constant.bool false
-  // CHECK: %[[NEUTRAL:.*]] = arith.constant 0xFF800000 : f32
+  // CHECK: %[[NEUTRAL:.*]] = arith.constant -3.40282347E+38 : f32
   // CHECK: %[[PADDED:.*]] = tensor.pad %{{.*}} low[0, 0, 3] high[0, 0, 3]
   // CHECK: %[[OUT:.*]] = linalg.fill ins(%[[NEUTRAL]] : f32) outs(%{{.*}} : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
   // CHECK: %[[INIT:.*]] = tensor.empty() : tensor<1xf32>
@@ -33,7 +33,7 @@ func.func @forward_max_pool2d(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vt
   %int7 = torch.constant.int 7
   %int8 = torch.constant.int 8
   %false = torch.constant.bool false
-  // CHECK: %[[NEUTRAL:.*]] = arith.constant 0xFF800000 : f32
+  // CHECK: %[[NEUTRAL:.*]] = arith.constant -3.40282347E+38 : f32
   // CHECK: %[[PADDED:.*]] = tensor.pad %{{.*}} low[0, 0, 5, 6] high[0, 0, 5, 6]
   // CHECK: %[[OUT:.*]] = linalg.fill ins(%[[NEUTRAL]] : f32) outs(%{{.*}} : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
   // CHECK: %[[INIT:.*]] = tensor.empty() : tensor<1x2xf32>
@@ -106,7 +106,7 @@ func.func @forward_max_pool3d(%arg0: !torch.vtensor<[?,?,?,?,?],f32>) -> !torch.
 
   %4 = torch.aten.max_pool3d %arg0, %kernel_size, %stride, %padding, %dilation, %false : !torch.vtensor<[?,?,?,?,?],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[?,?,?,?,?],f32>
 
-  // CHECK: %[[MIN_VALUE:.*]] = arith.constant 0xFF800000 : f32
+  // CHECK: %[[MIN_VALUE:.*]] = arith.constant -3.40282347E+38 : f32
   // CHECK: %[[PADDED_INPUT_TENSOR:.*]] = tensor.pad %{{.*}} low[0, 0, 4, 4, 4] high[0, 0, 4, 4, 4] {
   // CHECK-NEXT: ^bb0(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index):
   // CHECK-NEXT: tensor.yield %[[MIN_VALUE:.*]] : f32
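
Note on the change itself (not part of the patch): APFloat::getInf builds an IEEE infinity for the given float semantics, while APFloat::getLargest builds the largest *finite* magnitude, so the max-reduction identity moves from -inf (printed by MLIR as the bit patterns 0xFF800000 for f32 and 0xFFF0000000000000 for f64 constants) to -FLT_MAX, which is exactly why the FileCheck expectations above change to -3.40282347E+38 and -3.4028234663852886E+38. Below is a minimal standalone sketch of the two calls, assuming only LLVM's ADT/Support headers; the main() driver and variable names are illustrative, not from the patch:

    // Illustrative comparison of the old and new reduction identity values.
    #include "llvm/ADT/APFloat.h"
    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      const llvm::fltSemantics &f32Sem = llvm::APFloat::IEEEsingle();

      // Old identity: negative infinity (MLIR asm prints it as 0xFF800000).
      llvm::APFloat negInf = llvm::APFloat::getInf(f32Sem, /*Negative=*/true);

      // New identity: largest finite f32 magnitude, negated (-FLT_MAX).
      llvm::APFloat negMax =
          llvm::APFloat::getLargest(f32Sem, /*Negative=*/true);

      llvm::SmallString<32> before, after;
      negInf.toString(before); // expect "-Inf"
      negMax.toString(after);  // expect "-3.40282347E+38"
      llvm::outs() << before << " -> " << after << "\n";
      return 0;
    }

Judging by the updated tests, the intent is to seed max-style pooling/reduction accumulators with the smallest finite value rather than -inf, so degenerate cases (empty reduction sets, fully padded windows) produce a finite value instead of an infinity.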