From 225d343750e130e45e3e6816384c093ec0cb2ef6 Mon Sep 17 00:00:00 2001 From: Princeton Ferro Date: Wed, 16 Apr 2025 17:17:47 -0700 Subject: [PATCH] [NVPTX] lower VECREDUCE max/min to 3-input on sm_100+ Add support for 3-input fmaxnum/fminnum/fmaximum/fminimum introduced in PTX 8.8 for sm_100+: - Use a tree reduction when 3-input operations are supported and the reduction has the `reassoc` flag. - If not on sm_100+/PTX 8.8, fall back to 2-input operations and use the default shuffle reduction. --- llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 138 ++ llvm/lib/Target/NVPTX/NVPTXISelLowering.h | 6 + llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 44 + .../Target/NVPTX/NVPTXTargetTransformInfo.h | 7 + .../CodeGen/NVPTX/reduction-intrinsics.ll | 1501 ++++++++++------- 5 files changed, 1097 insertions(+), 599 deletions(-) diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 15f45a1f35e2f..9003ace263207 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -900,6 +900,17 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, if (STI.allowFP16Math() || STI.hasBF16Math()) setTargetDAGCombine(ISD::SETCC); + // Vector reduction operations. These may be turned into shuffle or tree + // reductions depending on what instructions are available for each type. + for (MVT VT : MVT::fixedlen_vector_valuetypes()) { + MVT EltVT = VT.getVectorElementType(); + if (EltVT == MVT::f32 || EltVT == MVT::f64) { + setOperationAction({ISD::VECREDUCE_FMAX, ISD::VECREDUCE_FMIN, + ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM}, + VT, Custom); + } + } + // Promote fp16 arithmetic if fp16 hardware isn't available or the // user passed --nvptx-no-fp16-math. 
The flag is useful because, // although sm_53+ GPUs have some sort of FP16 support in @@ -1143,6 +1154,10 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { MAKE_CASE(NVPTXISD::BFI) MAKE_CASE(NVPTXISD::PRMT) MAKE_CASE(NVPTXISD::FCOPYSIGN) + MAKE_CASE(NVPTXISD::FMAXNUM3) + MAKE_CASE(NVPTXISD::FMINNUM3) + MAKE_CASE(NVPTXISD::FMAXIMUM3) + MAKE_CASE(NVPTXISD::FMINIMUM3) MAKE_CASE(NVPTXISD::DYNAMIC_STACKALLOC) MAKE_CASE(NVPTXISD::STACKRESTORE) MAKE_CASE(NVPTXISD::STACKSAVE) @@ -1929,6 +1944,124 @@ static SDValue getPRMT(SDValue A, SDValue B, uint64_t Selector, SDLoc DL, return getPRMT(A, B, DAG.getConstant(Selector, DL, MVT::i32), DL, DAG, Mode); } +/// Reduces the elements using the scalar operations provided. The operations +/// are sorted descending in number of inputs they take. The flags on the +/// original reduction operation will be propagated to each scalar operation. +/// Nearby elements are grouped in tree reduction, unlike the shuffle reduction +/// used in ExpandReductions and SelectionDAG. +static SDValue buildTreeReduction( + const SmallVector &Elements, EVT EltTy, + ArrayRef> Ops, + const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG) { + // Build the reduction tree at each level, starting with all the elements. + SmallVector Level = Elements; + + unsigned OpIdx = 0; + while (Level.size() > 1) { + // Try to reduce this level using the current operator. + const auto [Op, NumInputs] = Ops[OpIdx]; + + // Build the next level by partially reducing all elements. + SmallVector ReducedLevel; + unsigned I = 0, E = Level.size(); + for (; I + NumInputs <= E; I += NumInputs) { + // Reduce elements in groups of [NumInputs], as much as possible. + ReducedLevel.push_back(DAG.getNode( + Op, DL, EltTy, ArrayRef(Level).slice(I, NumInputs), Flags)); + } + + if (I < E) { + // Handle leftover elements. + + if (ReducedLevel.empty()) { + // We didn't reduce anything at this level. We need to pick a smaller + // operator. 
+ ++OpIdx; + assert(OpIdx < Ops.size() && "no smaller operators for reduction"); + continue; + } + + // We reduced some things but there's still more left, meaning the + // operator's number of inputs doesn't evenly divide this level size. Move + // these elements to the next level. + for (; I < E; ++I) + ReducedLevel.push_back(Level[I]); + } + + // Process the next level. + Level = ReducedLevel; + } + + return *Level.begin(); +} + +// Get scalar reduction opcode +static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode) { + switch (ReductionOpcode) { + case ISD::VECREDUCE_FMAX: + return ISD::FMAXNUM; + case ISD::VECREDUCE_FMIN: + return ISD::FMINNUM; + case ISD::VECREDUCE_FMAXIMUM: + return ISD::FMAXIMUM; + case ISD::VECREDUCE_FMINIMUM: + return ISD::FMINIMUM; + default: + llvm_unreachable("unhandled reduction opcode"); + } +} + +/// Get 3-input scalar reduction opcode +static std::optional +getScalar3OpcodeForReduction(unsigned ReductionOpcode) { + switch (ReductionOpcode) { + case ISD::VECREDUCE_FMAX: + return NVPTXISD::FMAXNUM3; + case ISD::VECREDUCE_FMIN: + return NVPTXISD::FMINNUM3; + case ISD::VECREDUCE_FMAXIMUM: + return NVPTXISD::FMAXIMUM3; + case ISD::VECREDUCE_FMINIMUM: + return NVPTXISD::FMINIMUM3; + default: + return std::nullopt; + } +} + +/// Lower reductions to either a sequence of operations or a tree if +/// reassociations are allowed. This method will use larger operations like +/// max3/min3 when the target supports them. +SDValue NVPTXTargetLowering::LowerVECREDUCE(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + const SDNodeFlags Flags = Op->getFlags(); + SDValue Vector = Op.getOperand(0); + + const unsigned Opcode = Op->getOpcode(); + const EVT EltTy = Vector.getValueType().getVectorElementType(); + + // Whether we can use 3-input min/max when expanding the reduction. 
+ const bool CanUseMinMax3 = + EltTy == MVT::f32 && STI.getSmVersion() >= 100 && + STI.getPTXVersion() >= 88 && + (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN || + Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM); + + // A list of SDNode opcodes with equivalent semantics, sorted descending by + // number of inputs they take. + SmallVector, 2> ScalarOps; + + if (auto Opcode3Elem = getScalar3OpcodeForReduction(Opcode); + CanUseMinMax3 && Opcode3Elem) + ScalarOps.push_back({*Opcode3Elem, 3}); + ScalarOps.push_back({getScalarOpcodeForReduction(Opcode), 2}); + + SmallVector Elements; + DAG.ExtractVectorElements(Vector, Elements); + + return buildTreeReduction(Elements, EltTy, ScalarOps, DL, Flags, DAG); +} + SDValue NVPTXTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { // Handle bitcasting from v2i8 without hitting the default promotion // strategy which goes through stack memory. @@ -2808,6 +2941,11 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { return LowerVECTOR_SHUFFLE(Op, DAG); case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); + case ISD::VECREDUCE_FMAX: + case ISD::VECREDUCE_FMIN: + case ISD::VECREDUCE_FMAXIMUM: + case ISD::VECREDUCE_FMINIMUM: + return LowerVECREDUCE(Op, DAG); case ISD::STORE: return LowerSTORE(Op, DAG); case ISD::LOAD: diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h index cf72a1e6db89c..43e721a9c2a4c 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h @@ -64,6 +64,11 @@ enum NodeType : unsigned { UNPACK_VECTOR, FCOPYSIGN, + FMAXNUM3, + FMINNUM3, + FMAXIMUM3, + FMINIMUM3, + DYNAMIC_STACKALLOC, STACKRESTORE, STACKSAVE, @@ -286,6 +291,7 @@ class NVPTXTargetLowering : public TargetLowering { SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const; + SDValue 
LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td index aac611d4c903a..1ab41bf9a3312 100644 --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -347,6 +347,36 @@ multiclass FMINIMUMMAXIMUM { Requires<[hasBF16Math, hasSM<80>, hasPTX<70>]>; } +// Template for 3-input minimum/maximum instructions +// (sm_100+/PTX 8.8 and f32 only) +// +// Also defines ftz (flush subnormal inputs and results to sign-preserving +// zero) variants for fp32 functions. +multiclass FMINIMUMMAXIMUM3 { + defvar nan_str = !if(NaN, ".NaN", ""); + def f32rrr : + BasicFlagsNVPTXInst<(outs B32:$dst), + (ins B32:$a, B32:$b, B32:$c), + (ins FTZFlag:$ftz), + OpcStr # "$ftz" # nan_str # ".f32", + [(set f32:$dst, (OpNode f32:$a, f32:$b, f32:$c))]>, + Requires<[hasPTX<88>, hasSM<100>]>; + def f32rri : + BasicFlagsNVPTXInst<(outs B32:$dst), + (ins B32:$a, B32:$b, f32imm:$c), + (ins FTZFlag:$ftz), + OpcStr # "$ftz" # nan_str # ".f32", + [(set f32:$dst, (OpNode f32:$a, f32:$b, fpimm:$c))]>, + Requires<[hasPTX<88>, hasSM<100>]>; + def f32rii : + BasicFlagsNVPTXInst<(outs B32:$dst), + (ins B32:$a, f32imm:$b, f32imm:$c), + (ins FTZFlag:$ftz), + OpcStr # "$ftz" # nan_str # ".f32", + [(set f32:$dst, (OpNode f32:$a, fpimm:$b, fpimm:$c))]>, + Requires<[hasPTX<88>, hasSM<100>]>; +} + // Template for instructions which take three FP args. The // instructions are named ".f" (e.g. "add.f64"). 
// @@ -900,6 +930,20 @@ defm MAX : FMINIMUMMAXIMUM<"max", /* NaN */ false, fmaxnum>; defm MIN_NAN : FMINIMUMMAXIMUM<"min", /* NaN */ true, fminimum>; defm MAX_NAN : FMINIMUMMAXIMUM<"max", /* NaN */ true, fmaximum>; +def nvptx_fminnum3 : SDNode<"NVPTXISD::FMINNUM3", SDTFPTernaryOp, + [SDNPCommutative]>; +def nvptx_fmaxnum3 : SDNode<"NVPTXISD::FMAXNUM3", SDTFPTernaryOp, + [SDNPCommutative]>; +def nvptx_fminimum3 : SDNode<"NVPTXISD::FMINIMUM3", SDTFPTernaryOp, + [SDNPCommutative]>; +def nvptx_fmaximum3 : SDNode<"NVPTXISD::FMAXIMUM3", SDTFPTernaryOp, + [SDNPCommutative]>; + +defm FMIN3 : FMINIMUMMAXIMUM3<"min", /* NaN */ false, nvptx_fminnum3>; +defm FMAX3 : FMINIMUMMAXIMUM3<"max", /* NaN */ false, nvptx_fmaxnum3>; +defm FMINNAN3 : FMINIMUMMAXIMUM3<"min", /* NaN */ true, nvptx_fminimum3>; +defm FMAXNAN3 : FMINIMUMMAXIMUM3<"max", /* NaN */ true, nvptx_fmaximum3>; + defm FABS : F2<"abs", fabs>; defm FNEG : F2<"neg", fneg>; defm FABS_H: F2_Support_Half<"abs", fabs>; diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h index 9a6e261c811a0..b32d931bd3074 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h +++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h @@ -87,6 +87,13 @@ class NVPTXTTIImpl final : public BasicTTIImplBase { } unsigned getMinVectorRegisterBitWidth() const override { return 32; } + bool shouldExpandReduction(const IntrinsicInst *II) const override { + // Turn off ExpandReductions pass for NVPTX, which doesn't have advanced + // swizzling operations. Our backend/Selection DAG can expand these + // reductions with less movs. + return false; + } + // We don't want to prevent inlining because of target-cpu and -features // attributes that were added to newer versions of LLVM/Clang: There are // no incompatible functions in PTX, ptxas will throw errors in such cases. 
diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll index 92cb51b17f0c8..94c2637ea7509 100644 --- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll +++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll @@ -2,19 +2,18 @@ ; RUN: llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM80 %s -; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \ +; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | %ptxas-verify -arch=sm_80 %} -; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \ +; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM100 %s -; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \ +; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | %ptxas-verify -arch=sm_100 %} target triple = "nvptx64-nvidia-cuda" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" -; Check straight line reduction. 
define half @reduce_fadd_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fadd_half( ; CHECK: { @@ -43,45 +42,22 @@ define half @reduce_fadd_half(<8 x half> %in) { } define half @reduce_fadd_half_reassoc(<8 x half> %in) { -; CHECK-SM80-LABEL: reduce_fadd_half_reassoc( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<6>; -; CHECK-SM80-NEXT: .reg .b32 %r<10>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0]; -; CHECK-SM80-NEXT: add.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM80-NEXT: add.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM80-NEXT: add.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: add.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: mov.b16 %rs4, 0x0000; -; CHECK-SM80-NEXT: add.rn.f16 %rs5, %rs3, %rs4; -; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs5; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_fadd_half_reassoc( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<6>; -; CHECK-SM100-NEXT: .reg .b32 %r<10>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0]; -; CHECK-SM100-NEXT: add.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM100-NEXT: add.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM100-NEXT: add.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: add.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: mov.b16 %rs4, 0x0000; -; CHECK-SM100-NEXT: add.rn.f16 %rs5, %rs3, %rs4; -; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs5; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_fadd_half_reassoc( +; CHECK: { +; 
CHECK-NEXT: .reg .b16 %rs<6>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0]; +; CHECK-NEXT: add.rn.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: add.rn.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: add.rn.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: add.rn.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: mov.b16 %rs4, 0x0000; +; CHECK-NEXT: add.rn.f16 %rs5, %rs3, %rs4; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs5; +; CHECK-NEXT: ret; %res = call reassoc half @llvm.vector.reduce.fadd(half 0.0, <8 x half> %in) ret half %res } @@ -109,7 +85,6 @@ define half @reduce_fadd_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. define float @reduce_fadd_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fadd_float( ; CHECK: { @@ -148,15 +123,15 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) { ; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_reassoc_param_0]; ; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4; ; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-SM80-NEXT: add.rn.f32 %r5, %r3, %r1; +; CHECK-SM80-NEXT: add.rn.f32 %r5, %r4, %r2; ; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3; ; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-SM80-NEXT: add.rn.f32 %r10, %r8, %r6; -; CHECK-SM80-NEXT: add.rn.f32 %r11, %r4, %r2; -; CHECK-SM80-NEXT: add.rn.f32 %r12, %r9, %r7; -; CHECK-SM80-NEXT: add.rn.f32 %r13, %r12, %r11; -; CHECK-SM80-NEXT: add.rn.f32 %r14, %r10, %r5; -; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r13; +; CHECK-SM80-NEXT: add.rn.f32 %r10, %r9, %r7; +; CHECK-SM80-NEXT: add.rn.f32 %r11, %r10, %r5; +; CHECK-SM80-NEXT: add.rn.f32 %r12, %r3, %r1; +; CHECK-SM80-NEXT: add.rn.f32 %r13, %r8, %r6; +; CHECK-SM80-NEXT: add.rn.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r11; ; CHECK-SM80-NEXT: add.rn.f32 %r16, %r15, 0f00000000; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r16; 
; CHECK-SM80-NEXT: ret; @@ -164,7 +139,7 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) { ; CHECK-SM100-LABEL: reduce_fadd_float_reassoc( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b32 %r<5>; -; CHECK-SM100-NEXT: .reg .b64 %rd<10>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_reassoc_param_0+16]; @@ -172,11 +147,8 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) { ; CHECK-SM100-NEXT: add.rn.f32x2 %rd5, %rd2, %rd4; ; CHECK-SM100-NEXT: add.rn.f32x2 %rd6, %rd1, %rd3; ; CHECK-SM100-NEXT: add.rn.f32x2 %rd7, %rd6, %rd5; -; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7; -; CHECK-SM100-NEXT: // implicit-def: %r2 -; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2}; -; CHECK-SM100-NEXT: add.rn.f32x2 %rd9, %rd7, %rd8; -; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7; +; CHECK-SM100-NEXT: add.rn.f32 %r3, %r1, %r2; ; CHECK-SM100-NEXT: add.rn.f32 %r4, %r3, 0f00000000; ; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-SM100-NEXT: ret; @@ -229,7 +201,6 @@ define float @reduce_fadd_float_reassoc_nonpow2(<7 x float> %in) { ret float %res } -; Check straight line reduction. 
define half @reduce_fmul_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmul_half( ; CHECK: { @@ -256,41 +227,20 @@ define half @reduce_fmul_half(<8 x half> %in) { } define half @reduce_fmul_half_reassoc(<8 x half> %in) { -; CHECK-SM80-LABEL: reduce_fmul_half_reassoc( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<10>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0]; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs3; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_fmul_half_reassoc( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<10>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0]; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs3; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_fmul_half_reassoc( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0]; 
+; CHECK-NEXT: mul.rn.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: mul.rn.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: mul.rn.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: mul.rn.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; +; CHECK-NEXT: ret; %res = call reassoc half @llvm.vector.reduce.fmul(half 1.0, <8 x half> %in) ret half %res } @@ -321,7 +271,6 @@ define half @reduce_fmul_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. define float @reduce_fmul_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fmul_float( ; CHECK: { @@ -359,22 +308,22 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) { ; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_reassoc_param_0]; ; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4; ; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r3, %r1; +; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r4, %r2; ; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3; ; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r8, %r6; -; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r4, %r2; -; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r9, %r7; -; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r12, %r11; -; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r10, %r5; -; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r13; +; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r9, %r7; +; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r10, %r5; +; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r3, %r1; +; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r8, %r6; +; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r11; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-SM80-NEXT: ret; ; ; CHECK-SM100-LABEL: reduce_fmul_float_reassoc( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b32 %r<4>; -; CHECK-SM100-NEXT: .reg .b64 %rd<10>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, 
[reduce_fmul_float_reassoc_param_0+16]; @@ -382,11 +331,8 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) { ; CHECK-SM100-NEXT: mul.rn.f32x2 %rd5, %rd2, %rd4; ; CHECK-SM100-NEXT: mul.rn.f32x2 %rd6, %rd1, %rd3; ; CHECK-SM100-NEXT: mul.rn.f32x2 %rd7, %rd6, %rd5; -; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7; -; CHECK-SM100-NEXT: // implicit-def: %r2 -; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2}; -; CHECK-SM100-NEXT: mul.rn.f32x2 %rd9, %rd7, %rd8; -; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7; +; CHECK-SM100-NEXT: mul.rn.f32 %r3, %r1, %r2; ; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in) @@ -436,7 +382,6 @@ define float @reduce_fmul_float_reassoc_nonpow2(<7 x float> %in) { ret float %res } -; Check straight line reduction. define half @reduce_fmax_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmax_half( ; CHECK: { @@ -501,84 +446,256 @@ define half @reduce_fmax_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. 
-define float @reduce_fmax_float(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmax_float( +define half @reduce_fmax_half_nnan(<8 x half> %in) { +; CHECK-LABEL: reduce_fmax_half_nnan( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_half_nnan_param_0]; +; CHECK-NEXT: max.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: max.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: max.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: max.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-NEXT: ret; - %res = call float @llvm.vector.reduce.fmax(<8 x float> %in) - ret float %res + %res = call nnan half @llvm.vector.reduce.fmax(<8 x half> %in) + ret half %res } -define float @reduce_fmax_float_reassoc(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmax_float_reassoc( +define half @reduce_fmax_half_nnan_nonpow2(<7 x half> %in) { +; CHECK-LABEL: reduce_fmax_half_nnan_nonpow2( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<12>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, 
[reduce_fmax_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmax_half_nnan_nonpow2_param_0+8]; +; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1; +; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmax_half_nnan_nonpow2_param_0]; +; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2; +; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmax_half_nnan_nonpow2_param_0+12]; +; CHECK-NEXT: max.f16x2 %r4, %r2, %r1; +; CHECK-NEXT: mov.b16 %rs8, 0xFC00; +; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8}; +; CHECK-NEXT: max.f16x2 %r6, %r3, %r5; +; CHECK-NEXT: max.f16x2 %r7, %r4, %r6; +; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7; +; CHECK-NEXT: max.f16 %rs11, %rs9, %rs10; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs11; ; CHECK-NEXT: ret; + %res = call nnan half @llvm.vector.reduce.fmax(<7 x half> %in) + ret half %res +} + +define float @reduce_fmax_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; 
+; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call float @llvm.vector.reduce.fmax(<8 x float> %in) + ret float %res +} + +define float @reduce_fmax_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6; 
+; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmax(<8 x float> %in) ret float %res } define float @reduce_fmax_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: max.f32 
%r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmax_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: max.f32 %r8, %r3, %r7; -; CHECK-NEXT: max.f32 %r9, %r1, %r5; -; CHECK-NEXT: max.f32 %r10, %r9, %r8; -; CHECK-NEXT: max.f32 %r11, %r2, %r6; -; CHECK-NEXT: max.f32 %r12, %r11, %r4; -; CHECK-NEXT: max.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmax_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmax(<7 x float> %in) ret float %res } -; Check straight line reduction. 
+define float @reduce_fmax_float_nnan(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_nnan( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float_nnan( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmax(<8 x float> %in) + ret float %res +} + +define float @reduce_fmax_float_nnan_nonpow2(<7 x float> %in) { +; 
CHECK-SM80-LABEL: reduce_fmax_float_nnan_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0]; +; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: max.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float_nnan_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0]; +; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmax(<7 x float> %in) + ret float %res +} + define half @reduce_fmin_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmin_half( ; CHECK: { @@ -643,84 +760,256 @@ define half @reduce_fmin_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. 
-define float @reduce_fmin_float(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmin_float( +define half @reduce_fmin_half_nnan(<8 x half> %in) { +; CHECK-LABEL: reduce_fmin_half_nnan( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_half_nnan_param_0]; +; CHECK-NEXT: min.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: min.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: min.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: min.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-NEXT: ret; - %res = call float @llvm.vector.reduce.fmin(<8 x float> %in) - ret float %res + %res = call nnan half @llvm.vector.reduce.fmin(<8 x half> %in) + ret half %res } -define float @reduce_fmin_float_reassoc(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmin_float_reassoc( +define half @reduce_fmin_half_nnan_nonpow2(<7 x half> %in) { +; CHECK-LABEL: reduce_fmin_half_nnan_nonpow2( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<12>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, 
[reduce_fmin_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmin_half_nnan_nonpow2_param_0+8]; +; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1; +; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmin_half_nnan_nonpow2_param_0]; +; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2; +; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmin_half_nnan_nonpow2_param_0+12]; +; CHECK-NEXT: min.f16x2 %r4, %r2, %r1; +; CHECK-NEXT: mov.b16 %rs8, 0x7C00; +; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8}; +; CHECK-NEXT: min.f16x2 %r6, %r3, %r5; +; CHECK-NEXT: min.f16x2 %r7, %r4, %r6; +; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7; +; CHECK-NEXT: min.f16 %rs11, %rs9, %rs10; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs11; ; CHECK-NEXT: ret; + %res = call nnan half @llvm.vector.reduce.fmin(<7 x half> %in) + ret half %res +} + +define float @reduce_fmin_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; 
+; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call float @llvm.vector.reduce.fmin(<8 x float> %in) + ret float %res +} + +define float @reduce_fmin_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6; 
+; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmin(<8 x float> %in) ret float %res } define float @reduce_fmin_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: min.f32 
%r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmin_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: min.f32 %r8, %r3, %r7; -; CHECK-NEXT: min.f32 %r9, %r1, %r5; -; CHECK-NEXT: min.f32 %r10, %r9, %r8; -; CHECK-NEXT: min.f32 %r11, %r2, %r6; -; CHECK-NEXT: min.f32 %r12, %r11, %r4; -; CHECK-NEXT: min.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmin_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmin(<7 x float> %in) ret float %res } -; Check straight-line reduction. 
+define float @reduce_fmin_float_nnan(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_nnan( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float_nnan( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmin(<8 x float> %in) + ret float %res +} + +define float @reduce_fmin_float_nnan_nonpow2(<7 x float> %in) { +; 
CHECK-SM80-LABEL: reduce_fmin_float_nnan_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0]; +; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: min.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float_nnan_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0]; +; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmin(<7 x float> %in) + ret float %res +} + define half @reduce_fmaximum_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmaximum_half( ; CHECK: { @@ -785,84 +1074,131 @@ define half @reduce_fmaximum_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. 
define float @reduce_fmaximum_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmaximum_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmaximum_float( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmaximum_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: 
.reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call float @llvm.vector.reduce.fmaximum(<8 x float> %in) ret float %res } define float @reduce_fmaximum_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmaximum_float_reassoc( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; 
CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmaximum(<8 x float> %in) ret float %res } define float @reduce_fmaximum_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, 
[reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: max.NaN.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmaximum_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: max.NaN.f32 %r8, %r3, %r7; -; CHECK-NEXT: max.NaN.f32 %r9, %r1, %r5; -; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r8; -; CHECK-NEXT: max.NaN.f32 %r11, %r2, %r6; -; CHECK-NEXT: max.NaN.f32 %r12, %r11, %r4; -; CHECK-NEXT: max.NaN.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: max.NaN.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: max.NaN.f32 %r10, 
%r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmaximum(<7 x float> %in) ret float %res } -; Check straight-line reduction. define half @reduce_fminimum_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fminimum_half( ; CHECK: { @@ -927,79 +1263,127 @@ define half @reduce_fminimum_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. define float @reduce_fminimum_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fminimum_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fminimum_float( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, 
%rd1; -; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fminimum_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call float @llvm.vector.reduce.fminimum(<8 x float> %in) ret float %res } define float @reduce_fminimum_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6; +; 
CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fminimum_float_reassoc( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, %r2; +; 
CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fminimum(<8 x float> %in) ret float %res } define float @reduce_fminimum_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: min.NaN.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fminimum_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: min.NaN.f32 %r8, %r3, %r7; -; CHECK-NEXT: min.NaN.f32 %r9, %r1, %r5; -; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r8; -; CHECK-NEXT: min.NaN.f32 %r11, %r2, %r6; -; CHECK-NEXT: min.NaN.f32 %r12, %r11, %r4; -; CHECK-NEXT: min.NaN.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; 
CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: min.NaN.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fminimum(<7 x float> %in) ret float %res } @@ -1014,15 +1398,15 @@ define i16 @reduce_add_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: add.s16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: add.s16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: add.s16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: add.s16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: add.s16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: add.s16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: add.s16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: add.s16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: add.s16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: add.s16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: add.s16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: add.s16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1030,20 +1414,17 @@ define i16 @reduce_add_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_add_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; 
CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0]; ; CHECK-SM100-NEXT: add.s16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: add.s16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: add.s16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: add.s16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: add.s16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.add(<8 x i16> %in) ret i16 %res @@ -1103,13 +1484,13 @@ define i32 @reduce_add_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0]; -; CHECK-NEXT: add.s32 %r9, %r3, %r7; -; CHECK-NEXT: add.s32 %r10, %r1, %r5; -; CHECK-NEXT: add.s32 %r11, %r4, %r8; -; CHECK-NEXT: add.s32 %r12, %r2, %r6; -; CHECK-NEXT: add.s32 %r13, %r12, %r11; -; CHECK-NEXT: add.s32 %r14, %r10, %r9; -; CHECK-NEXT: add.s32 %r15, %r14, %r13; +; CHECK-NEXT: add.s32 %r9, %r4, %r8; +; CHECK-NEXT: add.s32 %r10, %r2, %r6; +; CHECK-NEXT: add.s32 %r11, %r10, %r9; +; CHECK-NEXT: add.s32 %r12, %r3, %r7; +; CHECK-NEXT: add.s32 %r13, %r1, %r5; +; CHECK-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-NEXT: add.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.add(<8 x i32> %in) @@ -1147,15 +1528,15 @@ define i16 @reduce_mul_i16(<8 x i16> %in) { ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i16_param_0]; ; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; 
CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-NEXT: mul.lo.s16 %rs5, %rs3, %rs1; +; CHECK-NEXT: mul.lo.s16 %rs5, %rs4, %rs2; ; CHECK-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-NEXT: mul.lo.s16 %rs10, %rs8, %rs6; -; CHECK-NEXT: mul.lo.s16 %rs11, %rs4, %rs2; -; CHECK-NEXT: mul.lo.s16 %rs12, %rs9, %rs7; -; CHECK-NEXT: mul.lo.s16 %rs13, %rs12, %rs11; -; CHECK-NEXT: mul.lo.s16 %rs14, %rs10, %rs5; -; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs13; +; CHECK-NEXT: mul.lo.s16 %rs10, %rs9, %rs7; +; CHECK-NEXT: mul.lo.s16 %rs11, %rs10, %rs5; +; CHECK-NEXT: mul.lo.s16 %rs12, %rs3, %rs1; +; CHECK-NEXT: mul.lo.s16 %rs13, %rs8, %rs6; +; CHECK-NEXT: mul.lo.s16 %rs14, %rs13, %rs12; +; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs11; ; CHECK-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; @@ -1194,13 +1575,13 @@ define i32 @reduce_mul_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i32_param_0]; -; CHECK-NEXT: mul.lo.s32 %r9, %r3, %r7; -; CHECK-NEXT: mul.lo.s32 %r10, %r1, %r5; -; CHECK-NEXT: mul.lo.s32 %r11, %r4, %r8; -; CHECK-NEXT: mul.lo.s32 %r12, %r2, %r6; -; CHECK-NEXT: mul.lo.s32 %r13, %r12, %r11; -; CHECK-NEXT: mul.lo.s32 %r14, %r10, %r9; -; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r13; +; CHECK-NEXT: mul.lo.s32 %r9, %r4, %r8; +; CHECK-NEXT: mul.lo.s32 %r10, %r2, %r6; +; CHECK-NEXT: mul.lo.s32 %r11, %r10, %r9; +; CHECK-NEXT: mul.lo.s32 %r12, %r3, %r7; +; CHECK-NEXT: mul.lo.s32 %r13, %r1, %r5; +; CHECK-NEXT: mul.lo.s32 %r14, %r13, %r12; +; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.mul(<8 x i32> %in) @@ -1238,15 +1619,15 @@ define i16 @reduce_umax_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0]; ; 
CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: max.u16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: max.u16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: max.u16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: max.u16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: max.u16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: max.u16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: max.u16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: max.u16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: max.u16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: max.u16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: max.u16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: max.u16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1254,20 +1635,17 @@ define i16 @reduce_umax_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_umax_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0]; ; CHECK-SM100-NEXT: max.u16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: max.u16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: max.u16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: max.u16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: max.u16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; 
; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.umax(<8 x i16> %in) ret i16 %res @@ -1327,13 +1705,13 @@ define i32 @reduce_umax_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0]; -; CHECK-NEXT: max.u32 %r9, %r3, %r7; -; CHECK-NEXT: max.u32 %r10, %r1, %r5; -; CHECK-NEXT: max.u32 %r11, %r4, %r8; -; CHECK-NEXT: max.u32 %r12, %r2, %r6; -; CHECK-NEXT: max.u32 %r13, %r12, %r11; -; CHECK-NEXT: max.u32 %r14, %r10, %r9; -; CHECK-NEXT: max.u32 %r15, %r14, %r13; +; CHECK-NEXT: max.u32 %r9, %r4, %r8; +; CHECK-NEXT: max.u32 %r10, %r2, %r6; +; CHECK-NEXT: max.u32 %r11, %r10, %r9; +; CHECK-NEXT: max.u32 %r12, %r3, %r7; +; CHECK-NEXT: max.u32 %r13, %r1, %r5; +; CHECK-NEXT: max.u32 %r14, %r13, %r12; +; CHECK-NEXT: max.u32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.umax(<8 x i32> %in) @@ -1371,15 +1749,15 @@ define i16 @reduce_umin_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: min.u16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: min.u16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: min.u16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: min.u16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: min.u16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: min.u16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: min.u16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: min.u16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: min.u16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: min.u16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: min.u16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: min.u16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: min.u16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: 
min.u16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1387,20 +1765,17 @@ define i16 @reduce_umin_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_umin_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0]; ; CHECK-SM100-NEXT: min.u16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: min.u16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: min.u16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: min.u16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: min.u16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.umin(<8 x i16> %in) ret i16 %res @@ -1460,13 +1835,13 @@ define i32 @reduce_umin_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0]; -; CHECK-NEXT: min.u32 %r9, %r3, %r7; -; CHECK-NEXT: min.u32 %r10, %r1, %r5; -; CHECK-NEXT: min.u32 %r11, %r4, %r8; -; CHECK-NEXT: min.u32 %r12, %r2, %r6; -; CHECK-NEXT: min.u32 %r13, %r12, %r11; -; CHECK-NEXT: min.u32 %r14, %r10, %r9; -; CHECK-NEXT: min.u32 %r15, %r14, %r13; +; CHECK-NEXT: min.u32 %r9, %r4, %r8; +; CHECK-NEXT: min.u32 %r10, %r2, %r6; +; CHECK-NEXT: min.u32 %r11, %r10, %r9; +; CHECK-NEXT: min.u32 %r12, %r3, %r7; +; CHECK-NEXT: min.u32 %r13, %r1, 
%r5; +; CHECK-NEXT: min.u32 %r14, %r13, %r12; +; CHECK-NEXT: min.u32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.umin(<8 x i32> %in) @@ -1504,15 +1879,15 @@ define i16 @reduce_smax_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: max.s16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: max.s16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: max.s16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: max.s16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: max.s16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: max.s16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: max.s16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: max.s16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: max.s16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: max.s16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: max.s16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: max.s16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1520,20 +1895,17 @@ define i16 @reduce_smax_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_smax_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0]; ; CHECK-SM100-NEXT: max.s16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: max.s16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: max.s16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; 
CHECK-SM100-NEXT: max.s16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: max.s16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.smax(<8 x i16> %in) ret i16 %res @@ -1593,13 +1965,13 @@ define i32 @reduce_smax_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0]; -; CHECK-NEXT: max.s32 %r9, %r3, %r7; -; CHECK-NEXT: max.s32 %r10, %r1, %r5; -; CHECK-NEXT: max.s32 %r11, %r4, %r8; -; CHECK-NEXT: max.s32 %r12, %r2, %r6; -; CHECK-NEXT: max.s32 %r13, %r12, %r11; -; CHECK-NEXT: max.s32 %r14, %r10, %r9; -; CHECK-NEXT: max.s32 %r15, %r14, %r13; +; CHECK-NEXT: max.s32 %r9, %r4, %r8; +; CHECK-NEXT: max.s32 %r10, %r2, %r6; +; CHECK-NEXT: max.s32 %r11, %r10, %r9; +; CHECK-NEXT: max.s32 %r12, %r3, %r7; +; CHECK-NEXT: max.s32 %r13, %r1, %r5; +; CHECK-NEXT: max.s32 %r14, %r13, %r12; +; CHECK-NEXT: max.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.smax(<8 x i32> %in) @@ -1637,15 +2009,15 @@ define i16 @reduce_smin_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: min.s16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: min.s16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: min.s16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: min.s16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: min.s16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: 
min.s16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: min.s16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: min.s16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: min.s16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: min.s16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: min.s16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: min.s16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1653,20 +2025,17 @@ define i16 @reduce_smin_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_smin_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0]; ; CHECK-SM100-NEXT: min.s16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: min.s16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: min.s16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: min.s16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: min.s16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.smin(<8 x i16> %in) ret i16 %res @@ -1726,13 +2095,13 @@ define i32 @reduce_smin_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0]; -; CHECK-NEXT: min.s32 %r9, %r3, %r7; -; CHECK-NEXT: min.s32 %r10, %r1, %r5; -; 
CHECK-NEXT: min.s32 %r11, %r4, %r8; -; CHECK-NEXT: min.s32 %r12, %r2, %r6; -; CHECK-NEXT: min.s32 %r13, %r12, %r11; -; CHECK-NEXT: min.s32 %r14, %r10, %r9; -; CHECK-NEXT: min.s32 %r15, %r14, %r13; +; CHECK-NEXT: min.s32 %r9, %r4, %r8; +; CHECK-NEXT: min.s32 %r10, %r2, %r6; +; CHECK-NEXT: min.s32 %r11, %r10, %r9; +; CHECK-NEXT: min.s32 %r12, %r3, %r7; +; CHECK-NEXT: min.s32 %r13, %r1, %r5; +; CHECK-NEXT: min.s32 %r14, %r13, %r12; +; CHECK-NEXT: min.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.smin(<8 x i32> %in) @@ -1761,43 +2130,21 @@ define i32 @reduce_smin_i32_nonpow2(<7 x i32> %in) { } define i16 @reduce_and_i16(<8 x i16> %in) { -; CHECK-SM80-LABEL: reduce_and_i16( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<11>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0]; -; CHECK-SM80-NEXT: and.b32 %r5, %r2, %r4; -; CHECK-SM80-NEXT: and.b32 %r6, %r1, %r3; -; CHECK-SM80-NEXT: and.b32 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: and.b32 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_and_i16( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0]; -; CHECK-SM100-NEXT: and.b32 %r5, %r2, %r4; -; CHECK-SM100-NEXT: and.b32 %r6, %r1, %r3; -; CHECK-SM100-NEXT: and.b32 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; 
CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: and.b32 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_and_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0]; +; CHECK-NEXT: and.b32 %r5, %r2, %r4; +; CHECK-NEXT: and.b32 %r6, %r1, %r3; +; CHECK-NEXT: and.b32 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: and.b16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: ret; %res = call i16 @llvm.vector.reduce.and(<8 x i16> %in) ret i16 %res } @@ -1837,13 +2184,13 @@ define i32 @reduce_and_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0]; -; CHECK-NEXT: and.b32 %r9, %r3, %r7; -; CHECK-NEXT: and.b32 %r10, %r1, %r5; -; CHECK-NEXT: and.b32 %r11, %r4, %r8; -; CHECK-NEXT: and.b32 %r12, %r2, %r6; -; CHECK-NEXT: and.b32 %r13, %r12, %r11; -; CHECK-NEXT: and.b32 %r14, %r10, %r9; -; CHECK-NEXT: and.b32 %r15, %r14, %r13; +; CHECK-NEXT: and.b32 %r9, %r4, %r8; +; CHECK-NEXT: and.b32 %r10, %r2, %r6; +; CHECK-NEXT: and.b32 %r11, %r10, %r9; +; CHECK-NEXT: and.b32 %r12, %r3, %r7; +; CHECK-NEXT: and.b32 %r13, %r1, %r5; +; CHECK-NEXT: and.b32 %r14, %r13, %r12; +; CHECK-NEXT: and.b32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.and(<8 x i32> %in) @@ -1872,43 +2219,21 @@ define i32 @reduce_and_i32_nonpow2(<7 x i32> %in) { } define i16 @reduce_or_i16(<8 x i16> %in) { -; CHECK-SM80-LABEL: reduce_or_i16( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 
%rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<11>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0]; -; CHECK-SM80-NEXT: or.b32 %r5, %r2, %r4; -; CHECK-SM80-NEXT: or.b32 %r6, %r1, %r3; -; CHECK-SM80-NEXT: or.b32 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: or.b32 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_or_i16( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0]; -; CHECK-SM100-NEXT: or.b32 %r5, %r2, %r4; -; CHECK-SM100-NEXT: or.b32 %r6, %r1, %r3; -; CHECK-SM100-NEXT: or.b32 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: or.b32 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_or_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0]; +; CHECK-NEXT: or.b32 %r5, %r2, %r4; +; CHECK-NEXT: or.b32 %r6, %r1, %r3; +; CHECK-NEXT: or.b32 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: or.b16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: ret; %res = call i16 
@llvm.vector.reduce.or(<8 x i16> %in) ret i16 %res } @@ -1948,13 +2273,13 @@ define i32 @reduce_or_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0]; -; CHECK-NEXT: or.b32 %r9, %r3, %r7; -; CHECK-NEXT: or.b32 %r10, %r1, %r5; -; CHECK-NEXT: or.b32 %r11, %r4, %r8; -; CHECK-NEXT: or.b32 %r12, %r2, %r6; -; CHECK-NEXT: or.b32 %r13, %r12, %r11; -; CHECK-NEXT: or.b32 %r14, %r10, %r9; -; CHECK-NEXT: or.b32 %r15, %r14, %r13; +; CHECK-NEXT: or.b32 %r9, %r4, %r8; +; CHECK-NEXT: or.b32 %r10, %r2, %r6; +; CHECK-NEXT: or.b32 %r11, %r10, %r9; +; CHECK-NEXT: or.b32 %r12, %r3, %r7; +; CHECK-NEXT: or.b32 %r13, %r1, %r5; +; CHECK-NEXT: or.b32 %r14, %r13, %r12; +; CHECK-NEXT: or.b32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.or(<8 x i32> %in) @@ -1983,43 +2308,21 @@ define i32 @reduce_or_i32_nonpow2(<7 x i32> %in) { } define i16 @reduce_xor_i16(<8 x i16> %in) { -; CHECK-SM80-LABEL: reduce_xor_i16( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<11>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0]; -; CHECK-SM80-NEXT: xor.b32 %r5, %r2, %r4; -; CHECK-SM80-NEXT: xor.b32 %r6, %r1, %r3; -; CHECK-SM80-NEXT: xor.b32 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: xor.b32 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_xor_i16( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 
%r<11>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0]; -; CHECK-SM100-NEXT: xor.b32 %r5, %r2, %r4; -; CHECK-SM100-NEXT: xor.b32 %r6, %r1, %r3; -; CHECK-SM100-NEXT: xor.b32 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: xor.b32 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_xor_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0]; +; CHECK-NEXT: xor.b32 %r5, %r2, %r4; +; CHECK-NEXT: xor.b32 %r6, %r1, %r3; +; CHECK-NEXT: xor.b32 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: xor.b16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: ret; %res = call i16 @llvm.vector.reduce.xor(<8 x i16> %in) ret i16 %res } @@ -2059,13 +2362,13 @@ define i32 @reduce_xor_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0]; -; CHECK-NEXT: xor.b32 %r9, %r3, %r7; -; CHECK-NEXT: xor.b32 %r10, %r1, %r5; -; CHECK-NEXT: xor.b32 %r11, %r4, %r8; -; CHECK-NEXT: xor.b32 %r12, %r2, %r6; -; CHECK-NEXT: xor.b32 %r13, %r12, %r11; -; CHECK-NEXT: xor.b32 %r14, %r10, %r9; -; CHECK-NEXT: xor.b32 %r15, %r14, %r13; +; CHECK-NEXT: xor.b32 %r9, %r4, %r8; +; CHECK-NEXT: xor.b32 %r10, %r2, %r6; +; CHECK-NEXT: xor.b32 %r11, %r10, %r9; +; CHECK-NEXT: xor.b32 %r12, %r3, %r7; +; CHECK-NEXT: xor.b32 %r13, %r1, %r5; +; CHECK-NEXT: xor.b32 %r14, %r13, %r12; +; 
CHECK-NEXT: xor.b32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.xor(<8 x i32> %in)