
Commit ffdf95f

[LV] Vectorize maxnum/minnum w/o fast-math flags.
Update LV to vectorize maxnum/minnum reductions without fast-math flags, by adding an extra check in the loop for whether any input to maxnum/minnum is NaN. If any input is NaN:
 * exit the vector loop,
 * compute the reduction result up to the vector iteration that contained NaN inputs, and
 * resume in the scalar loop.
New recurrence kinds are added for reductions using maxnum/minnum without fast-math flags. The new recurrence kinds are not supported in the code that generates IR to perform the reductions, to prevent accidental misuse. Users need to add the required checks ensuring no NaN inputs, and convert to the regular FMin/FMax recurrence kinds.
1 parent 39de14f commit ffdf95f
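
For illustration, a minimal C++ loop of the kind this change targets (not part of the commit; it assumes the compiler lowers std::fmax to the llvm.maxnum intrinsic, as Clang commonly does, and that the reduction starts from a non-NaN constant):

#include <cmath>
#include <cstddef>

// Hypothetical example: an fmax reduction with a non-NaN start value and no
// fast-math flags. Before this change, LV required fast-math flags to
// vectorize this reduction.
float fmaxnum_reduction(const float *Src, size_t N) {
  float Max = -1.0e7f;             // non-NaN constant start value
  for (size_t I = 0; I < N; ++I)
    Max = std::fmax(Max, Src[I]);  // expected to lower to llvm.maxnum.f32
  return Max;
}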

File tree

13 files changed: +355 -44 lines changed


llvm/include/llvm/Analysis/IVDescriptors.h

Lines changed: 2 additions & 0 deletions

@@ -47,6 +47,8 @@ enum class RecurKind {
   FMul,     ///< Product of floats.
   FMin,     ///< FP min implemented in terms of select(cmp()).
   FMax,     ///< FP max implemented in terms of select(cmp()).
+  FMinNumNoFMFs, ///< FP min with llvm.minnum semantics and no fast-math flags.
+  FMaxNumNoFMFs, ///< FP max with llvm.maxnum semantics and no fast-math flags.
   FMinimum, ///< FP min with llvm.minimum semantics
   FMaximum, ///< FP max with llvm.maximum semantics
   FMinimumNum, ///< FP min with llvm.minimumnum semantics

llvm/lib/Analysis/IVDescriptors.cpp

Lines changed: 21 additions & 3 deletions

@@ -941,10 +941,28 @@ RecurrenceDescriptor::InstDesc RecurrenceDescriptor::isRecurrenceInstr(
            m_Intrinsic<Intrinsic::minimumnum>(m_Value(), m_Value())) ||
        match(I, m_Intrinsic<Intrinsic::maximumnum>(m_Value(), m_Value()));
   };
-  if (isIntMinMaxRecurrenceKind(Kind) ||
-      (HasRequiredFMF() && isFPMinMaxRecurrenceKind(Kind)))
+  if (isIntMinMaxRecurrenceKind(Kind))
     return isMinMaxPattern(I, Kind, Prev);
-  else if (isFMulAddIntrinsic(I))
+  if (isFPMinMaxRecurrenceKind(Kind)) {
+    if (HasRequiredFMF())
+      return isMinMaxPattern(I, Kind, Prev);
+    // For FMax/FMin reductions using maxnum/minnum intrinsics with non-NaN
+    // start value, we may be able to vectorize with extra checks ensuring the
+    // inputs are not NaN.
+    auto *StartV = dyn_cast<ConstantFP>(
+        OrigPhi->getIncomingValueForBlock(L->getLoopPredecessor()));
+    if (StartV && !StartV->getValue().isNaN() &&
+        isMinMaxPattern(I, Kind, Prev).isRecurrence()) {
+      if ((Kind == RecurKind::FMax &&
+           match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) ||
+          Kind == RecurKind::FMaxNumNoFMFs)
+        return InstDesc(I, RecurKind::FMaxNumNoFMFs);
+      if ((Kind == RecurKind::FMin &&
+           match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) ||
+          Kind == RecurKind::FMinNumNoFMFs)
+        return InstDesc(I, RecurKind::FMinNumNoFMFs);
+    }
+  } else if (isFMulAddIntrinsic(I))
     return InstDesc(Kind == RecurKind::FMulAdd, I,
                     I->hasAllowReassoc() ? nullptr : I);
   return InstDesc(false, I);

llvm/lib/Transforms/Utils/LoopUtils.cpp

Lines changed: 2 additions & 0 deletions

@@ -938,8 +938,10 @@ constexpr Intrinsic::ID llvm::getReductionIntrinsicID(RecurKind RK) {
   case RecurKind::UMin:
     return Intrinsic::vector_reduce_umin;
   case RecurKind::FMax:
+  case RecurKind::FMaxNumNoFMFs:
    return Intrinsic::vector_reduce_fmax;
   case RecurKind::FMin:
+  case RecurKind::FMinNumNoFMFs:
    return Intrinsic::vector_reduce_fmin;
   case RecurKind::FMaximum:
    return Intrinsic::vector_reduce_fmaximum;

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 12 additions & 2 deletions

@@ -4347,8 +4347,15 @@ bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
     ElementCount VF) const {
   // Cross iteration phis such as reductions need special handling and are
   // currently unsupported.
-  if (any_of(OrigLoop->getHeader()->phis(),
-             [&](PHINode &Phi) { return Legal->isFixedOrderRecurrence(&Phi); }))
+  if (any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
+        if (Legal->isReductionVariable(&Phi)) {
+          RecurKind RK =
+              Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind();
+          return RK == RecurKind::FMinNumNoFMFs ||
+                 RK == RecurKind::FMaxNumNoFMFs;
+        }
+        return Legal->isFixedOrderRecurrence(&Phi);
+      }))
     return false;

   // Phis with uses outside of the loop require special handling and are
@@ -8811,6 +8818,9 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(

   // Adjust the recipes for any inloop reductions.
   adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start);
+  if (!VPlanTransforms::runPass(
+          VPlanTransforms::handleMaxMinNumReductionsWithoutFastMath, *Plan))
+    return nullptr;

   // Transform recipes to abstract recipes if it is legal and beneficial and
   // clamp the range for better cost estimation.

llvm/lib/Transforms/Vectorize/VPlan.h

Lines changed: 9 additions & 7 deletions

@@ -1326,20 +1326,22 @@ class LLVM_ABI_FOR_TEST VPWidenRecipe : public VPRecipeWithIRFlags,
   unsigned Opcode;

 public:
-  VPWidenRecipe(unsigned Opcode, ArrayRef<VPValue *> Operands,
-                const VPIRFlags &Flags, DebugLoc DL)
-      : VPRecipeWithIRFlags(VPDef::VPWidenSC, Operands, Flags, DL),
-        Opcode(Opcode) {}
-
   VPWidenRecipe(Instruction &I, ArrayRef<VPValue *> Operands)
       : VPRecipeWithIRFlags(VPDef::VPWidenSC, Operands, I), VPIRMetadata(I),
         Opcode(I.getOpcode()) {}

+  VPWidenRecipe(unsigned Opcode, ArrayRef<VPValue *> Operands,
+                const VPIRFlags &Flags, const VPIRMetadata &Metadata,
+                DebugLoc DL)
+      : VPRecipeWithIRFlags(VPDef::VPWidenSC, Operands, Flags, DL),
+        VPIRMetadata(Metadata), Opcode(Opcode) {}
+
   ~VPWidenRecipe() override = default;

   VPWidenRecipe *clone() override {
-    auto *R = new VPWidenRecipe(*getUnderlyingInstr(), operands());
-    R->transferFlags(*this);
+    auto *R =
+        new VPWidenRecipe(getOpcode(), operands(), *this, *this, getDebugLoc());
+    R->setUnderlyingValue(getUnderlyingValue());
     return R;
   }

llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp

Lines changed: 1 addition & 0 deletions

@@ -85,6 +85,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) {
     return ResTy;
   }
   case Instruction::ICmp:
+  case Instruction::FCmp:
   case VPInstruction::ActiveLaneMask:
     assert(inferScalarType(R->getOperand(0)) ==
                inferScalarType(R->getOperand(1)) &&

llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp

Lines changed: 131 additions & 0 deletions

@@ -628,3 +628,134 @@ void VPlanTransforms::attachCheckBlock(VPlan &Plan, Value *Cond,
     Term->addMetadata(LLVMContext::MD_prof, BranchWeights);
   }
 }
+
+static VPValue *getMinMaxCompareValue(VPSingleDefRecipe *MinMaxOp,
+                                      VPReductionPHIRecipe *RedPhi) {
+  auto *RepR = dyn_cast<VPReplicateRecipe>(MinMaxOp);
+  if (!isa<VPWidenIntrinsicRecipe>(MinMaxOp) &&
+      !(RepR && isa<IntrinsicInst>(RepR->getUnderlyingInstr())))
+    return nullptr;
+
+  if (MinMaxOp->getOperand(0) == RedPhi)
+    return MinMaxOp->getOperand(1);
+  return MinMaxOp->getOperand(0);
+}
+
+/// Returns true if the VPlan is read-only and execution can be resumed at the
+/// beginning of the last vector iteration in the scalar loop.
+static bool canResumeInScalarLoopFromVectorLoop(VPlan &Plan) {
+  for (VPBlockBase *VPB : vp_depth_first_shallow(
+           Plan.getVectorLoopRegion()->getEntryBasicBlock())) {
+    auto *VPBB = dyn_cast<VPBasicBlock>(VPB);
+    if (!VPBB)
+      return false;
+    for (auto &R : *VPBB) {
+      if (match(&R, m_BranchOnCount(m_VPValue(), m_VPValue())))
+        continue;
+      if (R.mayWriteToMemory())
+        return false;
+    }
+  }
+  return true;
+}
+
+bool VPlanTransforms::handleMaxMinNumReductionsWithoutFastMath(VPlan &Plan) {
+  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
+  VPValue *AnyNaN = nullptr;
+  VPReductionPHIRecipe *RedPhiR = nullptr;
+  VPRecipeWithIRFlags *MinMaxOp = nullptr;
+  bool HasUnsupportedPhi = false;
+  for (auto &R : LoopRegion->getEntryBasicBlock()->phis()) {
+    HasUnsupportedPhi |=
+        !isa<VPCanonicalIVPHIRecipe, VPWidenIntOrFpInductionRecipe,
+             VPReductionPHIRecipe>(&R);
+    auto *Cur = dyn_cast<VPReductionPHIRecipe>(&R);
+    if (!Cur)
+      continue;
+    if (RedPhiR)
+      return false;
+    if (Cur->getRecurrenceKind() != RecurKind::FMaxNumNoFMFs &&
+        Cur->getRecurrenceKind() != RecurKind::FMinNumNoFMFs)
+      continue;
+
+    RedPhiR = Cur;
+    MinMaxOp = dyn_cast<VPRecipeWithIRFlags>(
+        RedPhiR->getBackedgeValue()->getDefiningRecipe());
+    if (!MinMaxOp)
+      return false;
+    VPValue *In = getMinMaxCompareValue(MinMaxOp, RedPhiR);
+    if (!In)
+      return false;
+
+    auto *IsNaN =
+        new VPInstruction(Instruction::FCmp, {In, In}, {CmpInst::FCMP_UNO}, {});
+    IsNaN->insertBefore(MinMaxOp);
+    AnyNaN = new VPInstruction(VPInstruction::AnyOf, {IsNaN});
+    AnyNaN->getDefiningRecipe()->insertAfter(IsNaN);
+  }
+
+  if (!AnyNaN)
+    return true;
+
+  if (HasUnsupportedPhi || !canResumeInScalarLoopFromVectorLoop(Plan))
+    return false;
+
+  auto *MiddleVPBB = Plan.getMiddleBlock();
+  auto *RdxResult = dyn_cast<VPInstruction>(&MiddleVPBB->front());
+  if (!RdxResult ||
+      RdxResult->getOpcode() != VPInstruction::ComputeReductionResult ||
+      RdxResult->getOperand(0) != RedPhiR)
+    return false;
+
+  auto *ScalarPH = Plan.getScalarPreheader();
+  // Update the resume phis in the scalar preheader. They all must either
+  // resume from the reduction result or the canonical induction. Bail out if
+  // there are other resume phis.
+  for (auto &R : ScalarPH->phis()) {
+    auto *ResumeR = cast<VPPhi>(&R);
+    VPValue *VecV = ResumeR->getOperand(0);
+    VPValue *BypassV = ResumeR->getOperand(ResumeR->getNumOperands() - 1);
+    if (VecV != RdxResult && VecV != &Plan.getVectorTripCount())
+      return false;
+    ResumeR->setOperand(
+        1, VecV == &Plan.getVectorTripCount() ? Plan.getCanonicalIV() : VecV);
+    ResumeR->addOperand(BypassV);
+  }
+
+  // Create a new reduction phi recipe with either FMin/FMax, replacing
+  // FMinNumNoFMFs/FMaxNumNoFMFs.
+  RecurKind NewRK = RedPhiR->getRecurrenceKind() != RecurKind::FMinNumNoFMFs
+                        ? RecurKind::FMin
+                        : RecurKind::FMax;
+  auto *NewRedPhiR = new VPReductionPHIRecipe(
+      cast<PHINode>(RedPhiR->getUnderlyingValue()), NewRK,
+      *RedPhiR->getStartValue(), RedPhiR->isInLoop(), RedPhiR->isOrdered());
+  NewRedPhiR->addOperand(RedPhiR->getOperand(1));
+  NewRedPhiR->insertBefore(RedPhiR);
+  RedPhiR->replaceAllUsesWith(NewRedPhiR);
+  RedPhiR->eraseFromParent();
+
+  // Update the loop exit condition to exit if either any of the inputs is NaN
+  // or the vector trip count is reached.
+  VPBasicBlock *LatchVPBB = LoopRegion->getExitingBasicBlock();
+  VPBuilder Builder(LatchVPBB->getTerminator());
+  auto *LatchExitingBranch = cast<VPInstruction>(LatchVPBB->getTerminator());
+  assert(LatchExitingBranch->getOpcode() == VPInstruction::BranchOnCount &&
+         "Unexpected terminator");
+  auto *IsLatchExitTaken =
+      Builder.createICmp(CmpInst::ICMP_EQ, LatchExitingBranch->getOperand(0),
+                         LatchExitingBranch->getOperand(1));
+  auto *AnyExitTaken =
+      Builder.createNaryOp(Instruction::Or, {AnyNaN, IsLatchExitTaken});
+  Builder.createNaryOp(VPInstruction::BranchOnCond, AnyExitTaken);
+  LatchExitingBranch->eraseFromParent();
+
+  // Split the middle block and introduce a new block, branching to the scalar
+  // preheader to resume iteration in the scalar loop if any NaNs have been
+  // encountered.
+  MiddleVPBB->splitAt(std::prev(MiddleVPBB->end()));
+  Builder.setInsertPoint(MiddleVPBB, MiddleVPBB->begin());
+  auto *NewSel =
+      Builder.createSelect(AnyNaN, NewRedPhiR, RdxResult->getOperand(1));
+  RdxResult->setOperand(1, NewSel);
+  Builder.setInsertPoint(MiddleVPBB);
+  Builder.createNaryOp(VPInstruction::BranchOnCond, AnyNaN);
+  VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);
+  MiddleVPBB->swapSuccessors();
+  std::swap(ScalarPH->getPredecessors()[1], ScalarPH->getPredecessors().back());
+  return true;
+}
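
As a rough mental model of the control flow the new transform produces, here is an illustrative C++ sketch (not from the commit; the vectorization factor and names are made up, and the real output is the VPlan/IR shown in the AArch64 test below):

#include <cmath>
#include <cstddef>

// Sketch: process VF elements per chunk; if any element of the current chunk
// is NaN, leave the vector loop with the result of the previous chunks and
// let the scalar loop re-run from the start of the NaN-containing chunk,
// where llvm.maxnum NaN semantics are handled as usual.
float fmax_sketch(const float *Src, size_t N) {
  constexpr size_t VF = 4;             // assumed vectorization factor
  float Red = -1.0e7f;                 // non-NaN start value
  size_t I = 0;
  for (; I + VF <= N; I += VF) {       // vector loop
    bool AnyNaN = false;
    for (size_t J = 0; J < VF; ++J)    // stands in for fcmp uno + AnyOf
      AnyNaN |= std::isnan(Src[I + J]);
    if (AnyNaN)
      break;                           // early exit, keep pre-chunk result
    for (size_t J = 0; J < VF; ++J)    // stands in for the widened maxnum
      Red = std::fmax(Red, Src[I + J]);
  }
  for (; I < N; ++I)                   // scalar loop resumes at chunk start
    Red = std::fmax(Red, Src[I]);
  return Red;
}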

llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp

Lines changed: 6 additions & 2 deletions

@@ -587,6 +587,7 @@ Value *VPInstruction::generate(VPTransformState &State) {
     Value *Op = State.get(getOperand(0), vputils::onlyFirstLaneUsed(this));
     return Builder.CreateFreeze(Op, Name);
   }
+  case Instruction::FCmp:
   case Instruction::ICmp: {
     bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
     Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
@@ -863,7 +864,7 @@ Value *VPInstruction::generate(VPTransformState &State) {
     Value *Res = State.get(getOperand(0));
     for (VPValue *Op : drop_begin(operands()))
       Res = Builder.CreateOr(Res, State.get(Op));
-    return Builder.CreateOrReduce(Res);
+    return State.VF.isScalar() ? Res : Builder.CreateOrReduce(Res);
   }
   case VPInstruction::FirstActiveLane: {
     if (getNumOperands() == 1) {
@@ -1036,6 +1037,7 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const {
   switch (getOpcode()) {
   case Instruction::ExtractElement:
   case Instruction::Freeze:
+  case Instruction::FCmp:
   case Instruction::ICmp:
   case Instruction::Select:
   case VPInstruction::AnyOf:
@@ -1071,6 +1073,7 @@ bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
     return Op == getOperand(1);
   case Instruction::PHI:
     return true;
+  case Instruction::FCmp:
   case Instruction::ICmp:
   case Instruction::Select:
   case Instruction::Or:
@@ -1103,6 +1106,7 @@ bool VPInstruction::onlyFirstPartUsed(const VPValue *Op) const {
   switch (getOpcode()) {
   default:
     return false;
+  case Instruction::FCmp:
   case Instruction::ICmp:
   case Instruction::Select:
     return vputils::onlyFirstPartUsed(this);
@@ -1787,7 +1791,7 @@ bool VPIRFlags::flagsValidForOpcode(unsigned Opcode) const {
     return Opcode == Instruction::ZExt;
     break;
   case OperationType::Cmp:
-    return Opcode == Instruction::ICmp;
+    return Opcode == Instruction::FCmp || Opcode == Instruction::ICmp;
   case OperationType::Other:
     return true;
   }

llvm/lib/Transforms/Vectorize/VPlanTransforms.h

Lines changed: 5 additions & 0 deletions

@@ -99,6 +99,11 @@ struct VPlanTransforms {
   /// not valid.
   static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder);

+  /// Check if \p Plan contains any FMaxNumNoFMFs or FMinNumNoFMFs reductions.
+  /// If it does, try to update the vector loop to exit early if any input is
+  /// NaN and resume executing in the scalar loop to handle the NaNs there.
+  static bool handleMaxMinNumReductionsWithoutFastMath(VPlan &Plan);
+
   /// Clear NSW/NUW flags from reduction instructions if necessary.
   static void clearReductionWrapFlags(VPlan &Plan);

llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll

Lines changed: 45 additions & 7 deletions

@@ -42,18 +42,56 @@ define float @fmaxnum(ptr %src, i64 %n) {
 ; CHECK-LABEL: define float @fmaxnum(
 ; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) {
 ; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ <float -1.000000e+07, float 0xFFF8000000000000, float 0xFFF8000000000000, float 0xFFF8000000000000>, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float 0xFFF8000000000000), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i32 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD2]]
+; CHECK-NEXT: [[TMP5:%.*]] = or <4 x i1> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
+; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]])
+; CHECK-NEXT: [[TMP8]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD2]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP6]], [[TMP9]]
+; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP6]], <4 x float> [[VEC_PHI]], <4 x float> [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = select i1 [[TMP6]], <4 x float> [[VEC_PHI1]], <4 x float> [[TMP8]]
+; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp olt <4 x float> [[TMP11]], [[TMP12]]
+; CHECK-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <4 x i1> [[RDX_MINMAX_CMP]], <4 x float> [[TMP11]], <4 x float> [[TMP12]]
+; CHECK-NEXT: [[TMP13:%.*]] = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[RDX_MINMAX_SELECT]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[SCALAR_PH]], label %[[MIDDLE_BLOCK_SPLIT:.*]]
+; CHECK: [[MIDDLE_BLOCK_SPLIT]]:
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK_SPLIT]] ], [ [[IV]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP13]], %[[MIDDLE_BLOCK_SPLIT]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ], [ -1.000000e+07, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP:.*]]
 ; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[MAX:%.*]] = phi float [ -1.000000e+07, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP_SRC]], align 4
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[MAX:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_SRC1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[IV1]]
+; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP_SRC1]], align 4
 ; CHECK-NEXT: [[MAX_NEXT]] = call float @llvm.maxnum.f32(float [[MAX]], float [[L]])
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
 ; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ]
+; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ], [ [[TMP13]], %[[MIDDLE_BLOCK_SPLIT]] ]
 ; CHECK-NEXT: ret float [[MAX_NEXT_LCSSA]]
 ;
 entry:
