Skip to content

Commit aecb3e1

Browse files
committed
[JSC][GreedyRegAlloc] Add some eviction/spill heuristic experiments
https://bugs.webkit.org/show_bug.cgi?id=288632 rdar://145680413 Reviewed by Keith Miller. A current weak point compared to the graph coloring allocator is spilling choice when under register pressure, which shows up especially on architectures with fewer registers such as x64. So, add some experimental configurations for adjusting the spill heuristic. I'm not sure which will win out yet, but I'd like to have these under git for future reference. I'll go back and clean up the unused ones after things settle. * Source/JavaScriptCore/b3/air/AirAllocateRegistersByGreedy.cpp: (JSC::B3::Air::Greedy::TmpData::CoalescableWith::dump const): (JSC::B3::Air::Greedy::TmpData::spillCost): (JSC::B3::Air::Greedy::TmpData::validate): (JSC::B3::Air::Greedy::GreedyAllocator::buildLiveRanges): (JSC::B3::Air::Greedy::GreedyAllocator::finalizeGroups): (JSC::B3::Air::Greedy::GreedyAllocator::initSpillCosts): (JSC::B3::Air::Greedy::GreedyAllocator::setStageAndEnqueue): (JSC::B3::Air::Greedy::GreedyAllocator::tryEvict): Canonical link: https://commits.webkit.org/291163@main
1 parent 54d96e2 commit aecb3e1

File tree

1 file changed

+32
-12
lines changed

1 file changed

+32
-12
lines changed

Source/JavaScriptCore/b3/air/AirAllocateRegistersByGreedy.cpp

Lines changed: 32 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,9 @@ namespace Greedy {
5656
static constexpr bool eagerGroups = true;
5757
static constexpr bool eagerGroupsSplitFully = false;
5858
static constexpr bool eagerGroupsExhaustiveSearch = false;
59+
static constexpr bool spillCostDivideBySize = false;
60+
static constexpr size_t spillCostSizeBias = 100; // Only relevant when spillCostDivideBySize is true
61+
static constexpr bool evictHeuristicAggregatorIsMax = false;
5962

6063
// Quickly filters out short ranges from live range splitting consideration.
6164
static constexpr size_t splitMinRangeSize = 8;
@@ -513,11 +516,11 @@ struct TmpData {
513516
struct CoalescableWith {
514517
void dump(PrintStream& out) const
515518
{
516-
out.print("(", tmp, ", ", weight, ")");
519+
out.print("(", tmp, ", ", moveCost, ")");
517520
}
518521

519522
Tmp tmp;
520-
float weight;
523+
float moveCost; // The frequency-adjusted number of moves between TmpData's tmp and CoalescableWith.tmp
521524
};
522525

523526
void dump(PrintStream& out) const
@@ -535,13 +538,25 @@ struct TmpData {
535538

536539
float spillCost()
537540
{
538-
return unspillable ? unspillableCost : useDefCost;
541+
ASSERT(liveRange.size()); // 0-sized ranges shouldn't be allocated
542+
if (unspillable)
543+
return unspillableCost;
544+
545+
// Heuristic that favors not spilling higher use/def frequency-adjusted counts and
546+
// shorter ranges. The spillCostSizeBias causes the penalty for larger ranges to
547+
// be more dramatic as the range size gets larger.
548+
if (spillCostDivideBySize)
549+
return useDefCost / (liveRange.size() + spillCostSizeBias);
550+
551+
// Simplest heuristic: favors not spilling higher use/def frequency-adjusted counts.
552+
return useDefCost;
539553
}
540554

541555
void validate()
542556
{
543557
ASSERT(!(spillSlot && assigned));
544558
ASSERT(!!assigned == (stage == Stage::Assigned));
559+
ASSERT(liveRange.intervals().isEmpty() == !liveRange.size());
545560
ASSERT_IMPLIES(spillSlot, stage == Stage::Spilled);
546561
ASSERT_IMPLIES(spillSlot, spillCost() != unspillableCost);
547562
ASSERT_IMPLIES(spillSlot, !isGroup()); // Should have been split
@@ -879,7 +894,7 @@ class GreedyAllocator {
879894
float freq = adjustedBlockFrequency(block);
880895
for (auto& with : tmpData.coalescables) {
881896
if (with.tmp == b) {
882-
with.weight += freq;
897+
with.moveCost += freq;
883898
return;
884899
}
885900
}
@@ -1073,11 +1088,11 @@ class GreedyAllocator {
10731088

10741089
struct Move {
10751090
Tmp tmp0, tmp1;
1076-
float weight;
1091+
float cost;
10771092

10781093
void dump(PrintStream& out) const
10791094
{
1080-
out.print(tmp0, ", ", tmp1, " ", weight);
1095+
out.print(tmp0, ", ", tmp1, " ", cost);
10811096
}
10821097
};
10831098
Vector<Move> moves;
@@ -1087,8 +1102,8 @@ class GreedyAllocator {
10871102
TmpData& data = m_map[tmp];
10881103
std::sort(data.coalescables.begin(), data.coalescables.end(),
10891104
[this] (const auto& a, const auto& b) -> bool {
1090-
if (a.weight != b.weight)
1091-
return a.weight > b.weight;
1105+
if (a.moveCost != b.moveCost)
1106+
return a.moveCost > b.moveCost;
10921107
// Favor coalescing shorter live ranges.
10931108
auto aSize = m_map[a.tmp].liveRange.size();
10941109
auto bSize = m_map[b.tmp].liveRange.size();
@@ -1102,15 +1117,15 @@ class GreedyAllocator {
11021117

11031118
for (auto& with : m_map[tmp].coalescables) {
11041119
if (tmp.tmpIndex(bank) < with.tmp.tmpIndex(bank))
1105-
moves.append({ tmp, with.tmp, with.weight });
1120+
moves.append({ tmp, with.tmp, with.moveCost });
11061121
}
11071122
});
11081123

11091124
ASSERT_IMPLIES(!eagerGroups, moves.isEmpty());
11101125
std::sort(moves.begin(), moves.end(),
11111126
[](Move& a, Move& b) -> bool {
1112-
if (a.weight != b.weight)
1113-
return a.weight > b.weight;
1127+
if (a.cost != b.cost)
1128+
return a.cost > b.cost;
11141129
if (a.tmp0.tmpIndex(bank) != b.tmp0.tmpIndex(bank))
11151130
return a.tmp0.tmpIndex(bank) < b.tmp0.tmpIndex(bank);
11161131
ASSERT(a.tmp1.tmpIndex(bank) != b.tmp1.tmpIndex(bank));
@@ -1236,6 +1251,7 @@ class GreedyAllocator {
12361251
tmpData.unspillable = true;
12371252
m_stats[bank].numUnspillableTmps++;
12381253
}
1254+
tmpData.validate();
12391255
});
12401256
m_code.forEachFastTmp([&](Tmp tmp) {
12411257
if (tmp.bank() != bank)
@@ -1274,6 +1290,7 @@ class GreedyAllocator {
12741290
void setStageAndEnqueue(Tmp tmp, TmpData& tmpData, Stage stage)
12751291
{
12761292
ASSERT(!tmp.isReg());
1293+
ASSERT(m_map[tmp].liveRange.size()); // 0-size ranges don't need a register and spillCost() depends on size() != 0
12771294
ASSERT(stage == Stage::Unspillable || stage == Stage::TryAllocate || stage == Stage::TrySplit || stage == Stage::Spill);
12781295
ASSERT(!tmpData.parentGroup); // Group members should not be enqueued
12791296
ASSERT_IMPLIES(!eagerGroups, !tmpData.isGroup());
@@ -1458,7 +1475,10 @@ class GreedyAllocator {
14581475
conflictsSpillCost = unspillableCost;
14591476
return IterationStatus::Done;
14601477
}
1461-
conflictsSpillCost += cost;
1478+
if (evictHeuristicAggregatorIsMax)
1479+
conflictsSpillCost = std::max(conflictsSpillCost, cost);
1480+
else
1481+
conflictsSpillCost += cost;
14621482
return conflictsSpillCost >= minSpillCost ? IterationStatus::Done : IterationStatus::Continue;
14631483
});
14641484
if (conflictsSpillCost < minSpillCost) {

0 commit comments

Comments
 (0)