Fix details in local live range splitting with regmasks.

Perform all comparisons at instruction granularity, and make sure
register masks on uses count in both gaps.

llvm-svn: 150530
Jakob Stoklund Olesen 2012-02-14 23:51:27 +00:00
parent e7d3f441b5
commit b0c0d340f8
1 changed file with 16 additions and 6 deletions

@@ -1358,18 +1358,28 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
   if (!UsableRegs.empty()) {
     // Get regmask slots for the whole block.
     ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
+    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
     // Constrain to VirtReg's live range.
-    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(), Uses.front())
-                  - RMS.begin();
+    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
+                                   Uses.front().getRegSlot()) - RMS.begin();
     unsigned re = RMS.size();
     for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
-      assert(Uses[i] <= RMS[ri]);
-      if (Uses[i+1] <= RMS[ri])
+      // Look for Uses[i] <= RMS <= Uses[i+1].
+      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
+      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
         continue;
+      // Skip a regmask on the same instruction as the last use. It doesn't
+      // overlap the live range.
+      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
+        break;
+      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
       RegMaskGaps.push_back(i);
-      do ++ri;
-      while (ri != re && RMS[ri] < Uses[i+1]);
+      // Advance ri to the next gap. A regmask on one of the uses counts in
+      // both gaps.
+      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
+        ++ri;
     }
+    DEBUG(dbgs() << '\n');
   }

   // Since we allow local split results to be split again, there is a risk of
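For readers outside the register allocator, here is a minimal, self-contained sketch of the two points in the commit message: comparisons are done per instruction rather than per slot, and a register mask that lands exactly on a use instruction is recorded for the gap on each side of that use. The `Slot` struct, the standalone `isEarlierInstr`/`isSameInstr` helpers, and the instruction numbers are made up for illustration; they are not the LLVM `SlotIndex` API, and the loop omits the initial lower_bound step from the real code.

// Toy model of the gap scan in RAGreedy::tryLocalSplit. A slot index is
// modeled as (instruction number, sub-slot); only the instruction number
// matters for the comparisons below, mirroring the instruction-granularity
// checks in the patch. All names and values are illustrative, not LLVM's.
#include <cassert>
#include <cstdio>
#include <vector>

struct Slot {
  unsigned Instr;   // instruction number
  unsigned SubSlot; // position within the instruction (unused by the checks)
};

static bool isEarlierInstr(Slot A, Slot B) { return A.Instr < B.Instr; }
static bool isSameInstr(Slot A, Slot B) { return A.Instr == B.Instr; }

int main() {
  // Use slots of the virtual register inside one block; NumGaps = Uses.size()-1.
  std::vector<Slot> Uses = {{10, 0}, {14, 0}, {18, 0}};
  // Register-mask slots (e.g. calls) in the same block, sorted by position.
  std::vector<Slot> RMS = {{14, 1}, {20, 0}};
  unsigned NumGaps = Uses.size() - 1;

  std::vector<unsigned> RegMaskGaps;
  unsigned ri = 0, re = RMS.size();
  for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
    // Look for Uses[i] <= RMS <= Uses[i+1], comparing whole instructions.
    assert(!isEarlierInstr(RMS[ri], Uses[i]));
    if (isEarlierInstr(Uses[i + 1], RMS[ri]))
      continue;
    // A regmask on the same instruction as the last use doesn't overlap the
    // local live range at all.
    if (isSameInstr(Uses[i + 1], RMS[ri]) && i + 1 == NumGaps)
      break;
    RegMaskGaps.push_back(i);
    // Advance only past regmasks strictly before the next use, so a regmask
    // sitting exactly on Uses[i+1] is seen again and counted in gap i+1 too.
    while (ri != re && isEarlierInstr(RMS[ri], Uses[i + 1]))
      ++ri;
  }

  for (unsigned G : RegMaskGaps)
    std::printf("gap %u is clobbered by a regmask\n", G);
  // With the data above, the regmask on instruction 14 (a use) is reported
  // for gap 0 and gap 1; the one at instruction 20 lies past the last use
  // and is never reached.
}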