//===-- llvm/CodeGen/VirtRegMap.h - Virtual Register Map -*- C++ -*--------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a virtual register map. This maps virtual registers to
// physical registers and virtual registers to stack slots. It is created and
// updated by a register allocator and then used by a machine code rewriter that
// adds spill code and rewrites virtual into physical register references.
//
//===----------------------------------------------------------------------===//

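// Illustrative usage, not normative: the snippet below sketches how a
// register allocator typically records its decisions in the map and then
// hands the function to the rewriter. `VirtReg`, `PhysReg`, `FrameIndex`,
// `Indexes`, and the surrounding pass boilerplate are hypothetical
// placeholders, not part of this interface.
//
//   VirtRegMap &VRM = getAnalysis<VirtRegMap>();
//   if (PhysReg)                                 // got a register
//     VRM.assignVirt2Phys(VirtReg, PhysReg);
//   else                                         // spill to a stack slot
//     FrameIndex = VRM.assignVirt2StackSlot(VirtReg);
//   ...
//   VRM.rewrite(Indexes);  // replace virtual operands, insert spill code
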
#ifndef LLVM_CODEGEN_VIRTREGMAP_H
#define LLVM_CODEGEN_VIRTREGMAP_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include <map>

namespace llvm {
  class LiveIntervals;
  class MachineInstr;
  class MachineFunction;
  class MachineRegisterInfo;
  class TargetInstrInfo;
  class TargetRegisterInfo;
  class raw_ostream;
  class SlotIndexes;

  class VirtRegMap : public MachineFunctionPass {
  public:
    enum {
      NO_PHYS_REG = 0,
      NO_STACK_SLOT = (1L << 30)-1,
      MAX_STACK_SLOT = (1L << 18)-1
    };

    enum ModRef { isRef = 1, isMod = 2, isModRef = 3 };
    typedef std::multimap<MachineInstr*,
                          std::pair<unsigned, ModRef> > MI2VirtMapTy;

  private:
    MachineRegisterInfo *MRI;
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    MachineFunction *MF;

    DenseMap<const TargetRegisterClass*, BitVector> allocatableRCRegs;

    /// Virt2PhysMap - This is a virtual to physical register
    /// mapping. Each virtual register is required to have an entry in
    /// it; even spilled virtual registers (the register mapped to a
    /// spilled register is the temporary used to load it from the
    /// stack).
    IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysMap;

    /// Virt2StackSlotMap - This is a virtual register to stack slot
    /// mapping. Each spilled virtual register has an entry in it
    /// which corresponds to the stack slot this register is spilled
    /// at.
    IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;

    /// Virt2ReMatIdMap - This is a virtual register to rematerialization id
    /// mapping. Each spilled virtual register that should be remat'd has an
    /// entry in it which corresponds to the remat id.
    IndexedMap<int, VirtReg2IndexFunctor> Virt2ReMatIdMap;

    /// Virt2SplitMap - This is a virtual register to split virtual register
    /// mapping.
    IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;

    /// Virt2SplitKillMap - This is a split virtual register to its last use
    /// (kill) index mapping.
    IndexedMap<SlotIndex, VirtReg2IndexFunctor> Virt2SplitKillMap;

    /// ReMatMap - This is a virtual register to re-materialized instruction
    /// mapping. Each virtual register whose definition is going to be
    /// re-materialized has an entry in it.
    IndexedMap<MachineInstr*, VirtReg2IndexFunctor> ReMatMap;

    /// MI2VirtMap - This is a MachineInstr to virtual register
    /// mapping. In the case of memory spill code being folded into
    /// instructions, we need to know which virtual register was
    /// read/written by this instruction.
    MI2VirtMapTy MI2VirtMap;

    /// SpillPt2VirtMap - This records the virtual registers which should
    /// be spilled right after the MachineInstr due to live interval
    /// splitting.
    std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >
      SpillPt2VirtMap;

    /// RestorePt2VirtMap - This records the virtual registers which should
    /// be restored right before the MachineInstr due to live interval
    /// splitting.
    std::map<MachineInstr*, std::vector<unsigned> > RestorePt2VirtMap;

    /// EmergencySpillMap - This records the physical registers that should
    /// be spilled / restored around the MachineInstr since the register
    /// allocator has run out of registers.
    std::map<MachineInstr*, std::vector<unsigned> > EmergencySpillMap;

    /// EmergencySpillSlots - This records emergency spill slots used to
    /// spill physical registers when the register allocator runs out of
    /// registers. Ideally only one stack slot is used per function per
    /// register class.
    std::map<const TargetRegisterClass*, int> EmergencySpillSlots;

    /// ReMatId - Instead of assigning a stack slot to a virtual register that
    /// is going to be rematerialized, a unique id is assigned. This keeps
    /// track of the highest id used so far. Note, this starts at (1<<18) to
    /// avoid conflicts with stack slot numbers.
    int ReMatId;

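    // Illustration of the id-space split described above (hypothetical client
    // code; `VRM` and `Reg` are placeholders): stack slots occupy
    // [0, MAX_STACK_SLOT] while re-materialization ids are handed out from
    // MAX_STACK_SLOT+1 upward, so the two integer ranges never collide.
    //
    //   if (VRM.getStackSlot(Reg) != VirtRegMap::NO_STACK_SLOT)
    //     ...;  // Reg lives in a frame index in [0, MAX_STACK_SLOT]
    //   else if (VRM.getReMatId(Reg) != VirtRegMap::NO_STACK_SLOT)
    //     ...;  // Reg is rematerialized; its id is > MAX_STACK_SLOT
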
    /// LowSpillSlot, HighSpillSlot - Lowest and highest spill slot indexes.
    int LowSpillSlot, HighSpillSlot;

    /// SpillSlotToUsesMap - Records uses for each register spill slot.
    SmallVector<SmallPtrSet<MachineInstr*, 4>, 8> SpillSlotToUsesMap;

    /// ImplicitDefed - One bit for each virtual register. If set it indicates
    /// the register is implicitly defined.
    BitVector ImplicitDefed;

    /// UnusedRegs - A list of physical registers that have not been used.
    BitVector UnusedRegs;

    /// createSpillSlot - Allocate a spill slot for RC from MFI.
    unsigned createSpillSlot(const TargetRegisterClass *RC);

    VirtRegMap(const VirtRegMap&); // DO NOT IMPLEMENT
    void operator=(const VirtRegMap&); // DO NOT IMPLEMENT

  public:
    static char ID;
    VirtRegMap() : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
                   Virt2StackSlotMap(NO_STACK_SLOT),
                   Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
                   Virt2SplitKillMap(SlotIndex()), ReMatMap(NULL),
                   ReMatId(MAX_STACK_SLOT+1),
                   LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) { }
    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesAll();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    MachineFunction &getMachineFunction() const {
      assert(MF && "getMachineFunction called before runOnMachineFunction");
      return *MF;
    }

    MachineRegisterInfo &getRegInfo() const { return *MRI; }
    const TargetRegisterInfo &getTargetRegInfo() const { return *TRI; }

    void grow();

    /// @brief returns true if the specified virtual register is
    /// mapped to a physical register
    bool hasPhys(unsigned virtReg) const {
      return getPhys(virtReg) != NO_PHYS_REG;
    }

    /// @brief returns the physical register mapped to the specified
    /// virtual register
    unsigned getPhys(unsigned virtReg) const {
      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
      return Virt2PhysMap[virtReg];
    }

    /// @brief creates a mapping for the specified virtual register to
    /// the specified physical register
    void assignVirt2Phys(unsigned virtReg, unsigned physReg) {
      assert(TargetRegisterInfo::isVirtualRegister(virtReg) &&
             TargetRegisterInfo::isPhysicalRegister(physReg));
      assert(Virt2PhysMap[virtReg] == NO_PHYS_REG &&
             "attempt to assign physical register to already mapped "
             "virtual register");
      Virt2PhysMap[virtReg] = physReg;
    }

    /// @brief clears the specified virtual register's physical
    /// register mapping
    void clearVirt(unsigned virtReg) {
      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
      assert(Virt2PhysMap[virtReg] != NO_PHYS_REG &&
             "attempt to clear a not assigned virtual register");
      Virt2PhysMap[virtReg] = NO_PHYS_REG;
    }

    /// @brief clears all virtual to physical register mappings
    void clearAllVirt() {
      Virt2PhysMap.clear();
      grow();
    }

    /// @brief returns the register allocation preference.
    unsigned getRegAllocPref(unsigned virtReg);

    /// @brief records virtReg is a split live interval from SReg.
    void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
      Virt2SplitMap[virtReg] = SReg;
    }

    /// @brief returns the live interval virtReg is split from.
    unsigned getPreSplitReg(unsigned virtReg) const {
      return Virt2SplitMap[virtReg];
    }

    /// getOriginal - Return the original virtual register that VirtReg descends
    /// from through splitting.
    /// A register that was not created by splitting is its own original.
    /// This operation is idempotent.
    unsigned getOriginal(unsigned VirtReg) const {
      unsigned Orig = getPreSplitReg(VirtReg);
      return Orig ? Orig : VirtReg;
    }

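    // Worked example (hypothetical register numbers): if %reg2000 was created
    // by splitting %reg1024, then getPreSplitReg(%reg2000) == %reg1024 and
    // getOriginal(%reg2000) == %reg1024. For a register that was never split,
    // getPreSplitReg returns 0, so getOriginal returns the register itself,
    // which is what makes the operation idempotent.
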
    /// @brief returns true if the specified virtual register is not
    /// mapped to a stack slot or rematerialized.
    bool isAssignedReg(unsigned virtReg) const {
      if (getStackSlot(virtReg) == NO_STACK_SLOT &&
          getReMatId(virtReg) == NO_STACK_SLOT)
        return true;
      // Split register can be assigned a physical register as well as a
      // stack slot or remat id.
      return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
    }

    /// @brief returns the stack slot mapped to the specified virtual
    /// register
    int getStackSlot(unsigned virtReg) const {
      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
      return Virt2StackSlotMap[virtReg];
    }

    /// @brief returns the rematerialization id mapped to the specified virtual
    /// register
    int getReMatId(unsigned virtReg) const {
      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
      return Virt2ReMatIdMap[virtReg];
    }

    /// @brief create a mapping for the specified virtual register to
    /// the next available stack slot
    int assignVirt2StackSlot(unsigned virtReg);
    /// @brief create a mapping for the specified virtual register to
    /// the specified stack slot
    void assignVirt2StackSlot(unsigned virtReg, int frameIndex);

    /// @brief assign a unique re-materialization id to the specified
    /// virtual register.
    int assignVirtReMatId(unsigned virtReg);
    /// @brief assign the specified re-materialization id to the specified
    /// virtual register.
    void assignVirtReMatId(unsigned virtReg, int id);

    /// @brief returns true if the specified virtual register is being
    /// re-materialized.
    bool isReMaterialized(unsigned virtReg) const {
      return ReMatMap[virtReg] != NULL;
    }

    /// @brief returns the original machine instruction being re-issued
    /// to re-materialize the specified virtual register.
    MachineInstr *getReMaterializedMI(unsigned virtReg) const {
      return ReMatMap[virtReg];
    }

    /// @brief records that the specified virtual register will be
    /// re-materialized and the original instruction which will be re-issued
    /// for this purpose.
    void setVirtIsReMaterialized(unsigned virtReg, MachineInstr *def) {
      ReMatMap[virtReg] = def;
    }

    /// @brief record the last use (kill) of a split virtual register.
    void addKillPoint(unsigned virtReg, SlotIndex index) {
      Virt2SplitKillMap[virtReg] = index;
    }

    SlotIndex getKillPoint(unsigned virtReg) const {
      return Virt2SplitKillMap[virtReg];
    }

    /// @brief remove the last use (kill) of a split virtual register.
    void removeKillPoint(unsigned virtReg) {
      Virt2SplitKillMap[virtReg] = SlotIndex();
    }

    /// @brief returns true if the specified MachineInstr is a spill point.
    bool isSpillPt(MachineInstr *Pt) const {
      return SpillPt2VirtMap.find(Pt) != SpillPt2VirtMap.end();
    }

    /// @brief returns the virtual registers that should be spilled due to
    /// splitting right after the specified MachineInstr.
    std::vector<std::pair<unsigned,bool> > &getSpillPtSpills(MachineInstr *Pt) {
      return SpillPt2VirtMap[Pt];
    }

    /// @brief records the specified MachineInstr as a spill point for virtReg.
    void addSpillPoint(unsigned virtReg, bool isKill, MachineInstr *Pt) {
      std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >::iterator
        I = SpillPt2VirtMap.find(Pt);
      if (I != SpillPt2VirtMap.end())
        I->second.push_back(std::make_pair(virtReg, isKill));
      else {
        std::vector<std::pair<unsigned,bool> > Virts;
        Virts.push_back(std::make_pair(virtReg, isKill));
        SpillPt2VirtMap.insert(std::make_pair(Pt, Virts));
      }
    }

    /// @brief - transfer spill point information from one instruction to
    /// another.
    void transferSpillPts(MachineInstr *Old, MachineInstr *New) {
      std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >::iterator
        I = SpillPt2VirtMap.find(Old);
      if (I == SpillPt2VirtMap.end())
        return;
      while (!I->second.empty()) {
        unsigned virtReg = I->second.back().first;
        bool isKill = I->second.back().second;
        I->second.pop_back();
        addSpillPoint(virtReg, isKill, New);
      }
      SpillPt2VirtMap.erase(I);
    }

    /// @brief returns true if the specified MachineInstr is a restore point.
    bool isRestorePt(MachineInstr *Pt) const {
      return RestorePt2VirtMap.find(Pt) != RestorePt2VirtMap.end();
    }

    /// @brief returns the virtual registers that should be restored due to
    /// splitting right before the specified MachineInstr.
    std::vector<unsigned> &getRestorePtRestores(MachineInstr *Pt) {
      return RestorePt2VirtMap[Pt];
    }

    /// @brief records the specified MachineInstr as a restore point for virtReg.
    void addRestorePoint(unsigned virtReg, MachineInstr *Pt) {
      std::map<MachineInstr*, std::vector<unsigned> >::iterator I =
        RestorePt2VirtMap.find(Pt);
      if (I != RestorePt2VirtMap.end())
        I->second.push_back(virtReg);
      else {
        std::vector<unsigned> Virts;
        Virts.push_back(virtReg);
        RestorePt2VirtMap.insert(std::make_pair(Pt, Virts));
      }
    }

    /// @brief - transfer restore point information from one instruction to
    /// another.
    void transferRestorePts(MachineInstr *Old, MachineInstr *New) {
      std::map<MachineInstr*, std::vector<unsigned> >::iterator I =
        RestorePt2VirtMap.find(Old);
      if (I == RestorePt2VirtMap.end())
        return;
      while (!I->second.empty()) {
        unsigned virtReg = I->second.back();
        I->second.pop_back();
        addRestorePoint(virtReg, New);
      }
      RestorePt2VirtMap.erase(I);
    }

    /// @brief records that the specified physical register must be spilled
    /// around the specified machine instr.
    void addEmergencySpill(unsigned PhysReg, MachineInstr *MI) {
      if (EmergencySpillMap.find(MI) != EmergencySpillMap.end())
        EmergencySpillMap[MI].push_back(PhysReg);
      else {
        std::vector<unsigned> PhysRegs;
        PhysRegs.push_back(PhysReg);
        EmergencySpillMap.insert(std::make_pair(MI, PhysRegs));
      }
    }

    /// @brief returns true if one or more physical registers must be spilled
    /// around the specified instruction.
    bool hasEmergencySpills(MachineInstr *MI) const {
      return EmergencySpillMap.find(MI) != EmergencySpillMap.end();
    }

    /// @brief returns the physical registers to be spilled and restored around
    /// the instruction.
    std::vector<unsigned> &getEmergencySpills(MachineInstr *MI) {
      return EmergencySpillMap[MI];
    }

    /// @brief - transfer emergency spill information from one instruction to
    /// another.
    void transferEmergencySpills(MachineInstr *Old, MachineInstr *New) {
      std::map<MachineInstr*,std::vector<unsigned> >::iterator I =
        EmergencySpillMap.find(Old);
      if (I == EmergencySpillMap.end())
        return;
      while (!I->second.empty()) {
        unsigned PhysReg = I->second.back();
        I->second.pop_back();
        addEmergencySpill(PhysReg, New);
      }
      EmergencySpillMap.erase(I);
    }

    /// @brief return or create an emergency spill slot for the register class.
    int getEmergencySpillSlot(const TargetRegisterClass *RC);

    /// @brief Return lowest spill slot index.
    int getLowSpillSlot() const {
      return LowSpillSlot;
    }

    /// @brief Return highest spill slot index.
    int getHighSpillSlot() const {
      return HighSpillSlot;
    }

    /// @brief Records a spill slot use.
    void addSpillSlotUse(int FrameIndex, MachineInstr *MI);

    /// @brief Returns true if spill slot has been used.
    bool isSpillSlotUsed(int FrameIndex) const {
      assert(FrameIndex >= 0 && "Spill slot index should not be negative!");
      return !SpillSlotToUsesMap[FrameIndex-LowSpillSlot].empty();
    }

    /// @brief Mark the specified register as being implicitly defined.
    void setIsImplicitlyDefined(unsigned VirtReg) {
      ImplicitDefed.set(TargetRegisterInfo::virtReg2Index(VirtReg));
    }

    /// @brief Returns true if the virtual register is implicitly defined.
    bool isImplicitlyDefined(unsigned VirtReg) const {
      return ImplicitDefed[TargetRegisterInfo::virtReg2Index(VirtReg)];
    }

    /// @brief Updates information about the specified virtual register's value
    /// folded into newMI machine instruction.
    void virtFolded(unsigned VirtReg, MachineInstr *OldMI, MachineInstr *NewMI,
                    ModRef MRInfo);

    /// @brief Updates information about the specified virtual register's value
    /// folded into the specified machine instruction.
    void virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo);

    /// @brief returns the virtual registers' values folded in memory
    /// operands of this instruction
    std::pair<MI2VirtMapTy::const_iterator, MI2VirtMapTy::const_iterator>
    getFoldedVirts(MachineInstr* MI) const {
      return MI2VirtMap.equal_range(MI);
    }

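    // Sketch of how a rewriter might walk the folded-operand records for an
    // instruction (illustrative only; `VRM` and `MI` are placeholders):
    //
    //   std::pair<VirtRegMap::MI2VirtMapTy::const_iterator,
    //             VirtRegMap::MI2VirtMapTy::const_iterator>
    //     FoldedVirts = VRM.getFoldedVirts(MI);
    //   for (VirtRegMap::MI2VirtMapTy::const_iterator I = FoldedVirts.first;
    //        I != FoldedVirts.second; ++I) {
    //     unsigned VirtReg = I->second.first;        // folded virtual register
    //     VirtRegMap::ModRef MR = I->second.second;  // isRef / isMod / isModRef
    //   }
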
    /// RemoveMachineInstrFromMaps - MI is being erased, remove it from the
    /// folded instruction map and spill point map.
    void RemoveMachineInstrFromMaps(MachineInstr *MI);

    /// FindUnusedRegisters - Gather a list of allocatable registers that
    /// have not been allocated to any virtual register.
    bool FindUnusedRegisters(LiveIntervals* LIs);

    /// HasUnusedRegisters - Return true if there are any allocatable registers
    /// that have not been allocated to any virtual register.
    bool HasUnusedRegisters() const {
      return !UnusedRegs.none();
    }

    /// setRegisterUsed - Remember the physical register is now used.
    void setRegisterUsed(unsigned Reg) {
      UnusedRegs.reset(Reg);
    }

    /// isRegisterUnused - Return true if the physical register has not been
    /// used.
    bool isRegisterUnused(unsigned Reg) const {
      return UnusedRegs[Reg];
    }

    /// getFirstUnusedRegister - Return the first physical register that has not
    /// been used.
    unsigned getFirstUnusedRegister(const TargetRegisterClass *RC) {
      int Reg = UnusedRegs.find_first();
      while (Reg != -1) {
        if (allocatableRCRegs[RC][Reg])
          return (unsigned)Reg;
        Reg = UnusedRegs.find_next(Reg);
      }
      return 0;
    }

    /// rewrite - Rewrite all instructions in MF to use only physical registers
    /// by mapping all virtual register operands to their assigned physical
    /// registers.
    ///
    /// @param Indexes Optionally remove deleted instructions from indexes.
    void rewrite(SlotIndexes *Indexes);

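    // Expected call pattern at the end of register allocation (a sketch;
    // assumes the calling pass has requested the SlotIndexes analysis, and
    // that a null pointer is acceptable when indexes need not be updated):
    //
    //   VRM.rewrite(&getAnalysis<SlotIndexes>());
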
    void print(raw_ostream &OS, const Module* M = 0) const;
    void dump() const;
  };

  inline raw_ostream &operator<<(raw_ostream &OS, const VirtRegMap &VRM) {
    VRM.print(OS);
    return OS;
  }
} // End llvm namespace

#endif