forked from OSchip/llvm-project
[LLVM][Alignment] Make functions using log of alignment explicit
Summary: This patch renames functions that take or return alignment as log2, which will help with the transition to llvm::Align. The renaming makes it explicit that we deal with log(alignment) instead of a power-of-two alignment. A few renames uncovered dubious assignments: - `MirParser`/`MirPrinter` was expecting powers of two but `MachineFunction` and `MachineBasicBlock` were using log2(align). This patch fixes it and updates the documentation. - `MachineBlockPlacement` exposes two flags (`align-all-blocks` and `align-all-nofallthru-blocks`) supposedly interpreted as power-of-two alignments; internally these values are interpreted as log2(align). This patch updates the documentation. - `MachineFunction` exposes `align-all-functions`, also interpreted as a power-of-two alignment; internally this value is interpreted as log2(align). This patch updates the documentation. Reviewers: lattner, thegameg, courbet Subscribers: dschuff, arsenm, jyknight, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, javed.absar, hiraditya, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, dexonsmith, PkmX, jocewei, jsji, Jim, s.egerton, llvm-commits, courbet Tags: #llvm Differential Revision: https://reviews.llvm.org/D65945 llvm-svn: 371045
This commit is contained in:
parent
84dd9f4d5b
commit
aff45e4b23
llvm
docs
include/llvm/CodeGen
lib
CodeGen
AsmPrinter
BranchRelaxation.cppMIRParser
MIRPrinter.cppMachineBasicBlock.cppMachineBlockPlacement.cppMachineFunction.cppPatchableFunction.cppTargetLoweringBase.cppTarget
AArch64
AMDGPU
ARC
ARM
ARM.tdARMBasicBlockInfo.cppARMConstantIslandPass.cppARMISelLowering.cppARMSubtarget.cppARMSubtarget.h
AVR
BPF
Hexagon
Lanai
MSP430
Mips
PowerPC
RISCV
Sparc
SystemZ
X86
XCore
test
|
@ -343,6 +343,8 @@ specified in brackets after the block's definition:
|
||||||
.. TODO: Describe the way the reference to an unnamed LLVM IR block can be
|
.. TODO: Describe the way the reference to an unnamed LLVM IR block can be
|
||||||
preserved.
|
preserved.
|
||||||
|
|
||||||
|
``Alignment`` is specified in bytes, and must be a power of two.
|
||||||
|
|
||||||
Machine Instructions
|
Machine Instructions
|
||||||
--------------------
|
--------------------
|
||||||
|
|
||||||
|
@ -614,9 +616,13 @@ following format is used:
|
||||||
alignment: <alignment>
|
alignment: <alignment>
|
||||||
isTargetSpecific: <target-specific>
|
isTargetSpecific: <target-specific>
|
||||||
|
|
||||||
where ``<index>`` is a 32-bit unsigned integer, ``<value>`` is a `LLVM IR Constant
|
where:
|
||||||
<https://www.llvm.org/docs/LangRef.html#constants>`_, alignment is a 32-bit
|
- ``<index>`` is a 32-bit unsigned integer;
|
||||||
unsigned integer, and ``<target-specific>`` is either true or false.
|
- ``<value>`` is a `LLVM IR Constant
|
||||||
|
<https://www.llvm.org/docs/LangRef.html#constants>`_;
|
||||||
|
- ``<alignment>`` is a 32-bit unsigned integer specified in bytes, and must be
|
||||||
|
a power of two;
|
||||||
|
- ``<target-specific>`` is either true or false.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
|
|
|
@ -105,7 +105,7 @@ private:
|
||||||
|
|
||||||
/// Alignment of the basic block. Zero if the basic block does not need to be
|
/// Alignment of the basic block. Zero if the basic block does not need to be
|
||||||
/// aligned. The alignment is specified as log2(bytes).
|
/// aligned. The alignment is specified as log2(bytes).
|
||||||
unsigned Alignment = 0;
|
unsigned LogAlignment = 0;
|
||||||
|
|
||||||
/// Indicate that this basic block is entered via an exception handler.
|
/// Indicate that this basic block is entered via an exception handler.
|
||||||
bool IsEHPad = false;
|
bool IsEHPad = false;
|
||||||
|
@ -374,11 +374,11 @@ public:
|
||||||
|
|
||||||
/// Return alignment of the basic block. The alignment is specified as
|
/// Return alignment of the basic block. The alignment is specified as
|
||||||
/// log2(bytes).
|
/// log2(bytes).
|
||||||
unsigned getAlignment() const { return Alignment; }
|
unsigned getLogAlignment() const { return LogAlignment; }
|
||||||
|
|
||||||
/// Set alignment of the basic block. The alignment is specified as
|
/// Set alignment of the basic block. The alignment is specified as
|
||||||
/// log2(bytes).
|
/// log2(bytes).
|
||||||
void setAlignment(unsigned Align) { Alignment = Align; }
|
void setLogAlignment(unsigned A) { LogAlignment = A; }
|
||||||
|
|
||||||
/// Returns true if the block is a landing pad. That is this basic block is
|
/// Returns true if the block is a landing pad. That is this basic block is
|
||||||
/// entered via an exception handler.
|
/// entered via an exception handler.
|
||||||
|
|
|
@ -277,7 +277,7 @@ class MachineFunction {
|
||||||
unsigned FunctionNumber;
|
unsigned FunctionNumber;
|
||||||
|
|
||||||
/// Alignment - The alignment of the function.
|
/// Alignment - The alignment of the function.
|
||||||
unsigned Alignment;
|
unsigned LogAlignment;
|
||||||
|
|
||||||
/// ExposesReturnsTwice - True if the function calls setjmp or related
|
/// ExposesReturnsTwice - True if the function calls setjmp or related
|
||||||
/// functions with attribute "returns twice", but doesn't have
|
/// functions with attribute "returns twice", but doesn't have
|
||||||
|
@ -508,15 +508,16 @@ public:
|
||||||
const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; }
|
const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; }
|
||||||
WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
|
WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
|
||||||
|
|
||||||
/// getAlignment - Return the alignment (log2, not bytes) of the function.
|
/// getLogAlignment - Return the alignment of the function.
|
||||||
unsigned getAlignment() const { return Alignment; }
|
unsigned getLogAlignment() const { return LogAlignment; }
|
||||||
|
|
||||||
/// setAlignment - Set the alignment (log2, not bytes) of the function.
|
/// setLogAlignment - Set the alignment of the function.
|
||||||
void setAlignment(unsigned A) { Alignment = A; }
|
void setLogAlignment(unsigned A) { LogAlignment = A; }
|
||||||
|
|
||||||
/// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
|
/// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
|
||||||
void ensureAlignment(unsigned A) {
|
void ensureLogAlignment(unsigned A) {
|
||||||
if (Alignment < A) Alignment = A;
|
if (LogAlignment < A)
|
||||||
|
LogAlignment = A;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// exposesReturnsTwice - Returns true if the function calls setjmp or
|
/// exposesReturnsTwice - Returns true if the function calls setjmp or
|
||||||
|
|
|
@ -1582,18 +1582,18 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the minimum function alignment.
|
/// Return the minimum function alignment.
|
||||||
unsigned getMinFunctionAlignment() const {
|
unsigned getMinFunctionLogAlignment() const {
|
||||||
return MinFunctionAlignment;
|
return MinFunctionLogAlignment;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the preferred function alignment.
|
/// Return the preferred function alignment.
|
||||||
unsigned getPrefFunctionAlignment() const {
|
unsigned getPrefFunctionLogAlignment() const {
|
||||||
return PrefFunctionAlignment;
|
return PrefFunctionLogAlignment;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the preferred loop alignment.
|
/// Return the preferred loop alignment.
|
||||||
virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
|
virtual unsigned getPrefLoopLogAlignment(MachineLoop *ML = nullptr) const {
|
||||||
return PrefLoopAlignment;
|
return PrefLoopLogAlignment;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Should loops be aligned even when the function is marked OptSize (but not
|
/// Should loops be aligned even when the function is marked OptSize (but not
|
||||||
|
@ -2105,23 +2105,23 @@ protected:
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the target's minimum function alignment (in log2(bytes))
|
/// Set the target's minimum function alignment (in log2(bytes))
|
||||||
void setMinFunctionAlignment(unsigned Align) {
|
void setMinFunctionLogAlignment(unsigned LogAlign) {
|
||||||
MinFunctionAlignment = Align;
|
MinFunctionLogAlignment = LogAlign;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the target's preferred function alignment. This should be set if
|
/// Set the target's preferred function alignment. This should be set if
|
||||||
/// there is a performance benefit to higher-than-minimum alignment (in
|
/// there is a performance benefit to higher-than-minimum alignment (in
|
||||||
/// log2(bytes))
|
/// log2(bytes))
|
||||||
void setPrefFunctionAlignment(unsigned Align) {
|
void setPrefFunctionLogAlignment(unsigned LogAlign) {
|
||||||
PrefFunctionAlignment = Align;
|
PrefFunctionLogAlignment = LogAlign;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the target's preferred loop alignment. Default alignment is zero, it
|
/// Set the target's preferred loop alignment. Default alignment is zero, it
|
||||||
/// means the target does not care about loop alignment. The alignment is
|
/// means the target does not care about loop alignment. The alignment is
|
||||||
/// specified in log2(bytes). The target may also override
|
/// specified in log2(bytes). The target may also override
|
||||||
/// getPrefLoopAlignment to provide per-loop values.
|
/// getPrefLoopAlignment to provide per-loop values.
|
||||||
void setPrefLoopAlignment(unsigned Align) {
|
void setPrefLoopLogAlignment(unsigned LogAlign) {
|
||||||
PrefLoopAlignment = Align;
|
PrefLoopLogAlignment = LogAlign;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the minimum stack alignment of an argument (in log2(bytes)).
|
/// Set the minimum stack alignment of an argument (in log2(bytes)).
|
||||||
|
@ -2692,14 +2692,14 @@ private:
|
||||||
|
|
||||||
/// The minimum function alignment (used when optimizing for size, and to
|
/// The minimum function alignment (used when optimizing for size, and to
|
||||||
/// prevent explicitly provided alignment from leading to incorrect code).
|
/// prevent explicitly provided alignment from leading to incorrect code).
|
||||||
unsigned MinFunctionAlignment;
|
unsigned MinFunctionLogAlignment;
|
||||||
|
|
||||||
/// The preferred function alignment (used when alignment unspecified and
|
/// The preferred function alignment (used when alignment unspecified and
|
||||||
/// optimizing for speed).
|
/// optimizing for speed).
|
||||||
unsigned PrefFunctionAlignment;
|
unsigned PrefFunctionLogAlignment;
|
||||||
|
|
||||||
/// The preferred loop alignment.
|
/// The preferred loop alignment (in log2, not in bytes).
|
||||||
unsigned PrefLoopAlignment;
|
unsigned PrefLoopLogAlignment;
|
||||||
|
|
||||||
/// Size in bits of the maximum atomics size the backend supports.
|
/// Size in bits of the maximum atomics size the backend supports.
|
||||||
/// Accesses larger than this will be expanded by AtomicExpandPass.
|
/// Accesses larger than this will be expanded by AtomicExpandPass.
|
||||||
|
|
|
@ -667,7 +667,7 @@ void AsmPrinter::EmitFunctionHeader() {
|
||||||
|
|
||||||
EmitLinkage(&F, CurrentFnSym);
|
EmitLinkage(&F, CurrentFnSym);
|
||||||
if (MAI->hasFunctionAlignment())
|
if (MAI->hasFunctionAlignment())
|
||||||
EmitAlignment(MF->getAlignment(), &F);
|
EmitAlignment(MF->getLogAlignment(), &F);
|
||||||
|
|
||||||
if (MAI->hasDotTypeDotSizeDirective())
|
if (MAI->hasDotTypeDotSizeDirective())
|
||||||
OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
|
OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
|
||||||
|
@ -2905,8 +2905,8 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Emit an alignment directive for this block, if needed.
|
// Emit an alignment directive for this block, if needed.
|
||||||
if (unsigned Align = MBB.getAlignment())
|
if (unsigned LogAlign = MBB.getLogAlignment())
|
||||||
EmitAlignment(Align);
|
EmitAlignment(LogAlign);
|
||||||
MCCodePaddingContext Context;
|
MCCodePaddingContext Context;
|
||||||
setupCodePaddingContext(MBB, Context);
|
setupCodePaddingContext(MBB, Context);
|
||||||
OutStreamer->EmitCodePaddingBasicBlockStart(Context);
|
OutStreamer->EmitCodePaddingBasicBlockStart(Context);
|
||||||
|
|
|
@ -203,8 +203,8 @@ void WinException::beginFunclet(const MachineBasicBlock &MBB,
|
||||||
|
|
||||||
// We want our funclet's entry point to be aligned such that no nops will be
|
// We want our funclet's entry point to be aligned such that no nops will be
|
||||||
// present after the label.
|
// present after the label.
|
||||||
Asm->EmitAlignment(std::max(Asm->MF->getAlignment(), MBB.getAlignment()),
|
Asm->EmitAlignment(
|
||||||
&F);
|
std::max(Asm->MF->getLogAlignment(), MBB.getLogAlignment()), &F);
|
||||||
|
|
||||||
// Now that we've emitted the alignment directive, point at our funclet.
|
// Now that we've emitted the alignment directive, point at our funclet.
|
||||||
Asm->OutStreamer->EmitLabel(Sym);
|
Asm->OutStreamer->EmitLabel(Sym);
|
||||||
|
|
|
@ -65,13 +65,13 @@ class BranchRelaxation : public MachineFunctionPass {
|
||||||
/// block.
|
/// block.
|
||||||
unsigned postOffset(const MachineBasicBlock &MBB) const {
|
unsigned postOffset(const MachineBasicBlock &MBB) const {
|
||||||
unsigned PO = Offset + Size;
|
unsigned PO = Offset + Size;
|
||||||
unsigned Align = MBB.getAlignment();
|
unsigned LogAlign = MBB.getLogAlignment();
|
||||||
if (Align == 0)
|
if (LogAlign == 0)
|
||||||
return PO;
|
return PO;
|
||||||
|
|
||||||
unsigned AlignAmt = 1 << Align;
|
unsigned AlignAmt = 1 << LogAlign;
|
||||||
unsigned ParentAlign = MBB.getParent()->getAlignment();
|
unsigned ParentLogAlign = MBB.getParent()->getLogAlignment();
|
||||||
if (Align <= ParentAlign)
|
if (LogAlign <= ParentLogAlign)
|
||||||
return PO + OffsetToAlignment(PO, AlignAmt);
|
return PO + OffsetToAlignment(PO, AlignAmt);
|
||||||
|
|
||||||
// The alignment of this MBB is larger than the function's alignment, so we
|
// The alignment of this MBB is larger than the function's alignment, so we
|
||||||
|
@ -128,9 +128,9 @@ void BranchRelaxation::verify() {
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
unsigned PrevNum = MF->begin()->getNumber();
|
unsigned PrevNum = MF->begin()->getNumber();
|
||||||
for (MachineBasicBlock &MBB : *MF) {
|
for (MachineBasicBlock &MBB : *MF) {
|
||||||
unsigned Align = MBB.getAlignment();
|
unsigned LogAlign = MBB.getLogAlignment();
|
||||||
unsigned Num = MBB.getNumber();
|
unsigned Num = MBB.getNumber();
|
||||||
assert(BlockInfo[Num].Offset % (1u << Align) == 0);
|
assert(BlockInfo[Num].Offset % (1u << LogAlign) == 0);
|
||||||
assert(!Num || BlockInfo[PrevNum].postOffset(MBB) <= BlockInfo[Num].Offset);
|
assert(!Num || BlockInfo[PrevNum].postOffset(MBB) <= BlockInfo[Num].Offset);
|
||||||
assert(BlockInfo[Num].Size == computeBlockSize(MBB));
|
assert(BlockInfo[Num].Size == computeBlockSize(MBB));
|
||||||
PrevNum = Num;
|
PrevNum = Num;
|
||||||
|
|
|
@ -641,7 +641,7 @@ bool MIParser::parseBasicBlockDefinition(
|
||||||
return error(Loc, Twine("redefinition of machine basic block with id #") +
|
return error(Loc, Twine("redefinition of machine basic block with id #") +
|
||||||
Twine(ID));
|
Twine(ID));
|
||||||
if (Alignment)
|
if (Alignment)
|
||||||
MBB->setAlignment(Alignment);
|
MBB->setLogAlignment(Log2_32(Alignment));
|
||||||
if (HasAddressTaken)
|
if (HasAddressTaken)
|
||||||
MBB->setHasAddressTaken();
|
MBB->setHasAddressTaken();
|
||||||
MBB->setIsEHPad(IsLandingPad);
|
MBB->setIsEHPad(IsLandingPad);
|
||||||
|
|
|
@ -393,7 +393,7 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (YamlMF.Alignment)
|
if (YamlMF.Alignment)
|
||||||
MF.setAlignment(YamlMF.Alignment);
|
MF.setLogAlignment(Log2_32(YamlMF.Alignment));
|
||||||
MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
|
MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
|
||||||
MF.setHasWinCFI(YamlMF.HasWinCFI);
|
MF.setHasWinCFI(YamlMF.HasWinCFI);
|
||||||
|
|
||||||
|
|
|
@ -197,7 +197,7 @@ void MIRPrinter::print(const MachineFunction &MF) {
|
||||||
|
|
||||||
yaml::MachineFunction YamlMF;
|
yaml::MachineFunction YamlMF;
|
||||||
YamlMF.Name = MF.getName();
|
YamlMF.Name = MF.getName();
|
||||||
YamlMF.Alignment = MF.getAlignment();
|
YamlMF.Alignment = 1UL << MF.getLogAlignment();
|
||||||
YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
|
YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
|
||||||
YamlMF.HasWinCFI = MF.hasWinCFI();
|
YamlMF.HasWinCFI = MF.hasWinCFI();
|
||||||
|
|
||||||
|
@ -629,9 +629,10 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
|
||||||
OS << "landing-pad";
|
OS << "landing-pad";
|
||||||
HasAttributes = true;
|
HasAttributes = true;
|
||||||
}
|
}
|
||||||
if (MBB.getAlignment()) {
|
if (MBB.getLogAlignment()) {
|
||||||
OS << (HasAttributes ? ", " : " (");
|
OS << (HasAttributes ? ", " : " (");
|
||||||
OS << "align " << MBB.getAlignment();
|
OS << "align "
|
||||||
|
<< (1UL << MBB.getLogAlignment());
|
||||||
HasAttributes = true;
|
HasAttributes = true;
|
||||||
}
|
}
|
||||||
if (HasAttributes)
|
if (HasAttributes)
|
||||||
|
|
|
@ -326,9 +326,9 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
|
||||||
OS << "landing-pad";
|
OS << "landing-pad";
|
||||||
HasAttributes = true;
|
HasAttributes = true;
|
||||||
}
|
}
|
||||||
if (getAlignment()) {
|
if (getLogAlignment()) {
|
||||||
OS << (HasAttributes ? ", " : " (");
|
OS << (HasAttributes ? ", " : " (");
|
||||||
OS << "align " << getAlignment();
|
OS << "align " << getLogAlignment();
|
||||||
HasAttributes = true;
|
HasAttributes = true;
|
||||||
}
|
}
|
||||||
if (HasAttributes)
|
if (HasAttributes)
|
||||||
|
|
|
@ -79,16 +79,17 @@ STATISTIC(CondBranchTakenFreq,
|
||||||
STATISTIC(UncondBranchTakenFreq,
|
STATISTIC(UncondBranchTakenFreq,
|
||||||
"Potential frequency of taking unconditional branches");
|
"Potential frequency of taking unconditional branches");
|
||||||
|
|
||||||
static cl::opt<unsigned> AlignAllBlock("align-all-blocks",
|
static cl::opt<unsigned> AlignAllBlock(
|
||||||
cl::desc("Force the alignment of all "
|
"align-all-blocks",
|
||||||
"blocks in the function."),
|
cl::desc("Force the alignment of all blocks in the function in log2 format "
|
||||||
cl::init(0), cl::Hidden);
|
"(e.g 4 means align on 16B boundaries)."),
|
||||||
|
cl::init(0), cl::Hidden);
|
||||||
|
|
||||||
static cl::opt<unsigned> AlignAllNonFallThruBlocks(
|
static cl::opt<unsigned> AlignAllNonFallThruBlocks(
|
||||||
"align-all-nofallthru-blocks",
|
"align-all-nofallthru-blocks",
|
||||||
cl::desc("Force the alignment of all "
|
cl::desc("Force the alignment of all blocks that have no fall-through "
|
||||||
"blocks that have no fall-through predecessors (i.e. don't add "
|
"predecessors (i.e. don't add nops that are executed). In log2 "
|
||||||
"nops that are executed)."),
|
"format (e.g 4 means align on 16B boundaries)."),
|
||||||
cl::init(0), cl::Hidden);
|
cl::init(0), cl::Hidden);
|
||||||
|
|
||||||
// FIXME: Find a good default for this flag and remove the flag.
|
// FIXME: Find a good default for this flag and remove the flag.
|
||||||
|
@ -2763,8 +2764,8 @@ void MachineBlockPlacement::alignBlocks() {
|
||||||
if (!L)
|
if (!L)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
unsigned Align = TLI->getPrefLoopAlignment(L);
|
unsigned LogAlign = TLI->getPrefLoopLogAlignment(L);
|
||||||
if (!Align)
|
if (!LogAlign)
|
||||||
continue; // Don't care about loop alignment.
|
continue; // Don't care about loop alignment.
|
||||||
|
|
||||||
// If the block is cold relative to the function entry don't waste space
|
// If the block is cold relative to the function entry don't waste space
|
||||||
|
@ -2788,7 +2789,7 @@ void MachineBlockPlacement::alignBlocks() {
|
||||||
// Force alignment if all the predecessors are jumps. We already checked
|
// Force alignment if all the predecessors are jumps. We already checked
|
||||||
// that the block isn't cold above.
|
// that the block isn't cold above.
|
||||||
if (!LayoutPred->isSuccessor(ChainBB)) {
|
if (!LayoutPred->isSuccessor(ChainBB)) {
|
||||||
ChainBB->setAlignment(Align);
|
ChainBB->setLogAlignment(LogAlign);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2800,7 +2801,7 @@ void MachineBlockPlacement::alignBlocks() {
|
||||||
MBPI->getEdgeProbability(LayoutPred, ChainBB);
|
MBPI->getEdgeProbability(LayoutPred, ChainBB);
|
||||||
BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
|
BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
|
||||||
if (LayoutEdgeFreq <= (Freq * ColdProb))
|
if (LayoutEdgeFreq <= (Freq * ColdProb))
|
||||||
ChainBB->setAlignment(Align);
|
ChainBB->setLogAlignment(LogAlign);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3062,14 +3063,14 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
|
||||||
if (AlignAllBlock)
|
if (AlignAllBlock)
|
||||||
// Align all of the blocks in the function to a specific alignment.
|
// Align all of the blocks in the function to a specific alignment.
|
||||||
for (MachineBasicBlock &MBB : MF)
|
for (MachineBasicBlock &MBB : MF)
|
||||||
MBB.setAlignment(AlignAllBlock);
|
MBB.setLogAlignment(AlignAllBlock);
|
||||||
else if (AlignAllNonFallThruBlocks) {
|
else if (AlignAllNonFallThruBlocks) {
|
||||||
// Align all of the blocks that have no fall-through predecessors to a
|
// Align all of the blocks that have no fall-through predecessors to a
|
||||||
// specific alignment.
|
// specific alignment.
|
||||||
for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
|
for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
|
||||||
auto LayoutPred = std::prev(MBI);
|
auto LayoutPred = std::prev(MBI);
|
||||||
if (!LayoutPred->isSuccessor(&*MBI))
|
if (!LayoutPred->isSuccessor(&*MBI))
|
||||||
MBI->setAlignment(AlignAllNonFallThruBlocks);
|
MBI->setLogAlignment(AlignAllNonFallThruBlocks);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (ViewBlockLayoutWithBFI != GVDT_None &&
|
if (ViewBlockLayoutWithBFI != GVDT_None &&
|
||||||
|
|
|
@ -78,10 +78,11 @@ using namespace llvm;
|
||||||
|
|
||||||
#define DEBUG_TYPE "codegen"
|
#define DEBUG_TYPE "codegen"
|
||||||
|
|
||||||
static cl::opt<unsigned>
|
static cl::opt<unsigned> AlignAllFunctions(
|
||||||
AlignAllFunctions("align-all-functions",
|
"align-all-functions",
|
||||||
cl::desc("Force the alignment of all functions."),
|
cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
|
||||||
cl::init(0), cl::Hidden);
|
"means align on 16B boundaries)."),
|
||||||
|
cl::init(0), cl::Hidden);
|
||||||
|
|
||||||
static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
|
static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
|
||||||
using P = MachineFunctionProperties::Property;
|
using P = MachineFunctionProperties::Property;
|
||||||
|
@ -172,16 +173,16 @@ void MachineFunction::init() {
|
||||||
FrameInfo->ensureMaxAlignment(F.getFnStackAlignment());
|
FrameInfo->ensureMaxAlignment(F.getFnStackAlignment());
|
||||||
|
|
||||||
ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
|
ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
|
||||||
Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
|
LogAlignment = STI->getTargetLowering()->getMinFunctionLogAlignment();
|
||||||
|
|
||||||
// FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
|
// FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
|
||||||
// FIXME: Use Function::hasOptSize().
|
// FIXME: Use Function::hasOptSize().
|
||||||
if (!F.hasFnAttribute(Attribute::OptimizeForSize))
|
if (!F.hasFnAttribute(Attribute::OptimizeForSize))
|
||||||
Alignment = std::max(Alignment,
|
LogAlignment = std::max(
|
||||||
STI->getTargetLowering()->getPrefFunctionAlignment());
|
LogAlignment, STI->getTargetLowering()->getPrefFunctionLogAlignment());
|
||||||
|
|
||||||
if (AlignAllFunctions)
|
if (AlignAllFunctions)
|
||||||
Alignment = AlignAllFunctions;
|
LogAlignment = AlignAllFunctions;
|
||||||
|
|
||||||
JumpTableInfo = nullptr;
|
JumpTableInfo = nullptr;
|
||||||
|
|
||||||
|
|
|
@ -78,7 +78,7 @@ bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) {
|
||||||
MIB.add(MO);
|
MIB.add(MO);
|
||||||
|
|
||||||
FirstActualI->eraseFromParent();
|
FirstActualI->eraseFromParent();
|
||||||
MF.ensureAlignment(4);
|
MF.ensureLogAlignment(4);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -583,9 +583,9 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
|
||||||
BooleanFloatContents = UndefinedBooleanContent;
|
BooleanFloatContents = UndefinedBooleanContent;
|
||||||
BooleanVectorContents = UndefinedBooleanContent;
|
BooleanVectorContents = UndefinedBooleanContent;
|
||||||
SchedPreferenceInfo = Sched::ILP;
|
SchedPreferenceInfo = Sched::ILP;
|
||||||
MinFunctionAlignment = 0;
|
MinFunctionLogAlignment = 0;
|
||||||
PrefFunctionAlignment = 0;
|
PrefFunctionLogAlignment = 0;
|
||||||
PrefLoopAlignment = 0;
|
PrefLoopLogAlignment = 0;
|
||||||
GatherAllAliasesMaxDepth = 18;
|
GatherAllAliasesMaxDepth = 18;
|
||||||
MinStackArgumentAlignment = 1;
|
MinStackArgumentAlignment = 1;
|
||||||
// TODO: the default will be switched to 0 in the next commit, along
|
// TODO: the default will be switched to 0 in the next commit, along
|
||||||
|
|
|
@ -640,10 +640,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
|
||||||
EnableExtLdPromotion = true;
|
EnableExtLdPromotion = true;
|
||||||
|
|
||||||
// Set required alignment.
|
// Set required alignment.
|
||||||
setMinFunctionAlignment(2);
|
setMinFunctionLogAlignment(2);
|
||||||
// Set preferred alignments.
|
// Set preferred alignments.
|
||||||
setPrefFunctionAlignment(STI.getPrefFunctionAlignment());
|
setPrefFunctionLogAlignment(STI.getPrefFunctionLogAlignment());
|
||||||
setPrefLoopAlignment(STI.getPrefLoopAlignment());
|
setPrefLoopLogAlignment(STI.getPrefLoopLogAlignment());
|
||||||
|
|
||||||
// Only change the limit for entries in a jump table if specified by
|
// Only change the limit for entries in a jump table if specified by
|
||||||
// the sub target, but not at the command line.
|
// the sub target, but not at the command line.
|
||||||
|
|
|
@ -71,22 +71,22 @@ void AArch64Subtarget::initializeProperties() {
|
||||||
case CortexA35:
|
case CortexA35:
|
||||||
break;
|
break;
|
||||||
case CortexA53:
|
case CortexA53:
|
||||||
PrefFunctionAlignment = 3;
|
PrefFunctionLogAlignment = 3;
|
||||||
break;
|
break;
|
||||||
case CortexA55:
|
case CortexA55:
|
||||||
break;
|
break;
|
||||||
case CortexA57:
|
case CortexA57:
|
||||||
MaxInterleaveFactor = 4;
|
MaxInterleaveFactor = 4;
|
||||||
PrefFunctionAlignment = 4;
|
PrefFunctionLogAlignment = 4;
|
||||||
break;
|
break;
|
||||||
case CortexA65:
|
case CortexA65:
|
||||||
PrefFunctionAlignment = 3;
|
PrefFunctionLogAlignment = 3;
|
||||||
break;
|
break;
|
||||||
case CortexA72:
|
case CortexA72:
|
||||||
case CortexA73:
|
case CortexA73:
|
||||||
case CortexA75:
|
case CortexA75:
|
||||||
case CortexA76:
|
case CortexA76:
|
||||||
PrefFunctionAlignment = 4;
|
PrefFunctionLogAlignment = 4;
|
||||||
break;
|
break;
|
||||||
case Cyclone:
|
case Cyclone:
|
||||||
CacheLineSize = 64;
|
CacheLineSize = 64;
|
||||||
|
@ -97,14 +97,14 @@ void AArch64Subtarget::initializeProperties() {
|
||||||
case ExynosM1:
|
case ExynosM1:
|
||||||
MaxInterleaveFactor = 4;
|
MaxInterleaveFactor = 4;
|
||||||
MaxJumpTableSize = 8;
|
MaxJumpTableSize = 8;
|
||||||
PrefFunctionAlignment = 4;
|
PrefFunctionLogAlignment = 4;
|
||||||
PrefLoopAlignment = 3;
|
PrefLoopLogAlignment = 3;
|
||||||
break;
|
break;
|
||||||
case ExynosM3:
|
case ExynosM3:
|
||||||
MaxInterleaveFactor = 4;
|
MaxInterleaveFactor = 4;
|
||||||
MaxJumpTableSize = 20;
|
MaxJumpTableSize = 20;
|
||||||
PrefFunctionAlignment = 5;
|
PrefFunctionLogAlignment = 5;
|
||||||
PrefLoopAlignment = 4;
|
PrefLoopLogAlignment = 4;
|
||||||
break;
|
break;
|
||||||
case Falkor:
|
case Falkor:
|
||||||
MaxInterleaveFactor = 4;
|
MaxInterleaveFactor = 4;
|
||||||
|
@ -126,10 +126,10 @@ void AArch64Subtarget::initializeProperties() {
|
||||||
MinVectorRegisterBitWidth = 128;
|
MinVectorRegisterBitWidth = 128;
|
||||||
break;
|
break;
|
||||||
case NeoverseE1:
|
case NeoverseE1:
|
||||||
PrefFunctionAlignment = 3;
|
PrefFunctionLogAlignment = 3;
|
||||||
break;
|
break;
|
||||||
case NeoverseN1:
|
case NeoverseN1:
|
||||||
PrefFunctionAlignment = 4;
|
PrefFunctionLogAlignment = 4;
|
||||||
break;
|
break;
|
||||||
case Saphira:
|
case Saphira:
|
||||||
MaxInterleaveFactor = 4;
|
MaxInterleaveFactor = 4;
|
||||||
|
@ -138,8 +138,8 @@ void AArch64Subtarget::initializeProperties() {
|
||||||
break;
|
break;
|
||||||
case ThunderX2T99:
|
case ThunderX2T99:
|
||||||
CacheLineSize = 64;
|
CacheLineSize = 64;
|
||||||
PrefFunctionAlignment = 3;
|
PrefFunctionLogAlignment = 3;
|
||||||
PrefLoopAlignment = 2;
|
PrefLoopLogAlignment = 2;
|
||||||
MaxInterleaveFactor = 4;
|
MaxInterleaveFactor = 4;
|
||||||
PrefetchDistance = 128;
|
PrefetchDistance = 128;
|
||||||
MinPrefetchStride = 1024;
|
MinPrefetchStride = 1024;
|
||||||
|
@ -152,15 +152,15 @@ void AArch64Subtarget::initializeProperties() {
|
||||||
case ThunderXT81:
|
case ThunderXT81:
|
||||||
case ThunderXT83:
|
case ThunderXT83:
|
||||||
CacheLineSize = 128;
|
CacheLineSize = 128;
|
||||||
PrefFunctionAlignment = 3;
|
PrefFunctionLogAlignment = 3;
|
||||||
PrefLoopAlignment = 2;
|
PrefLoopLogAlignment = 2;
|
||||||
// FIXME: remove this to enable 64-bit SLP if performance looks good.
|
// FIXME: remove this to enable 64-bit SLP if performance looks good.
|
||||||
MinVectorRegisterBitWidth = 128;
|
MinVectorRegisterBitWidth = 128;
|
||||||
break;
|
break;
|
||||||
case TSV110:
|
case TSV110:
|
||||||
CacheLineSize = 64;
|
CacheLineSize = 64;
|
||||||
PrefFunctionAlignment = 4;
|
PrefFunctionLogAlignment = 4;
|
||||||
PrefLoopAlignment = 2;
|
PrefLoopLogAlignment = 2;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -198,8 +198,8 @@ protected:
|
||||||
uint16_t PrefetchDistance = 0;
|
uint16_t PrefetchDistance = 0;
|
||||||
uint16_t MinPrefetchStride = 1;
|
uint16_t MinPrefetchStride = 1;
|
||||||
unsigned MaxPrefetchIterationsAhead = UINT_MAX;
|
unsigned MaxPrefetchIterationsAhead = UINT_MAX;
|
||||||
unsigned PrefFunctionAlignment = 0;
|
unsigned PrefFunctionLogAlignment = 0;
|
||||||
unsigned PrefLoopAlignment = 0;
|
unsigned PrefLoopLogAlignment = 0;
|
||||||
unsigned MaxJumpTableSize = 0;
|
unsigned MaxJumpTableSize = 0;
|
||||||
unsigned WideningBaseCost = 0;
|
unsigned WideningBaseCost = 0;
|
||||||
|
|
||||||
|
@ -359,8 +359,10 @@ public:
|
||||||
unsigned getMaxPrefetchIterationsAhead() const {
|
unsigned getMaxPrefetchIterationsAhead() const {
|
||||||
return MaxPrefetchIterationsAhead;
|
return MaxPrefetchIterationsAhead;
|
||||||
}
|
}
|
||||||
unsigned getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
|
unsigned getPrefFunctionLogAlignment() const {
|
||||||
unsigned getPrefLoopAlignment() const { return PrefLoopAlignment; }
|
return PrefFunctionLogAlignment;
|
||||||
|
}
|
||||||
|
unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }
|
||||||
|
|
||||||
unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; }
|
unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; }
|
||||||
|
|
||||||
|
|
|
@ -417,7 +417,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
|
||||||
|
|
||||||
// The starting address of all shader programs must be 256 bytes aligned.
|
// The starting address of all shader programs must be 256 bytes aligned.
|
||||||
// Regular functions just need the basic required instruction alignment.
|
// Regular functions just need the basic required instruction alignment.
|
||||||
MF.setAlignment(MFI->isEntryFunction() ? 8 : 2);
|
MF.setLogAlignment(MFI->isEntryFunction() ? 8 : 2);
|
||||||
|
|
||||||
SetupMachineFunction(MF);
|
SetupMachineFunction(MF);
|
||||||
|
|
||||||
|
|
|
@ -104,7 +104,7 @@ bool R600AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
|
||||||
|
|
||||||
|
|
||||||
// Functions needs to be cacheline (256B) aligned.
|
// Functions needs to be cacheline (256B) aligned.
|
||||||
MF.ensureAlignment(8);
|
MF.ensureLogAlignment(8);
|
||||||
|
|
||||||
SetupMachineFunction(MF);
|
SetupMachineFunction(MF);
|
||||||
|
|
||||||
|
|
|
@ -10681,15 +10681,15 @@ void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
|
||||||
Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
|
Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
|
unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
|
||||||
const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
|
const unsigned PrefLogAlign = TargetLowering::getPrefLoopLogAlignment(ML);
|
||||||
const unsigned CacheLineAlign = 6; // log2(64)
|
const unsigned CacheLineLogAlign = 6; // log2(64)
|
||||||
|
|
||||||
// Pre-GFX10 target did not benefit from loop alignment
|
// Pre-GFX10 target did not benefit from loop alignment
|
||||||
if (!ML || DisableLoopAlignment ||
|
if (!ML || DisableLoopAlignment ||
|
||||||
(getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
|
(getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
|
||||||
getSubtarget()->hasInstFwdPrefetchBug())
|
getSubtarget()->hasInstFwdPrefetchBug())
|
||||||
return PrefAlign;
|
return PrefLogAlign;
|
||||||
|
|
||||||
// On GFX10 I$ is 4 x 64 bytes cache lines.
|
// On GFX10 I$ is 4 x 64 bytes cache lines.
|
||||||
// By default prefetcher keeps one cache line behind and reads two ahead.
|
// By default prefetcher keeps one cache line behind and reads two ahead.
|
||||||
|
@ -10703,28 +10703,28 @@ unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
|
||||||
|
|
||||||
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
|
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
|
||||||
const MachineBasicBlock *Header = ML->getHeader();
|
const MachineBasicBlock *Header = ML->getHeader();
|
||||||
if (Header->getAlignment() != PrefAlign)
|
if (Header->getLogAlignment() != PrefLogAlign)
|
||||||
return Header->getAlignment(); // Already processed.
|
return Header->getLogAlignment(); // Already processed.
|
||||||
|
|
||||||
unsigned LoopSize = 0;
|
unsigned LoopSize = 0;
|
||||||
for (const MachineBasicBlock *MBB : ML->blocks()) {
|
for (const MachineBasicBlock *MBB : ML->blocks()) {
|
||||||
// If inner loop block is aligned assume in average half of the alignment
|
// If inner loop block is aligned assume in average half of the alignment
|
||||||
// size to be added as nops.
|
// size to be added as nops.
|
||||||
if (MBB != Header)
|
if (MBB != Header)
|
||||||
LoopSize += (1 << MBB->getAlignment()) / 2;
|
LoopSize += (1 << MBB->getLogAlignment()) / 2;
|
||||||
|
|
||||||
for (const MachineInstr &MI : *MBB) {
|
for (const MachineInstr &MI : *MBB) {
|
||||||
LoopSize += TII->getInstSizeInBytes(MI);
|
LoopSize += TII->getInstSizeInBytes(MI);
|
||||||
if (LoopSize > 192)
|
if (LoopSize > 192)
|
||||||
return PrefAlign;
|
return PrefLogAlign;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (LoopSize <= 64)
|
if (LoopSize <= 64)
|
||||||
return PrefAlign;
|
return PrefLogAlign;
|
||||||
|
|
||||||
if (LoopSize <= 128)
|
if (LoopSize <= 128)
|
||||||
return CacheLineAlign;
|
return CacheLineLogAlign;
|
||||||
|
|
||||||
// If any of parent loops is surrounded by prefetch instructions do not
|
// If any of parent loops is surrounded by prefetch instructions do not
|
||||||
// insert new for inner loop, which would reset parent's settings.
|
// insert new for inner loop, which would reset parent's settings.
|
||||||
|
@ -10732,7 +10732,7 @@ unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
|
||||||
if (MachineBasicBlock *Exit = P->getExitBlock()) {
|
if (MachineBasicBlock *Exit = P->getExitBlock()) {
|
||||||
auto I = Exit->getFirstNonDebugInstr();
|
auto I = Exit->getFirstNonDebugInstr();
|
||||||
if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
|
if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
|
||||||
return CacheLineAlign;
|
return CacheLineLogAlign;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -10749,7 +10749,7 @@ unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
|
||||||
.addImm(2); // prefetch 1 line behind PC
|
.addImm(2); // prefetch 1 line behind PC
|
||||||
}
|
}
|
||||||
|
|
||||||
return CacheLineAlign;
|
return CacheLineLogAlign;
|
||||||
}
|
}
|
||||||
|
|
||||||
LLVM_ATTRIBUTE_UNUSED
|
LLVM_ATTRIBUTE_UNUSED
|
||||||
|
|
|
@ -379,8 +379,7 @@ public:
|
||||||
unsigned Depth = 0) const override;
|
unsigned Depth = 0) const override;
|
||||||
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
|
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
|
||||||
|
|
||||||
unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
|
unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
|
||||||
|
|
||||||
|
|
||||||
void allocateHSAUserSGPRs(CCState &CCInfo,
|
void allocateHSAUserSGPRs(CCState &CCInfo,
|
||||||
MachineFunction &MF,
|
MachineFunction &MF,
|
||||||
|
|
|
@ -35,7 +35,7 @@ public:
|
||||||
: ReturnStackOffsetSet(false), VarArgsFrameIndex(0),
|
: ReturnStackOffsetSet(false), VarArgsFrameIndex(0),
|
||||||
ReturnStackOffset(-1U), MaxCallStackReq(0) {
|
ReturnStackOffset(-1U), MaxCallStackReq(0) {
|
||||||
// Functions are 4-byte (2**2) aligned.
|
// Functions are 4-byte (2**2) aligned.
|
||||||
MF.setAlignment(2);
|
MF.setLogAlignment(2);
|
||||||
}
|
}
|
||||||
|
|
||||||
~ARCFunctionInfo() {}
|
~ARCFunctionInfo() {}
|
||||||
|
|
|
@ -302,7 +302,7 @@ def FeatureVMLxForwarding : SubtargetFeature<"vmlx-forwarding",
|
||||||
def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Pref32BitThumb", "true",
|
def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Pref32BitThumb", "true",
|
||||||
"Prefer 32-bit Thumb instrs">;
|
"Prefer 32-bit Thumb instrs">;
|
||||||
|
|
||||||
def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopAlignment","2",
|
def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopLogAlignment","2",
|
||||||
"Prefer 32-bit alignment for loops">;
|
"Prefer 32-bit alignment for loops">;
|
||||||
|
|
||||||
def FeatureMVEVectorCostFactor1 : SubtargetFeature<"mve1beat", "MVEVectorCostFactor", "1",
|
def FeatureMVEVectorCostFactor1 : SubtargetFeature<"mve1beat", "MVEVectorCostFactor", "1",
|
||||||
|
|
|
@ -63,7 +63,7 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {
|
||||||
// tBR_JTr contains a .align 2 directive.
|
// tBR_JTr contains a .align 2 directive.
|
||||||
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
|
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
|
||||||
BBI.PostAlign = 2;
|
BBI.PostAlign = 2;
|
||||||
MBB->getParent()->ensureAlignment(2);
|
MBB->getParent()->ensureLogAlignment(2);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -126,7 +126,7 @@ void ARMBasicBlockUtils::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
|
||||||
for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) {
|
for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) {
|
||||||
// Get the offset and known bits at the end of the layout predecessor.
|
// Get the offset and known bits at the end of the layout predecessor.
|
||||||
// Include the alignment of the current block.
|
// Include the alignment of the current block.
|
||||||
unsigned LogAlign = MF.getBlockNumbered(i)->getAlignment();
|
unsigned LogAlign = MF.getBlockNumbered(i)->getLogAlignment();
|
||||||
unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
|
unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
|
||||||
unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
|
unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
|
||||||
|
|
||||||
|
|
|
@ -396,7 +396,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
|
||||||
// Functions with jump tables need an alignment of 4 because they use the ADR
|
// Functions with jump tables need an alignment of 4 because they use the ADR
|
||||||
// instruction, which aligns the PC to 4 bytes before adding an offset.
|
// instruction, which aligns the PC to 4 bytes before adding an offset.
|
||||||
if (!T2JumpTables.empty())
|
if (!T2JumpTables.empty())
|
||||||
MF->ensureAlignment(2);
|
MF->ensureLogAlignment(2);
|
||||||
|
|
||||||
/// Remove dead constant pool entries.
|
/// Remove dead constant pool entries.
|
||||||
MadeChange |= removeUnusedCPEntries();
|
MadeChange |= removeUnusedCPEntries();
|
||||||
|
@ -486,20 +486,21 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
|
||||||
MF->push_back(BB);
|
MF->push_back(BB);
|
||||||
|
|
||||||
// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
|
// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
|
||||||
unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
|
unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment());
|
||||||
|
|
||||||
// Mark the basic block as required by the const-pool.
|
// Mark the basic block as required by the const-pool.
|
||||||
BB->setAlignment(MaxAlign);
|
BB->setLogAlignment(MaxLogAlign);
|
||||||
|
|
||||||
// The function needs to be as aligned as the basic blocks. The linker may
|
// The function needs to be as aligned as the basic blocks. The linker may
|
||||||
// move functions around based on their alignment.
|
// move functions around based on their alignment.
|
||||||
MF->ensureAlignment(BB->getAlignment());
|
MF->ensureLogAlignment(BB->getLogAlignment());
|
||||||
|
|
||||||
// Order the entries in BB by descending alignment. That ensures correct
|
// Order the entries in BB by descending alignment. That ensures correct
|
||||||
// alignment of all entries as long as BB is sufficiently aligned. Keep
|
// alignment of all entries as long as BB is sufficiently aligned. Keep
|
||||||
// track of the insertion point for each alignment. We are going to bucket
|
// track of the insertion point for each alignment. We are going to bucket
|
||||||
// sort the entries as they are created.
|
// sort the entries as they are created.
|
||||||
SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
|
SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
|
||||||
|
BB->end());
|
||||||
|
|
||||||
// Add all of the constants from the constant pool to the end block, use an
|
// Add all of the constants from the constant pool to the end block, use an
|
||||||
// identity mapping of CPI's to CPE's.
|
// identity mapping of CPI's to CPE's.
|
||||||
|
@ -524,7 +525,7 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
|
||||||
|
|
||||||
// Ensure that future entries with higher alignment get inserted before
|
// Ensure that future entries with higher alignment get inserted before
|
||||||
// CPEMI. This is bucket sort with iterators.
|
// CPEMI. This is bucket sort with iterators.
|
||||||
for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
|
for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
|
||||||
if (InsPoint[a] == InsAt)
|
if (InsPoint[a] == InsAt)
|
||||||
InsPoint[a] = CPEMI;
|
InsPoint[a] = CPEMI;
|
||||||
|
|
||||||
|
@ -685,7 +686,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
|
||||||
BBInfoVector &BBInfo = BBUtils->getBBInfo();
|
BBInfoVector &BBInfo = BBUtils->getBBInfo();
|
||||||
// The known bits of the entry block offset are determined by the function
|
// The known bits of the entry block offset are determined by the function
|
||||||
// alignment.
|
// alignment.
|
||||||
BBInfo.front().KnownBits = MF->getAlignment();
|
BBInfo.front().KnownBits = MF->getLogAlignment();
|
||||||
|
|
||||||
// Compute block offsets and known bits.
|
// Compute block offsets and known bits.
|
||||||
BBUtils->adjustBBOffsetsAfter(&MF->front());
|
BBUtils->adjustBBOffsetsAfter(&MF->front());
|
||||||
|
@ -1015,14 +1016,14 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
|
||||||
BBInfoVector &BBInfo = BBUtils->getBBInfo();
|
BBInfoVector &BBInfo = BBUtils->getBBInfo();
|
||||||
unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
|
unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
|
||||||
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
|
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
|
||||||
unsigned NextBlockOffset, NextBlockAlignment;
|
unsigned NextBlockOffset, NextBlockLogAlignment;
|
||||||
MachineFunction::const_iterator NextBlock = Water->getIterator();
|
MachineFunction::const_iterator NextBlock = Water->getIterator();
|
||||||
if (++NextBlock == MF->end()) {
|
if (++NextBlock == MF->end()) {
|
||||||
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
|
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
|
||||||
NextBlockAlignment = 0;
|
NextBlockLogAlignment = 0;
|
||||||
} else {
|
} else {
|
||||||
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
|
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
|
||||||
NextBlockAlignment = NextBlock->getAlignment();
|
NextBlockLogAlignment = NextBlock->getLogAlignment();
|
||||||
}
|
}
|
||||||
unsigned Size = U.CPEMI->getOperand(2).getImm();
|
unsigned Size = U.CPEMI->getOperand(2).getImm();
|
||||||
unsigned CPEEnd = CPEOffset + Size;
|
unsigned CPEEnd = CPEOffset + Size;
|
||||||
|
@ -1034,13 +1035,13 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
|
||||||
Growth = CPEEnd - NextBlockOffset;
|
Growth = CPEEnd - NextBlockOffset;
|
||||||
// Compute the padding that would go at the end of the CPE to align the next
|
// Compute the padding that would go at the end of the CPE to align the next
|
||||||
// block.
|
// block.
|
||||||
Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
|
Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment);
|
||||||
|
|
||||||
// If the CPE is to be inserted before the instruction, that will raise
|
// If the CPE is to be inserted before the instruction, that will raise
|
||||||
// the offset of the instruction. Also account for unknown alignment padding
|
// the offset of the instruction. Also account for unknown alignment padding
|
||||||
// in blocks between CPE and the user.
|
// in blocks between CPE and the user.
|
||||||
if (CPEOffset < UserOffset)
|
if (CPEOffset < UserOffset)
|
||||||
UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
|
UserOffset += Growth + UnknownPadding(MF->getLogAlignment(), CPELogAlign);
|
||||||
} else
|
} else
|
||||||
// CPE fits in existing padding.
|
// CPE fits in existing padding.
|
||||||
Growth = 0;
|
Growth = 0;
|
||||||
|
@ -1315,7 +1316,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
|
||||||
// Try to split the block so it's fully aligned. Compute the latest split
|
// Try to split the block so it's fully aligned. Compute the latest split
|
||||||
// point where we can add a 4-byte branch instruction, and then align to
|
// point where we can add a 4-byte branch instruction, and then align to
|
||||||
// LogAlign which is the largest possible alignment in the function.
|
// LogAlign which is the largest possible alignment in the function.
|
||||||
unsigned LogAlign = MF->getAlignment();
|
unsigned LogAlign = MF->getLogAlignment();
|
||||||
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
|
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
|
||||||
unsigned KnownBits = UserBBI.internalKnownBits();
|
unsigned KnownBits = UserBBI.internalKnownBits();
|
||||||
unsigned UPad = UnknownPadding(LogAlign, KnownBits);
|
unsigned UPad = UnknownPadding(LogAlign, KnownBits);
|
||||||
|
@ -1493,9 +1494,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
|
||||||
// Always align the new block because CP entries can be smaller than 4
|
// Always align the new block because CP entries can be smaller than 4
|
||||||
// bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
|
// bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
|
||||||
// be an already aligned constant pool block.
|
// be an already aligned constant pool block.
|
||||||
const unsigned Align = isThumb ? 1 : 2;
|
const unsigned LogAlign = isThumb ? 1 : 2;
|
||||||
if (NewMBB->getAlignment() < Align)
|
if (NewMBB->getLogAlignment() < LogAlign)
|
||||||
NewMBB->setAlignment(Align);
|
NewMBB->setLogAlignment(LogAlign);
|
||||||
|
|
||||||
// Remove the original WaterList entry; we want subsequent insertions in
|
// Remove the original WaterList entry; we want subsequent insertions in
|
||||||
// this vicinity to go after the one we're about to insert. This
|
// this vicinity to go after the one we're about to insert. This
|
||||||
|
@ -1524,7 +1525,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
|
||||||
decrementCPEReferenceCount(CPI, CPEMI);
|
decrementCPEReferenceCount(CPI, CPEMI);
|
||||||
|
|
||||||
// Mark the basic block as aligned as required by the const-pool entry.
|
// Mark the basic block as aligned as required by the const-pool entry.
|
||||||
NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
|
NewIsland->setLogAlignment(getCPELogAlign(U.CPEMI));
|
||||||
|
|
||||||
// Increase the size of the island block to account for the new entry.
|
// Increase the size of the island block to account for the new entry.
|
||||||
BBUtils->adjustBBSize(NewIsland, Size);
|
BBUtils->adjustBBSize(NewIsland, Size);
|
||||||
|
@ -1558,10 +1559,10 @@ void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
|
||||||
BBInfo[CPEBB->getNumber()].Size = 0;
|
BBInfo[CPEBB->getNumber()].Size = 0;
|
||||||
|
|
||||||
// This block no longer needs to be aligned.
|
// This block no longer needs to be aligned.
|
||||||
CPEBB->setAlignment(0);
|
CPEBB->setLogAlignment(0);
|
||||||
} else
|
} else
|
||||||
// Entries are sorted by descending alignment, so realign from the front.
|
// Entries are sorted by descending alignment, so realign from the front.
|
||||||
CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));
|
CPEBB->setLogAlignment(getCPELogAlign(&*CPEBB->begin()));
|
||||||
|
|
||||||
BBUtils->adjustBBOffsetsAfter(CPEBB);
|
BBUtils->adjustBBOffsetsAfter(CPEBB);
|
||||||
// An island has only one predecessor BB and one successor BB. Check if
|
// An island has only one predecessor BB and one successor BB. Check if
|
||||||
|
|
|
@ -1419,9 +1419,9 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
|
||||||
// Prefer likely predicted branches to selects on out-of-order cores.
|
// Prefer likely predicted branches to selects on out-of-order cores.
|
||||||
PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
|
PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
|
||||||
|
|
||||||
setPrefLoopAlignment(Subtarget->getPrefLoopAlignment());
|
setPrefLoopLogAlignment(Subtarget->getPrefLoopLogAlignment());
|
||||||
|
|
||||||
setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
|
setMinFunctionLogAlignment(Subtarget->isThumb() ? 1 : 2);
|
||||||
|
|
||||||
if (Subtarget->isThumb() || Subtarget->isThumb2())
|
if (Subtarget->isThumb() || Subtarget->isThumb2())
|
||||||
setTargetDAGCombine(ISD::ABS);
|
setTargetDAGCombine(ISD::ABS);
|
||||||
|
|
|
@ -300,7 +300,7 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
|
||||||
LdStMultipleTiming = SingleIssuePlusExtras;
|
LdStMultipleTiming = SingleIssuePlusExtras;
|
||||||
MaxInterleaveFactor = 4;
|
MaxInterleaveFactor = 4;
|
||||||
if (!isThumb())
|
if (!isThumb())
|
||||||
PrefLoopAlignment = 3;
|
PrefLoopLogAlignment = 3;
|
||||||
break;
|
break;
|
||||||
case Kryo:
|
case Kryo:
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -470,7 +470,7 @@ protected:
|
||||||
int PreISelOperandLatencyAdjustment = 2;
|
int PreISelOperandLatencyAdjustment = 2;
|
||||||
|
|
||||||
/// What alignment is preferred for loop bodies, in log2(bytes).
|
/// What alignment is preferred for loop bodies, in log2(bytes).
|
||||||
unsigned PrefLoopAlignment = 0;
|
unsigned PrefLoopLogAlignment = 0;
|
||||||
|
|
||||||
/// The cost factor for MVE instructions, representing the multiple beats an
|
/// The cost factor for MVE instructions, representing the multiple beats an
|
||||||
// instruction can take. The default is 2, (set in initSubtargetFeatures so
|
// instruction can take. The default is 2, (set in initSubtargetFeatures so
|
||||||
|
@ -859,9 +859,7 @@ public:
|
||||||
return isROPI() || !isTargetELF();
|
return isROPI() || !isTargetELF();
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned getPrefLoopAlignment() const {
|
unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }
|
||||||
return PrefLoopAlignment;
|
|
||||||
}
|
|
||||||
|
|
||||||
unsigned getMVEVectorCostFactor() const { return MVEVectorCostFactor; }
|
unsigned getMVEVectorCostFactor() const { return MVEVectorCostFactor; }
|
||||||
|
|
||||||
|
|
|
@ -236,7 +236,7 @@ AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
|
||||||
setLibcallName(RTLIB::SIN_F32, "sin");
|
setLibcallName(RTLIB::SIN_F32, "sin");
|
||||||
setLibcallName(RTLIB::COS_F32, "cos");
|
setLibcallName(RTLIB::COS_F32, "cos");
|
||||||
|
|
||||||
setMinFunctionAlignment(1);
|
setMinFunctionLogAlignment(1);
|
||||||
setMinimumJumpTableEntries(UINT_MAX);
|
setMinimumJumpTableEntries(UINT_MAX);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -133,8 +133,8 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
|
||||||
setBooleanContents(ZeroOrOneBooleanContent);
|
setBooleanContents(ZeroOrOneBooleanContent);
|
||||||
|
|
||||||
// Function alignments (log2)
|
// Function alignments (log2)
|
||||||
setMinFunctionAlignment(3);
|
setMinFunctionLogAlignment(3);
|
||||||
setPrefFunctionAlignment(3);
|
setPrefFunctionLogAlignment(3);
|
||||||
|
|
||||||
if (BPFExpandMemcpyInOrder) {
|
if (BPFExpandMemcpyInOrder) {
|
||||||
// LLVM generic code will try to expand memcpy into load/store pairs at this
|
// LLVM generic code will try to expand memcpy into load/store pairs at this
|
||||||
|
|
|
@ -105,11 +105,11 @@ void HexagonBranchRelaxation::computeOffset(MachineFunction &MF,
|
||||||
// offset of the current instruction from the start.
|
// offset of the current instruction from the start.
|
||||||
unsigned InstOffset = 0;
|
unsigned InstOffset = 0;
|
||||||
for (auto &B : MF) {
|
for (auto &B : MF) {
|
||||||
if (B.getAlignment()) {
|
if (B.getLogAlignment()) {
|
||||||
// Although we don't know the exact layout of the final code, we need
|
// Although we don't know the exact layout of the final code, we need
|
||||||
// to account for alignment padding somehow. This heuristic pads each
|
// to account for alignment padding somehow. This heuristic pads each
|
||||||
// aligned basic block according to the alignment value.
|
// aligned basic block according to the alignment value.
|
||||||
int ByteAlign = (1u << B.getAlignment()) - 1;
|
int ByteAlign = (1u << B.getLogAlignment()) - 1;
|
||||||
InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
|
InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
|
||||||
}
|
}
|
||||||
OffsetMap[&B] = InstOffset;
|
OffsetMap[&B] = InstOffset;
|
||||||
|
|
|
@ -114,11 +114,11 @@ bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) {
|
||||||
|
|
||||||
// First pass - compute the offset of each basic block.
|
// First pass - compute the offset of each basic block.
|
||||||
for (const MachineBasicBlock &MBB : MF) {
|
for (const MachineBasicBlock &MBB : MF) {
|
||||||
if (MBB.getAlignment()) {
|
if (MBB.getLogAlignment()) {
|
||||||
// Although we don't know the exact layout of the final code, we need
|
// Although we don't know the exact layout of the final code, we need
|
||||||
// to account for alignment padding somehow. This heuristic pads each
|
// to account for alignment padding somehow. This heuristic pads each
|
||||||
// aligned basic block according to the alignment value.
|
// aligned basic block according to the alignment value.
|
||||||
int ByteAlign = (1u << MBB.getAlignment()) - 1;
|
int ByteAlign = (1u << MBB.getLogAlignment()) - 1;
|
||||||
InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
|
InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1235,9 +1235,9 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
|
||||||
Subtarget(ST) {
|
Subtarget(ST) {
|
||||||
auto &HRI = *Subtarget.getRegisterInfo();
|
auto &HRI = *Subtarget.getRegisterInfo();
|
||||||
|
|
||||||
setPrefLoopAlignment(4);
|
setPrefLoopLogAlignment(4);
|
||||||
setPrefFunctionAlignment(4);
|
setPrefFunctionLogAlignment(4);
|
||||||
setMinFunctionAlignment(2);
|
setMinFunctionLogAlignment(2);
|
||||||
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
|
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
|
||||||
setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
|
setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
|
||||||
setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);
|
setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);
|
||||||
|
|
|
@ -145,8 +145,8 @@ LanaiTargetLowering::LanaiTargetLowering(const TargetMachine &TM,
|
||||||
setTargetDAGCombine(ISD::XOR);
|
setTargetDAGCombine(ISD::XOR);
|
||||||
|
|
||||||
// Function alignments (log2)
|
// Function alignments (log2)
|
||||||
setMinFunctionAlignment(2);
|
setMinFunctionLogAlignment(2);
|
||||||
setPrefFunctionAlignment(2);
|
setPrefFunctionLogAlignment(2);
|
||||||
|
|
||||||
setJumpIsExpensive(true);
|
setJumpIsExpensive(true);
|
||||||
|
|
||||||
|
|
|
@ -327,8 +327,8 @@ MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM,
|
||||||
setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN);
|
setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN);
|
||||||
// TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll
|
// TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll
|
||||||
|
|
||||||
setMinFunctionAlignment(1);
|
setMinFunctionLogAlignment(1);
|
||||||
setPrefFunctionAlignment(1);
|
setPrefFunctionLogAlignment(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
SDValue MSP430TargetLowering::LowerOperation(SDValue Op,
|
SDValue MSP430TargetLowering::LowerOperation(SDValue Op,
|
||||||
|
|
|
@ -14,7 +14,7 @@
|
||||||
namespace llvm {
|
namespace llvm {
|
||||||
|
|
||||||
// Log2 of the NaCl MIPS sandbox's instruction bundle size.
|
// Log2 of the NaCl MIPS sandbox's instruction bundle size.
|
||||||
static const unsigned MIPS_NACL_BUNDLE_ALIGN = 4u;
|
static const unsigned MIPS_NACL_BUNDLE_LOG_ALIGN = 4u;
|
||||||
|
|
||||||
bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
|
bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
|
||||||
bool *IsStore = nullptr);
|
bool *IsStore = nullptr);
|
||||||
|
|
|
@ -270,7 +270,7 @@ MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context,
|
||||||
S->getAssembler().setRelaxAll(true);
|
S->getAssembler().setRelaxAll(true);
|
||||||
|
|
||||||
// Set bundle-alignment as required by the NaCl ABI for the target.
|
// Set bundle-alignment as required by the NaCl ABI for the target.
|
||||||
S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_ALIGN);
|
S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_LOG_ALIGN);
|
||||||
|
|
||||||
return S;
|
return S;
|
||||||
}
|
}
|
||||||
|
|
|
@ -400,7 +400,7 @@ void MipsAsmPrinter::EmitFunctionEntryLabel() {
|
||||||
// NaCl sandboxing requires that indirect call instructions are masked.
|
// NaCl sandboxing requires that indirect call instructions are masked.
|
||||||
// This means that function entry points should be bundle-aligned.
|
// This means that function entry points should be bundle-aligned.
|
||||||
if (Subtarget->isTargetNaCl())
|
if (Subtarget->isTargetNaCl())
|
||||||
EmitAlignment(std::max(MF->getAlignment(), MIPS_NACL_BUNDLE_ALIGN));
|
EmitAlignment(std::max(MF->getLogAlignment(), MIPS_NACL_BUNDLE_LOG_ALIGN));
|
||||||
|
|
||||||
if (Subtarget->inMicroMipsMode()) {
|
if (Subtarget->inMicroMipsMode()) {
|
||||||
TS.emitDirectiveSetMicroMips();
|
TS.emitDirectiveSetMicroMips();
|
||||||
|
@ -1278,14 +1278,14 @@ void MipsAsmPrinter::NaClAlignIndirectJumpTargets(MachineFunction &MF) {
|
||||||
const std::vector<MachineBasicBlock*> &MBBs = JT[I].MBBs;
|
const std::vector<MachineBasicBlock*> &MBBs = JT[I].MBBs;
|
||||||
|
|
||||||
for (unsigned J = 0; J < MBBs.size(); ++J)
|
for (unsigned J = 0; J < MBBs.size(); ++J)
|
||||||
MBBs[J]->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
|
MBBs[J]->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If basic block address is taken, block can be target of indirect branch.
|
// If basic block address is taken, block can be target of indirect branch.
|
||||||
for (auto &MBB : MF) {
|
for (auto &MBB : MF) {
|
||||||
if (MBB.hasAddressTaken())
|
if (MBB.hasAddressTaken())
|
||||||
MBB.setAlignment(MIPS_NACL_BUNDLE_ALIGN);
|
MBB.setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -507,7 +507,7 @@ void MipsBranchExpansion::expandToLongBranch(MBBInfo &I) {
|
||||||
.addImm(0);
|
.addImm(0);
|
||||||
if (STI->isTargetNaCl())
|
if (STI->isTargetNaCl())
|
||||||
// Bundle-align the target of indirect branch JR.
|
// Bundle-align the target of indirect branch JR.
|
||||||
TgtMBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
|
TgtMBB->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN);
|
||||||
|
|
||||||
// In NaCl, modifying the sp is not allowed in branch delay slot.
|
// In NaCl, modifying the sp is not allowed in branch delay slot.
|
||||||
// For MIPS32R6, we can skip using a delay slot branch.
|
// For MIPS32R6, we can skip using a delay slot branch.
|
||||||
|
|
|
@ -534,21 +534,22 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
|
||||||
MF->push_back(BB);
|
MF->push_back(BB);
|
||||||
|
|
||||||
// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
|
// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
|
||||||
unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
|
unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment());
|
||||||
|
|
||||||
// Mark the basic block as required by the const-pool.
|
// Mark the basic block as required by the const-pool.
|
||||||
// If AlignConstantIslands isn't set, use 4-byte alignment for everything.
|
// If AlignConstantIslands isn't set, use 4-byte alignment for everything.
|
||||||
BB->setAlignment(AlignConstantIslands ? MaxAlign : 2);
|
BB->setLogAlignment(AlignConstantIslands ? MaxLogAlign : 2);
|
||||||
|
|
||||||
// The function needs to be as aligned as the basic blocks. The linker may
|
// The function needs to be as aligned as the basic blocks. The linker may
|
||||||
// move functions around based on their alignment.
|
// move functions around based on their alignment.
|
||||||
MF->ensureAlignment(BB->getAlignment());
|
MF->ensureLogAlignment(BB->getLogAlignment());
|
||||||
|
|
||||||
// Order the entries in BB by descending alignment. That ensures correct
|
// Order the entries in BB by descending alignment. That ensures correct
|
||||||
// alignment of all entries as long as BB is sufficiently aligned. Keep
|
// alignment of all entries as long as BB is sufficiently aligned. Keep
|
||||||
// track of the insertion point for each alignment. We are going to bucket
|
// track of the insertion point for each alignment. We are going to bucket
|
||||||
// sort the entries as they are created.
|
// sort the entries as they are created.
|
||||||
SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
|
SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
|
||||||
|
BB->end());
|
||||||
|
|
||||||
// Add all of the constants from the constant pool to the end block, use an
|
// Add all of the constants from the constant pool to the end block, use an
|
||||||
// identity mapping of CPI's to CPE's.
|
// identity mapping of CPI's to CPE's.
|
||||||
|
@ -576,7 +577,7 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
|
||||||
|
|
||||||
// Ensure that future entries with higher alignment get inserted before
|
// Ensure that future entries with higher alignment get inserted before
|
||||||
// CPEMI. This is bucket sort with iterators.
|
// CPEMI. This is bucket sort with iterators.
|
||||||
for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
|
for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
|
||||||
if (InsPoint[a] == InsAt)
|
if (InsPoint[a] == InsAt)
|
||||||
InsPoint[a] = CPEMI;
|
InsPoint[a] = CPEMI;
|
||||||
// Add a new CPEntry, but no corresponding CPUser yet.
|
// Add a new CPEntry, but no corresponding CPUser yet.
|
||||||
|
@ -942,14 +943,14 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
|
||||||
unsigned &Growth) {
|
unsigned &Growth) {
|
||||||
unsigned CPELogAlign = getCPELogAlign(*U.CPEMI);
|
unsigned CPELogAlign = getCPELogAlign(*U.CPEMI);
|
||||||
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
|
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
|
||||||
unsigned NextBlockOffset, NextBlockAlignment;
|
unsigned NextBlockOffset, NextBlockLogAlignment;
|
||||||
MachineFunction::const_iterator NextBlock = ++Water->getIterator();
|
MachineFunction::const_iterator NextBlock = ++Water->getIterator();
|
||||||
if (NextBlock == MF->end()) {
|
if (NextBlock == MF->end()) {
|
||||||
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
|
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
|
||||||
NextBlockAlignment = 0;
|
NextBlockLogAlignment = 0;
|
||||||
} else {
|
} else {
|
||||||
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
|
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
|
||||||
NextBlockAlignment = NextBlock->getAlignment();
|
NextBlockLogAlignment = NextBlock->getLogAlignment();
|
||||||
}
|
}
|
||||||
unsigned Size = U.CPEMI->getOperand(2).getImm();
|
unsigned Size = U.CPEMI->getOperand(2).getImm();
|
||||||
unsigned CPEEnd = CPEOffset + Size;
|
unsigned CPEEnd = CPEOffset + Size;
|
||||||
|
@ -961,7 +962,7 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
|
||||||
Growth = CPEEnd - NextBlockOffset;
|
Growth = CPEEnd - NextBlockOffset;
|
||||||
// Compute the padding that would go at the end of the CPE to align the next
|
// Compute the padding that would go at the end of the CPE to align the next
|
||||||
// block.
|
// block.
|
||||||
Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
|
Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment);
|
||||||
|
|
||||||
// If the CPE is to be inserted before the instruction, that will raise
|
// If the CPE is to be inserted before the instruction, that will raise
|
||||||
// the offset of the instruction. Also account for unknown alignment padding
|
// the offset of the instruction. Also account for unknown alignment padding
|
||||||
|
@ -1258,7 +1259,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
|
||||||
// Try to split the block so it's fully aligned. Compute the latest split
|
// Try to split the block so it's fully aligned. Compute the latest split
|
||||||
// point where we can add a 4-byte branch instruction, and then align to
|
// point where we can add a 4-byte branch instruction, and then align to
|
||||||
// LogAlign which is the largest possible alignment in the function.
|
// LogAlign which is the largest possible alignment in the function.
|
||||||
unsigned LogAlign = MF->getAlignment();
|
unsigned LogAlign = MF->getLogAlignment();
|
||||||
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
|
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
|
||||||
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
|
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
|
||||||
LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
|
LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
|
||||||
|
@ -1399,7 +1400,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
|
||||||
++NumCPEs;
|
++NumCPEs;
|
||||||
|
|
||||||
// Mark the basic block as aligned as required by the const-pool entry.
|
// Mark the basic block as aligned as required by the const-pool entry.
|
||||||
NewIsland->setAlignment(getCPELogAlign(*U.CPEMI));
|
NewIsland->setLogAlignment(getCPELogAlign(*U.CPEMI));
|
||||||
|
|
||||||
// Increase the size of the island block to account for the new entry.
|
// Increase the size of the island block to account for the new entry.
|
||||||
BBInfo[NewIsland->getNumber()].Size += Size;
|
BBInfo[NewIsland->getNumber()].Size += Size;
|
||||||
|
@ -1431,10 +1432,10 @@ void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
|
||||||
BBInfo[CPEBB->getNumber()].Size = 0;
|
BBInfo[CPEBB->getNumber()].Size = 0;
|
||||||
|
|
||||||
// This block no longer needs to be aligned.
|
// This block no longer needs to be aligned.
|
||||||
CPEBB->setAlignment(0);
|
CPEBB->setLogAlignment(0);
|
||||||
} else
|
} else
|
||||||
// Entries are sorted by descending alignment, so realign from the front.
|
// Entries are sorted by descending alignment, so realign from the front.
|
||||||
CPEBB->setAlignment(getCPELogAlign(*CPEBB->begin()));
|
CPEBB->setLogAlignment(getCPELogAlign(*CPEBB->begin()));
|
||||||
|
|
||||||
adjustBBOffsetsAfter(CPEBB);
|
adjustBBOffsetsAfter(CPEBB);
|
||||||
// An island has only one predecessor BB and one successor BB. Check if
|
// An island has only one predecessor BB and one successor BB. Check if
|
||||||
|
@ -1529,7 +1530,7 @@ MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
|
||||||
// We should have a way to back out this alignment restriction if we "can" later.
|
// We should have a way to back out this alignment restriction if we "can" later.
|
||||||
// but it is not harmful.
|
// but it is not harmful.
|
||||||
//
|
//
|
||||||
DestBB->setAlignment(2);
|
DestBB->setLogAlignment(2);
|
||||||
Br.MaxDisp = ((1<<24)-1) * 2;
|
Br.MaxDisp = ((1<<24)-1) * 2;
|
||||||
MI->setDesc(TII->get(Mips::JalB16));
|
MI->setDesc(TII->get(Mips::JalB16));
|
||||||
}
|
}
|
||||||
|
|
|
@ -518,7 +518,7 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
|
||||||
setLibcallName(RTLIB::SRA_I128, nullptr);
|
setLibcallName(RTLIB::SRA_I128, nullptr);
|
||||||
}
|
}
|
||||||
|
|
||||||
setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2);
|
setMinFunctionLogAlignment(Subtarget.isGP64bit() ? 3 : 2);
|
||||||
|
|
||||||
// The arguments on the stack are defined in terms of 4-byte slots on O32
|
// The arguments on the stack are defined in terms of 4-byte slots on O32
|
||||||
// and 8-byte slots on N32/N64.
|
// and 8-byte slots on N32/N64.
|
||||||
|
|
|
@ -81,14 +81,14 @@ FunctionPass *llvm::createPPCBranchSelectionPass() {
|
||||||
/// original Offset.
|
/// original Offset.
|
||||||
unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB,
|
unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB,
|
||||||
unsigned Offset) {
|
unsigned Offset) {
|
||||||
unsigned Align = MBB.getAlignment();
|
unsigned LogAlign = MBB.getLogAlignment();
|
||||||
if (!Align)
|
if (!LogAlign)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
unsigned AlignAmt = 1 << Align;
|
unsigned AlignAmt = 1 << LogAlign;
|
||||||
unsigned ParentAlign = MBB.getParent()->getAlignment();
|
unsigned ParentLogAlign = MBB.getParent()->getLogAlignment();
|
||||||
|
|
||||||
if (Align <= ParentAlign)
|
if (LogAlign <= ParentLogAlign)
|
||||||
return OffsetToAlignment(Offset, AlignAmt);
|
return OffsetToAlignment(Offset, AlignAmt);
|
||||||
|
|
||||||
// The alignment of this MBB is larger than the function's alignment, so we
|
// The alignment of this MBB is larger than the function's alignment, so we
|
||||||
|
@ -179,21 +179,21 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
|
||||||
const MachineBasicBlock *Dest,
|
const MachineBasicBlock *Dest,
|
||||||
unsigned BrOffset) {
|
unsigned BrOffset) {
|
||||||
int BranchSize;
|
int BranchSize;
|
||||||
unsigned MaxAlign = 2;
|
unsigned MaxLogAlign = 2;
|
||||||
bool NeedExtraAdjustment = false;
|
bool NeedExtraAdjustment = false;
|
||||||
if (Dest->getNumber() <= Src->getNumber()) {
|
if (Dest->getNumber() <= Src->getNumber()) {
|
||||||
// If this is a backwards branch, the delta is the offset from the
|
// If this is a backwards branch, the delta is the offset from the
|
||||||
// start of this block to this branch, plus the sizes of all blocks
|
// start of this block to this branch, plus the sizes of all blocks
|
||||||
// from this block to the dest.
|
// from this block to the dest.
|
||||||
BranchSize = BrOffset;
|
BranchSize = BrOffset;
|
||||||
MaxAlign = std::max(MaxAlign, Src->getAlignment());
|
MaxLogAlign = std::max(MaxLogAlign, Src->getLogAlignment());
|
||||||
|
|
||||||
int DestBlock = Dest->getNumber();
|
int DestBlock = Dest->getNumber();
|
||||||
BranchSize += BlockSizes[DestBlock].first;
|
BranchSize += BlockSizes[DestBlock].first;
|
||||||
for (unsigned i = DestBlock+1, e = Src->getNumber(); i < e; ++i) {
|
for (unsigned i = DestBlock+1, e = Src->getNumber(); i < e; ++i) {
|
||||||
BranchSize += BlockSizes[i].first;
|
BranchSize += BlockSizes[i].first;
|
||||||
MaxAlign = std::max(MaxAlign,
|
MaxLogAlign =
|
||||||
Fn.getBlockNumbered(i)->getAlignment());
|
std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment());
|
||||||
}
|
}
|
||||||
|
|
||||||
NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
|
NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
|
||||||
|
@ -204,11 +204,11 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
|
||||||
unsigned StartBlock = Src->getNumber();
|
unsigned StartBlock = Src->getNumber();
|
||||||
BranchSize = BlockSizes[StartBlock].first - BrOffset;
|
BranchSize = BlockSizes[StartBlock].first - BrOffset;
|
||||||
|
|
||||||
MaxAlign = std::max(MaxAlign, Dest->getAlignment());
|
MaxLogAlign = std::max(MaxLogAlign, Dest->getLogAlignment());
|
||||||
for (unsigned i = StartBlock+1, e = Dest->getNumber(); i != e; ++i) {
|
for (unsigned i = StartBlock+1, e = Dest->getNumber(); i != e; ++i) {
|
||||||
BranchSize += BlockSizes[i].first;
|
BranchSize += BlockSizes[i].first;
|
||||||
MaxAlign = std::max(MaxAlign,
|
MaxLogAlign =
|
||||||
Fn.getBlockNumbered(i)->getAlignment());
|
std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment());
|
||||||
}
|
}
|
||||||
|
|
||||||
NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
|
NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
|
||||||
|
@ -258,7 +258,7 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
|
||||||
// The computed offset is at most ((1 << alignment) - 4) bytes smaller
|
// The computed offset is at most ((1 << alignment) - 4) bytes smaller
|
||||||
// than actual offset. So we add this number to the offset for safety.
|
// than actual offset. So we add this number to the offset for safety.
|
||||||
if (NeedExtraAdjustment)
|
if (NeedExtraAdjustment)
|
||||||
BranchSize += (1 << MaxAlign) - 4;
|
BranchSize += (1 << MaxLogAlign) - 4;
|
||||||
|
|
||||||
return BranchSize;
|
return BranchSize;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1180,9 +1180,9 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
|
||||||
setJumpIsExpensive();
|
setJumpIsExpensive();
|
||||||
}
|
}
|
||||||
|
|
||||||
setMinFunctionAlignment(2);
|
setMinFunctionLogAlignment(2);
|
||||||
if (Subtarget.isDarwin())
|
if (Subtarget.isDarwin())
|
||||||
setPrefFunctionAlignment(4);
|
setPrefFunctionLogAlignment(4);
|
||||||
|
|
||||||
switch (Subtarget.getDarwinDirective()) {
|
switch (Subtarget.getDarwinDirective()) {
|
||||||
default: break;
|
default: break;
|
||||||
|
@ -1199,8 +1199,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
|
||||||
case PPC::DIR_PWR7:
|
case PPC::DIR_PWR7:
|
||||||
case PPC::DIR_PWR8:
|
case PPC::DIR_PWR8:
|
||||||
case PPC::DIR_PWR9:
|
case PPC::DIR_PWR9:
|
||||||
setPrefFunctionAlignment(4);
|
setPrefFunctionLogAlignment(4);
|
||||||
setPrefLoopAlignment(4);
|
setPrefLoopLogAlignment(4);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -14007,7 +14007,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
|
unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
|
||||||
switch (Subtarget.getDarwinDirective()) {
|
switch (Subtarget.getDarwinDirective()) {
|
||||||
default: break;
|
default: break;
|
||||||
case PPC::DIR_970:
|
case PPC::DIR_970:
|
||||||
|
@ -14050,7 +14050,7 @@ unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return TargetLowering::getPrefLoopAlignment(ML);
|
return TargetLowering::getPrefLoopLogAlignment(ML);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// getConstraintType - Given a constraint, return the type of
|
/// getConstraintType - Given a constraint, return the type of
|
||||||
|
|
|
@ -735,7 +735,7 @@ namespace llvm {
|
||||||
const SelectionDAG &DAG,
|
const SelectionDAG &DAG,
|
||||||
unsigned Depth = 0) const override;
|
unsigned Depth = 0) const override;
|
||||||
|
|
||||||
unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
|
unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
|
||||||
|
|
||||||
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
|
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -199,8 +199,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
|
||||||
|
|
||||||
// Function alignments (log2).
|
// Function alignments (log2).
|
||||||
unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2;
|
unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2;
|
||||||
setMinFunctionAlignment(FunctionAlignment);
|
setMinFunctionLogAlignment(FunctionAlignment);
|
||||||
setPrefFunctionAlignment(FunctionAlignment);
|
setPrefFunctionLogAlignment(FunctionAlignment);
|
||||||
|
|
||||||
// Effectively disable jump table generation.
|
// Effectively disable jump table generation.
|
||||||
setMinimumJumpTableEntries(INT_MAX);
|
setMinimumJumpTableEntries(INT_MAX);
|
||||||
|
|
|
@ -1805,7 +1805,7 @@ SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
|
||||||
|
|
||||||
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
|
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
|
||||||
|
|
||||||
setMinFunctionAlignment(2);
|
setMinFunctionLogAlignment(2);
|
||||||
|
|
||||||
computeRegisterProperties(Subtarget->getRegisterInfo());
|
computeRegisterProperties(Subtarget->getRegisterInfo());
|
||||||
}
|
}
|
||||||
|
|
|
@ -120,9 +120,9 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
|
||||||
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
|
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
|
||||||
|
|
||||||
// Instructions are strings of 2-byte aligned 2-byte values.
|
// Instructions are strings of 2-byte aligned 2-byte values.
|
||||||
setMinFunctionAlignment(2);
|
setMinFunctionLogAlignment(2);
|
||||||
// For performance reasons we prefer 16-byte alignment.
|
// For performance reasons we prefer 16-byte alignment.
|
||||||
setPrefFunctionAlignment(4);
|
setPrefFunctionLogAlignment(4);
|
||||||
|
|
||||||
// Handle operations that are handled in a similar way for all types.
|
// Handle operations that are handled in a similar way for all types.
|
||||||
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
|
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
|
||||||
|
|
|
@ -87,7 +87,7 @@ struct MBBInfo {
|
||||||
|
|
||||||
// The minimum alignment of the block, as a log2 value.
|
// The minimum alignment of the block, as a log2 value.
|
||||||
// This value never changes.
|
// This value never changes.
|
||||||
unsigned Alignment = 0;
|
unsigned LogAlignment = 0;
|
||||||
|
|
||||||
// The number of terminators in this block. This value never changes.
|
// The number of terminators in this block. This value never changes.
|
||||||
unsigned NumTerminators = 0;
|
unsigned NumTerminators = 0;
|
||||||
|
@ -127,7 +127,8 @@ struct BlockPosition {
|
||||||
// as the runtime address.
|
// as the runtime address.
|
||||||
unsigned KnownBits;
|
unsigned KnownBits;
|
||||||
|
|
||||||
BlockPosition(unsigned InitialAlignment) : KnownBits(InitialAlignment) {}
|
BlockPosition(unsigned InitialLogAlignment)
|
||||||
|
: KnownBits(InitialLogAlignment) {}
|
||||||
};
|
};
|
||||||
|
|
||||||
class SystemZLongBranch : public MachineFunctionPass {
|
class SystemZLongBranch : public MachineFunctionPass {
|
||||||
|
@ -178,16 +179,16 @@ const uint64_t MaxForwardRange = 0xfffe;
|
||||||
// instructions.
|
// instructions.
|
||||||
void SystemZLongBranch::skipNonTerminators(BlockPosition &Position,
|
void SystemZLongBranch::skipNonTerminators(BlockPosition &Position,
|
||||||
MBBInfo &Block) {
|
MBBInfo &Block) {
|
||||||
if (Block.Alignment > Position.KnownBits) {
|
if (Block.LogAlignment > Position.KnownBits) {
|
||||||
// When calculating the address of Block, we need to conservatively
|
// When calculating the address of Block, we need to conservatively
|
||||||
// assume that Block had the worst possible misalignment.
|
// assume that Block had the worst possible misalignment.
|
||||||
Position.Address += ((uint64_t(1) << Block.Alignment) -
|
Position.Address += ((uint64_t(1) << Block.LogAlignment) -
|
||||||
(uint64_t(1) << Position.KnownBits));
|
(uint64_t(1) << Position.KnownBits));
|
||||||
Position.KnownBits = Block.Alignment;
|
Position.KnownBits = Block.LogAlignment;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Align the addresses.
|
// Align the addresses.
|
||||||
uint64_t AlignMask = (uint64_t(1) << Block.Alignment) - 1;
|
uint64_t AlignMask = (uint64_t(1) << Block.LogAlignment) - 1;
|
||||||
Position.Address = (Position.Address + AlignMask) & ~AlignMask;
|
Position.Address = (Position.Address + AlignMask) & ~AlignMask;
|
||||||
|
|
||||||
// Record the block's position.
|
// Record the block's position.
|
||||||
|
@ -275,13 +276,13 @@ uint64_t SystemZLongBranch::initMBBInfo() {
|
||||||
Terminators.clear();
|
Terminators.clear();
|
||||||
Terminators.reserve(NumBlocks);
|
Terminators.reserve(NumBlocks);
|
||||||
|
|
||||||
BlockPosition Position(MF->getAlignment());
|
BlockPosition Position(MF->getLogAlignment());
|
||||||
for (unsigned I = 0; I < NumBlocks; ++I) {
|
for (unsigned I = 0; I < NumBlocks; ++I) {
|
||||||
MachineBasicBlock *MBB = MF->getBlockNumbered(I);
|
MachineBasicBlock *MBB = MF->getBlockNumbered(I);
|
||||||
MBBInfo &Block = MBBs[I];
|
MBBInfo &Block = MBBs[I];
|
||||||
|
|
||||||
// Record the alignment, for quick access.
|
// Record the alignment, for quick access.
|
||||||
Block.Alignment = MBB->getAlignment();
|
Block.LogAlignment = MBB->getLogAlignment();
|
||||||
|
|
||||||
// Calculate the size of the fixed part of the block.
|
// Calculate the size of the fixed part of the block.
|
||||||
MachineBasicBlock::iterator MI = MBB->begin();
|
MachineBasicBlock::iterator MI = MBB->begin();
|
||||||
|
@ -339,7 +340,7 @@ bool SystemZLongBranch::mustRelaxABranch() {
|
||||||
// must be long.
|
// must be long.
|
||||||
void SystemZLongBranch::setWorstCaseAddresses() {
|
void SystemZLongBranch::setWorstCaseAddresses() {
|
||||||
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
|
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
|
||||||
BlockPosition Position(MF->getAlignment());
|
BlockPosition Position(MF->getLogAlignment());
|
||||||
for (auto &Block : MBBs) {
|
for (auto &Block : MBBs) {
|
||||||
skipNonTerminators(Position, Block);
|
skipNonTerminators(Position, Block);
|
||||||
for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
|
for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
|
||||||
|
@ -440,7 +441,7 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
|
||||||
// Run a shortening pass and relax any branches that need to be relaxed.
|
// Run a shortening pass and relax any branches that need to be relaxed.
|
||||||
void SystemZLongBranch::relaxBranches() {
|
void SystemZLongBranch::relaxBranches() {
|
||||||
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
|
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
|
||||||
BlockPosition Position(MF->getAlignment());
|
BlockPosition Position(MF->getLogAlignment());
|
||||||
for (auto &Block : MBBs) {
|
for (auto &Block : MBBs) {
|
||||||
skipNonTerminators(Position, Block);
|
skipNonTerminators(Position, Block);
|
||||||
for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
|
for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
|
||||||
|
|
|
@ -1892,13 +1892,13 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
|
||||||
MaxLoadsPerMemcmpOptSize = 2;
|
MaxLoadsPerMemcmpOptSize = 2;
|
||||||
|
|
||||||
// Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
|
// Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
|
||||||
setPrefLoopAlignment(ExperimentalPrefLoopAlignment);
|
setPrefLoopLogAlignment(ExperimentalPrefLoopAlignment);
|
||||||
|
|
||||||
// An out-of-order CPU can speculatively execute past a predictable branch,
|
// An out-of-order CPU can speculatively execute past a predictable branch,
|
||||||
// but a conditional move could be stalled by an expensive earlier operation.
|
// but a conditional move could be stalled by an expensive earlier operation.
|
||||||
PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
|
PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
|
||||||
EnableExtLdPromotion = true;
|
EnableExtLdPromotion = true;
|
||||||
setPrefFunctionAlignment(4); // 2^4 bytes.
|
setPrefFunctionLogAlignment(4); // 2^4 bytes.
|
||||||
|
|
||||||
verifyIntrinsicTables();
|
verifyIntrinsicTables();
|
||||||
}
|
}
|
||||||
|
|
|
@ -279,7 +279,7 @@ void X86RetpolineThunks::populateThunk(MachineFunction &MF,
|
||||||
|
|
||||||
CallTarget->addLiveIn(Reg);
|
CallTarget->addLiveIn(Reg);
|
||||||
CallTarget->setHasAddressTaken();
|
CallTarget->setHasAddressTaken();
|
||||||
CallTarget->setAlignment(4);
|
CallTarget->setLogAlignment(4);
|
||||||
insertRegReturnAddrClobber(*CallTarget, Reg);
|
insertRegReturnAddrClobber(*CallTarget, Reg);
|
||||||
CallTarget->back().setPreInstrSymbol(MF, TargetSym);
|
CallTarget->back().setPreInstrSymbol(MF, TargetSym);
|
||||||
BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
|
BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
|
||||||
|
|
|
@ -171,8 +171,8 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
|
||||||
setTargetDAGCombine(ISD::INTRINSIC_VOID);
|
setTargetDAGCombine(ISD::INTRINSIC_VOID);
|
||||||
setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
|
setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
|
||||||
|
|
||||||
setMinFunctionAlignment(1);
|
setMinFunctionLogAlignment(1);
|
||||||
setPrefFunctionAlignment(2);
|
setPrefFunctionLogAlignment(2);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
|
bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
|
||||||
|
|
|
@ -892,12 +892,12 @@ body: |
|
||||||
# CHECK-NEXT: renamable $q12 = VDUP32q killed renamable $r5, 14, $noreg
|
# CHECK-NEXT: renamable $q12 = VDUP32q killed renamable $r5, 14, $noreg
|
||||||
# CHECK-NEXT: t2B %bb.2, 14, $noreg
|
# CHECK-NEXT: t2B %bb.2, 14, $noreg
|
||||||
# CHECK-NEXT: {{^ $}}
|
# CHECK-NEXT: {{^ $}}
|
||||||
# CHECK-NEXT: bb.1 (align 2):
|
# CHECK-NEXT: bb.1 (align 4):
|
||||||
# CHECK-NEXT: successors:{{ }}
|
# CHECK-NEXT: successors:{{ }}
|
||||||
# CHECK-NEXT: {{^ $}}
|
# CHECK-NEXT: {{^ $}}
|
||||||
# CHECK-NEXT: CONSTPOOL_ENTRY 1, %const.0, 4
|
# CHECK-NEXT: CONSTPOOL_ENTRY 1, %const.0, 4
|
||||||
# CHECK-NEXT: {{^ $}}
|
# CHECK-NEXT: {{^ $}}
|
||||||
# CHECK-NEXT: bb.2.entry (align 1):
|
# CHECK-NEXT: bb.2.entry (align 2):
|
||||||
# CHECK-NEXT: liveins: $d13, $s27, $r10, $r9, $r8, $s26, $d12, $s25, $s24,
|
# CHECK-NEXT: liveins: $d13, $s27, $r10, $r9, $r8, $s26, $d12, $s25, $s24,
|
||||||
# CHECK-SAME: $d15, $s30, $s31, $d14, $s28, $s29, $lr, $r0, $d21,
|
# CHECK-SAME: $d15, $s30, $s31, $d14, $s28, $s29, $lr, $r0, $d21,
|
||||||
# CHECK-SAME: $r3, $q10, $d20, $d17, $r2, $d25, $q11, $d22, $d23,
|
# CHECK-SAME: $r3, $q10, $d20, $d17, $r2, $d25, $q11, $d22, $d23,
|
||||||
|
|
|
@ -55,16 +55,16 @@ constants:
|
||||||
alignment: 2
|
alignment: 2
|
||||||
|
|
||||||
#CHECK: B %[[BB4:bb.[0-9]]]
|
#CHECK: B %[[BB4:bb.[0-9]]]
|
||||||
#CHECK: bb.{{.}} (align 2):
|
#CHECK: bb.{{.}} (align 4):
|
||||||
|
#CHECK: successors:
|
||||||
|
#CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 4
|
||||||
|
#CHECK: bb.{{.}} (align 4):
|
||||||
#CHECK: successors:
|
#CHECK: successors:
|
||||||
#CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 4
|
#CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 4
|
||||||
#CHECK: bb.{{.}} (align 2):
|
#CHECK: bb.{{.}} (align 2):
|
||||||
#CHECK: successors:
|
#CHECK: successors:
|
||||||
#CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 4
|
|
||||||
#CHECK: bb.{{.}} (align 1):
|
|
||||||
#CHECK: successors:
|
|
||||||
#CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 2
|
#CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 2
|
||||||
#CHECK: [[BB4]].entry (align 2):
|
#CHECK: [[BB4]].entry (align 4):
|
||||||
|
|
||||||
body: |
|
body: |
|
||||||
bb.0.entry:
|
bb.0.entry:
|
||||||
|
|
|
@ -53,13 +53,13 @@ constants:
|
||||||
alignment: 2
|
alignment: 2
|
||||||
|
|
||||||
#CHECK: t2B %[[BB3:bb.[0-9]]]
|
#CHECK: t2B %[[BB3:bb.[0-9]]]
|
||||||
#CHECK: bb.{{.}} (align 2):
|
#CHECK: bb.{{.}} (align 4):
|
||||||
#CHECK: successors:
|
#CHECK: successors:
|
||||||
#CHECK: CONSTPOOL_ENTRY 2, %const.{{.}}, 4
|
#CHECK: CONSTPOOL_ENTRY 2, %const.{{.}}, 4
|
||||||
#CHECK: bb.{{.}} (align 1):
|
#CHECK: bb.{{.}} (align 2):
|
||||||
#CHECK: successors:
|
#CHECK: successors:
|
||||||
#CHECK: CONSTPOOL_ENTRY 3, %const.{{.}}, 2
|
#CHECK: CONSTPOOL_ENTRY 3, %const.{{.}}, 2
|
||||||
#CHECK: [[BB3]].entry (align 1):
|
#CHECK: [[BB3]].entry (align 2):
|
||||||
|
|
||||||
body: |
|
body: |
|
||||||
bb.0.entry:
|
bb.0.entry:
|
||||||
|
|
|
@ -76,11 +76,11 @@ constants:
|
||||||
isTargetSpecific: false
|
isTargetSpecific: false
|
||||||
|
|
||||||
|
|
||||||
#CHECK: bb.{{.*}} (align 1):
|
#CHECK: bb.{{.*}} (align 2):
|
||||||
#CHECK: successors:
|
#CHECK: successors:
|
||||||
#CHECK: CONSTPOOL_ENTRY 1, %const{{.*}}, 2
|
#CHECK: CONSTPOOL_ENTRY 1, %const{{.*}}, 2
|
||||||
# We want this block to be 4 byte aligned:
|
# We want this block to be 4 byte aligned:
|
||||||
#CHECK: bb.{{.*}}.LA (align 2):
|
#CHECK: bb.{{.*}}.LA (align 4):
|
||||||
|
|
||||||
body: |
|
body: |
|
||||||
bb.0.entry:
|
bb.0.entry:
|
||||||
|
|
|
@ -77,7 +77,7 @@ constants:
|
||||||
isTargetSpecific: false
|
isTargetSpecific: false
|
||||||
|
|
||||||
|
|
||||||
#CHECK: bb.{{.*}} (align 1):
|
#CHECK: bb.{{.*}} (align 2):
|
||||||
#CHECK: successors:
|
#CHECK: successors:
|
||||||
#CHECK: CONSTPOOL_ENTRY 1, %const{{.*}}, 2
|
#CHECK: CONSTPOOL_ENTRY 1, %const{{.*}}, 2
|
||||||
#
|
#
|
||||||
|
|
|
@ -118,7 +118,7 @@ body: |
|
||||||
# CHECK: c: 60 25 90 03 swr $1, 3($5)
|
# CHECK: c: 60 25 90 03 swr $1, 3($5)
|
||||||
|
|
||||||
# CHECK-LABEL: g2:
|
# CHECK-LABEL: g2:
|
||||||
# CHECK: 14: 60 24 64 00 lwle $1, 0($4)
|
# CHECK: 12: 60 24 64 00 lwle $1, 0($4)
|
||||||
# CHECK: 18: 60 24 66 03 lwre $1, 3($4)
|
# CHECK: 16: 60 24 66 03 lwre $1, 3($4)
|
||||||
# CHECK: 1c: 60 25 a0 00 swle $1, 0($5)
|
# CHECK: 1a: 60 25 a0 00 swle $1, 0($5)
|
||||||
# CHECK: 20: 60 25 a2 03 swre $1, 3($5)
|
# CHECK: 1e: 60 25 a2 03 swre $1, 3($5)
|
||||||
|
|
|
@ -212,7 +212,7 @@ body: |
|
||||||
; CHECK: successors: %bb.11(0x80000000)
|
; CHECK: successors: %bb.11(0x80000000)
|
||||||
; CHECK: B %bb.11
|
; CHECK: B %bb.11
|
||||||
|
|
||||||
; CHECK: bb.8.while.body.i (align 4):
|
; CHECK: bb.8.while.body.i (align 16):
|
||||||
; CHECK: successors: %bb.11(0x04000000), %bb.9(0x7c000000)
|
; CHECK: successors: %bb.11(0x04000000), %bb.9(0x7c000000)
|
||||||
; CHECK: BCC 76, killed renamable $cr0, %bb.11
|
; CHECK: BCC 76, killed renamable $cr0, %bb.11
|
||||||
|
|
||||||
|
|
|
@ -26,7 +26,7 @@ body: |
|
||||||
; CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
|
; CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
|
||||||
; CHECK: TEST64rr $rax, $rax, implicit-def $eflags
|
; CHECK: TEST64rr $rax, $rax, implicit-def $eflags
|
||||||
; CHECK: JCC_1 %bb.1, 4, implicit $eflags
|
; CHECK: JCC_1 %bb.1, 4, implicit $eflags
|
||||||
; CHECK: bb.5 (align 4):
|
; CHECK: bb.5 (align 16):
|
||||||
; CHECK: successors: %bb.6(0x71555555), %bb.8(0x0eaaaaab)
|
; CHECK: successors: %bb.6(0x71555555), %bb.8(0x0eaaaaab)
|
||||||
; CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8), (load 8)
|
; CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8), (load 8)
|
||||||
; CHECK: JCC_1 %bb.8, 5, implicit $eflags
|
; CHECK: JCC_1 %bb.8, 5, implicit $eflags
|
||||||
|
|
|
@ -32,7 +32,7 @@
|
||||||
# Checking that we have two compile units with two sets of high/lo_pc.
|
# Checking that we have two compile units with two sets of high/lo_pc.
|
||||||
# CHECK: .debug_info contents
|
# CHECK: .debug_info contents
|
||||||
# CHECK: DW_TAG_compile_unit
|
# CHECK: DW_TAG_compile_unit
|
||||||
# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000020 ".text")
|
# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000018 ".text")
|
||||||
# CHECK: DW_AT_high_pc
|
# CHECK: DW_AT_high_pc
|
||||||
#
|
#
|
||||||
# CHECK: DW_TAG_subprogram
|
# CHECK: DW_TAG_subprogram
|
||||||
|
@ -42,8 +42,8 @@
|
||||||
# CHECK: DW_TAG_formal_parameter
|
# CHECK: DW_TAG_formal_parameter
|
||||||
# CHECK-NOT: DW_TAG
|
# CHECK-NOT: DW_TAG
|
||||||
# CHECK: DW_AT_location [DW_FORM_sec_offset] ({{.*}}
|
# CHECK: DW_AT_location [DW_FORM_sec_offset] ({{.*}}
|
||||||
# CHECK-NEXT: [0x00000029, 0x00000037): DW_OP_breg0 EAX+0, DW_OP_deref
|
# CHECK-NEXT: [0x00000021, 0x0000002f): DW_OP_breg0 EAX+0, DW_OP_deref
|
||||||
# CHECK-NEXT: [0x00000037, 0x00000063): DW_OP_breg5 EBP-8, DW_OP_deref, DW_OP_deref
|
# CHECK-NEXT: [0x0000002f, 0x0000005b): DW_OP_breg5 EBP-8, DW_OP_deref, DW_OP_deref
|
||||||
# CHECK-NEXT: DW_AT_name [DW_FORM_strp]{{.*}}"a"
|
# CHECK-NEXT: DW_AT_name [DW_FORM_strp]{{.*}}"a"
|
||||||
#
|
#
|
||||||
# CHECK: DW_TAG_variable
|
# CHECK: DW_TAG_variable
|
||||||
|
|
Loading…
Reference in New Issue