Remove the Function::getFnAttributes method in favor of using the AttributeSet directly.

This is in preparation for removing the use of the 'Attribute' class as a collection of attributes; that role will shift to the AttributeSet class instead.

llvm-svn: 171253
This commit is contained in:
parent 6190254e0f
commit 698e84fc4f
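The change is mechanical at every call site: instead of materializing the intermediate Attribute collection returned by getFnAttributes(), callers now query the function's AttributeSet directly, passing AttributeSet::FunctionIndex to select the function-level attribute slot. A minimal before/after sketch of the pattern (the helper is hypothetical, shown only to illustrate the migration; it assumes the pre-3.3 header layout where Attributes.h and Function.h live directly under llvm/):

#include "llvm/Attributes.h"
#include "llvm/Function.h"

using namespace llvm;

// Hypothetical helper illustrating the migration this commit performs.
static bool isOptimizedForSize(const Function *F) {
  // Before this commit (API removed here):
  //   return F->getFnAttributes().hasAttribute(Attribute::OptimizeForSize);
  // After this commit: ask the AttributeSet directly, selecting the
  // function-level slot with AttributeSet::FunctionIndex.
  return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                         Attribute::OptimizeForSize);
}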
@@ -341,6 +341,20 @@ public:
     return getAttributes(Idx).getAlignment();
   }
 
+  /// \brief Return true if the attribute exists at the given index.
+  bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const;
+
+  /// \brief Return true if attribute exists at the given index.
+  bool hasAttributes(unsigned Index) const;
+
+  /// \brief Get the stack alignment.
+  unsigned getStackAlignment(unsigned Index) const;
+
+  /// \brief Return the attributes at the index as a string.
+  std::string getAsString(unsigned Index) const;
+
+  uint64_t getBitMask(unsigned Index) const;
+
   /// \brief Return true if the specified attribute is set for at least one
   /// parameter or for the return value.
   bool hasAttrSomewhere(Attribute::AttrKind Attr) const;
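The queries declared above are index-based: per the getAttributes() comment visible in the Attributes.cpp hunk near the end of this diff, index 0 denotes the return value, 1..N the parameters, and ~0U (spelled AttributeSet::FunctionIndex) the function itself. A short sketch of the intended call pattern (illustrative only; the surrounding routine is hypothetical):

// Hypothetical inspection routine using the new index-taking queries.
static void inspectAttributes(const llvm::Function &F) {
  llvm::AttributeSet AS = F.getAttributes();
  // Function-level slot: nounwind, optsize, etc.
  bool IsNoUnwind = AS.hasAttribute(llvm::AttributeSet::FunctionIndex,
                                    llvm::Attribute::NoUnwind);
  // Index 0 is the return value; 1..N are the parameters.
  bool RetIsZExt = AS.hasAttribute(0, llvm::Attribute::ZExt);
  bool Arg1NoCap = AS.hasAttribute(1, llvm::Attribute::NoCapture);
  (void)IsNoUnwind; (void)RetIsZExt; (void)Arg1NoCap;
}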
@@ -168,12 +168,6 @@ public:
   ///
   void setAttributes(const AttributeSet &attrs) { AttributeList = attrs; }
 
-  /// getFnAttributes - Return the function attributes for querying.
-  ///
-  Attribute getFnAttributes() const {
-    return AttributeList.getFnAttributes();
-  }
-
   /// addFnAttr - Add function attributes to this function.
   ///
   void addFnAttr(Attribute::AttrKind N) {
@@ -219,7 +213,8 @@ public:
 
   /// @brief Determine if the function does not access memory.
   bool doesNotAccessMemory() const {
-    return getFnAttributes().hasAttribute(Attribute::ReadNone);
+    return AttributeList.hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::ReadNone);
   }
   void setDoesNotAccessMemory() {
     addFnAttr(Attribute::ReadNone);
@@ -228,7 +223,8 @@ public:
   /// @brief Determine if the function does not access or only reads memory.
   bool onlyReadsMemory() const {
     return doesNotAccessMemory() ||
-      getFnAttributes().hasAttribute(Attribute::ReadOnly);
+      AttributeList.hasAttribute(AttributeSet::FunctionIndex,
+                                 Attribute::ReadOnly);
   }
   void setOnlyReadsMemory() {
     addFnAttr(Attribute::ReadOnly);
@@ -236,7 +232,8 @@ public:
 
   /// @brief Determine if the function cannot return.
   bool doesNotReturn() const {
-    return getFnAttributes().hasAttribute(Attribute::NoReturn);
+    return AttributeList.hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::NoReturn);
   }
   void setDoesNotReturn() {
     addFnAttr(Attribute::NoReturn);
@@ -244,7 +241,8 @@ public:
 
   /// @brief Determine if the function cannot unwind.
   bool doesNotThrow() const {
-    return getFnAttributes().hasAttribute(Attribute::NoUnwind);
+    return AttributeList.hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::NoUnwind);
   }
   void setDoesNotThrow() {
     addFnAttr(Attribute::NoUnwind);
@@ -252,7 +250,8 @@ public:
 
   /// @brief Determine if the call cannot be duplicated.
   bool cannotDuplicate() const {
-    return getFnAttributes().hasAttribute(Attribute::NoDuplicate);
+    return AttributeList.hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::NoDuplicate);
   }
   void setCannotDuplicate() {
     addFnAttr(Attribute::NoDuplicate);
@@ -261,7 +260,8 @@ public:
   /// @brief True if the ABI mandates (or the user requested) that this
   /// function be in a unwind table.
   bool hasUWTable() const {
-    return getFnAttributes().hasAttribute(Attribute::UWTable);
+    return AttributeList.hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::UWTable);
   }
   void setHasUWTable() {
     addFnAttr(Attribute::UWTable);
@@ -203,7 +203,8 @@ void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) {
   // as volatile if they are live across a setjmp call, and they probably
   // won't do this in callers.
   exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
-    !F->getFnAttributes().hasAttribute(Attribute::ReturnsTwice);
+    !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                     Attribute::ReturnsTwice);
 
   // Look at the size of the callee.
   for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
@@ -694,7 +694,8 @@ bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
 
 bool CallAnalyzer::visitCallSite(CallSite CS) {
   if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
-      !F.getFnAttributes().hasAttribute(Attribute::ReturnsTwice)) {
+      !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::ReturnsTwice)) {
     // This aborts the entire analysis.
     ExposesReturnsTwice = true;
     return false;
@@ -1143,7 +1144,8 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
 
   // Calls to functions with always-inline attributes should be inlined
   // whenever possible.
-  if (Callee->getFnAttributes().hasAttribute(Attribute::AlwaysInline)) {
+  if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                           Attribute::AlwaysInline)) {
     if (isInlineViable(*Callee))
       return llvm::InlineCost::getAlways();
     return llvm::InlineCost::getNever();
@@ -1153,7 +1155,8 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
   // something else. Don't inline functions marked noinline or call sites
   // marked noinline.
   if (Callee->mayBeOverridden() ||
-      Callee->getFnAttributes().hasAttribute(Attribute::NoInline) ||
+      Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                           Attribute::NoInline) ||
       CS.isNoInline())
     return llvm::InlineCost::getNever();
 
@@ -1175,7 +1178,8 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
 }
 
 bool InlineCostAnalyzer::isInlineViable(Function &F) {
-  bool ReturnsTwice =F.getFnAttributes().hasAttribute(Attribute::ReturnsTwice);
+  bool ReturnsTwice =F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                                    Attribute::ReturnsTwice);
   for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
     // Disallow inlining of functions which contain an indirect branch.
     if (isa<IndirectBrInst>(BI->getTerminator()))
@@ -327,8 +327,8 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
       return 0;
 
     if (LIOffs+NewLoadByteSize > MemLocEnd &&
-        LI->getParent()->getParent()->getFnAttributes().
-          hasAttribute(Attribute::AddressSafety))
+        LI->getParent()->getParent()->getAttributes().
+          hasAttribute(AttributeSet::FunctionIndex, Attribute::AddressSafety))
       // We will be reading past the location accessed by the original program.
       // While this is safe in a regular build, Address Safety analysis tools
       // may start reporting false warnings. So, don't do widening.
@@ -570,8 +570,8 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1,
   // instructions that would be deleted in the merge.
   MachineFunction *MF = MBB1->getParent();
   if (EffectiveTailLen >= 2 &&
-      MF->getFunction()->getFnAttributes().
-        hasAttribute(Attribute::OptimizeForSize) &&
+      MF->getFunction()->getAttributes().
+        hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
       (I1 == MBB1->begin() || I2 == MBB2->begin()))
     return true;
 
@@ -373,7 +373,8 @@ bool CodePlacementOpt::OptimizeIntraLoopEdges(MachineFunction &MF) {
 ///
 bool CodePlacementOpt::AlignLoops(MachineFunction &MF) {
   const Function *F = MF.getFunction();
-  if (F->getFnAttributes().hasAttribute(Attribute::OptimizeForSize))
+  if (F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::OptimizeForSize))
     return false;
 
   unsigned Align = TLI->getPrefLoopAlignment();
@@ -1013,8 +1013,8 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
   // exclusively on the loop info here so that we can align backedges in
   // unnatural CFGs and backedges that were introduced purely because of the
   // loop rotations done during this layout pass.
-  if (F.getFunction()->getFnAttributes().
-        hasAttribute(Attribute::OptimizeForSize))
+  if (F.getFunction()->getAttributes().
+        hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize))
     return;
   unsigned Align = TLI->getPrefLoopAlignment();
   if (!Align)
@@ -60,13 +60,15 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
   MFInfo = 0;
   FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameLowering(),
                                                TM.Options.RealignStack);
-  if (Fn->getFnAttributes().hasAttribute(Attribute::StackAlignment))
-    FrameInfo->ensureMaxAlignment(Fn->getAttributes().
-                                  getFnAttributes().getStackAlignment());
+  if (Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                       Attribute::StackAlignment))
+    FrameInfo->ensureMaxAlignment(Fn->getAttributes().
+                                  getStackAlignment(AttributeSet::FunctionIndex));
   ConstantPool = new (Allocator) MachineConstantPool(TM.getDataLayout());
   Alignment = TM.getTargetLowering()->getMinFunctionAlignment();
   // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
-  if (!Fn->getFnAttributes().hasAttribute(Attribute::OptimizeForSize))
+  if (!Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                        Attribute::OptimizeForSize))
     Alignment = std::max(Alignment,
                          TM.getTargetLowering()->getPrefFunctionAlignment());
   FunctionNumber = FunctionNum;
@@ -96,7 +96,8 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
   placeCSRSpillsAndRestores(Fn);
 
   // Add the code to save and restore the callee saved registers
-  if (!F->getFnAttributes().hasAttribute(Attribute::Naked))
+  if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                       Attribute::Naked))
     insertCSRSpillsAndRestores(Fn);
 
   // Allow the target machine to make final modifications to the function
@@ -111,7 +112,8 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
   // called functions. Because of this, calculateCalleeSavedRegisters()
   // must be called before this function in order to set the AdjustsStack
   // and MaxCallFrameSize variables.
-  if (!F->getFnAttributes().hasAttribute(Attribute::Naked))
+  if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                       Attribute::Naked))
     insertPrologEpilogCode(Fn);
 
   // Replace all MO_FrameIndex operands with physical register references
@@ -191,13 +193,13 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
 
 /// calculateCalleeSavedRegisters - Scan the function for modified callee saved
 /// registers.
-void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
-  const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
-  const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
-  MachineFrameInfo *MFI = Fn.getFrameInfo();
+void PEI::calculateCalleeSavedRegisters(MachineFunction &F) {
+  const TargetRegisterInfo *RegInfo = F.getTarget().getRegisterInfo();
+  const TargetFrameLowering *TFI = F.getTarget().getFrameLowering();
+  MachineFrameInfo *MFI = F.getFrameInfo();
 
   // Get the callee saved register list...
-  const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&Fn);
+  const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&F);
 
   // These are used to keep track the callee-save area. Initialize them.
   MinCSFrameIndex = INT_MAX;
@@ -208,13 +210,14 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
     return;
 
   // In Naked functions we aren't going to save any registers.
-  if (Fn.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked))
+  if (F.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                                    Attribute::Naked))
     return;
 
   std::vector<CalleeSavedInfo> CSI;
   for (unsigned i = 0; CSRegs[i]; ++i) {
     unsigned Reg = CSRegs[i];
-    if (Fn.getRegInfo().isPhysRegUsed(Reg)) {
+    if (F.getRegInfo().isPhysRegUsed(Reg)) {
       // If the reg is modified, save it!
       CSI.push_back(CalleeSavedInfo(Reg));
     }
@@ -235,7 +238,7 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
     const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
 
     int FrameIdx;
-    if (RegInfo->hasReservedSpillSlot(Fn, Reg, FrameIdx)) {
+    if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
       I->setFrameIdx(FrameIdx);
       continue;
     }
@@ -7751,8 +7751,9 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
 
     // We only use vectors if the constant is known to be zero and the
     // function is not marked with the noimplicitfloat attribute.
-    if (NonZero || (DAG.getMachineFunction().getFunction()->getFnAttributes().
-                    hasAttribute(Attribute::NoImplicitFloat)))
+    if (NonZero || (DAG.getMachineFunction().getFunction()->getAttributes().
+                    hasAttribute(AttributeSet::FunctionIndex,
+                                 Attribute::NoImplicitFloat)))
       LastLegalVectorType = 0;
 
     // Check if we found a legal integer type to store.
@@ -3545,8 +3545,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo *MFI = MF.getFrameInfo();
   bool OptSize =
-    MF.getFunction()->getFnAttributes().
-      hasAttribute(Attribute::OptimizeForSize);
+    MF.getFunction()->getAttributes().
+      hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
     DstAlignCanChange = true;
@@ -3651,8 +3651,8 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
   bool DstAlignCanChange = false;
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo *MFI = MF.getFrameInfo();
-  bool OptSize = MF.getFunction()->getFnAttributes().
-    hasAttribute(Attribute::OptimizeForSize);
+  bool OptSize = MF.getFunction()->getAttributes().
+    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
     DstAlignCanChange = true;
@@ -3730,8 +3730,8 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
   bool DstAlignCanChange = false;
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo *MFI = MF.getFrameInfo();
-  bool OptSize = MF.getFunction()->getFnAttributes().
-    hasAttribute(Attribute::OptimizeForSize);
+  bool OptSize = MF.getFunction()->getAttributes().
+    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
     DstAlignCanChange = true;
@@ -4296,7 +4296,8 @@ static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
     return DAG.getConstantFP(1.0, LHS.getValueType());
 
   const Function *F = DAG.getMachineFunction().getFunction();
-  if (!F->getFnAttributes().hasAttribute(Attribute::OptimizeForSize) ||
+  if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                       Attribute::OptimizeForSize) ||
       // If optimizing for size, don't insert too many multiplies. This
       // inserts up to 5 multiplies.
      CountPopulation_32(Val)+Log2_32(Val) < 7) {
@@ -137,10 +137,12 @@ bool StackProtector::ContainsProtectableArray(Type *Ty, bool InStruct) const {
 /// add a guard variable to functions that call alloca, and functions with
 /// buffers larger than SSPBufferSize bytes.
 bool StackProtector::RequiresStackProtector() const {
-  if (F->getFnAttributes().hasAttribute(Attribute::StackProtectReq))
+  if (F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::StackProtectReq))
     return true;
 
-  if (!F->getFnAttributes().hasAttribute(Attribute::StackProtect))
+  if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                       Attribute::StackProtect))
     return false;
 
   for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
@@ -551,8 +551,8 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
   // compensate for the duplication.
   unsigned MaxDuplicateCount;
   if (TailDuplicateSize.getNumOccurrences() == 0 &&
-      MF.getFunction()->getFnAttributes().
-        hasAttribute(Attribute::OptimizeForSize))
+      MF.getFunction()->getAttributes().
+        hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize))
     MaxDuplicateCount = 1;
   else
     MaxDuplicateCount = TailDuplicateSize;
@@ -3329,8 +3329,9 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
   // instructions).
   if (Latency > 0 && Subtarget.isThumb2()) {
     const MachineFunction *MF = DefMI->getParent()->getParent();
-    if (MF->getFunction()->getFnAttributes().
-          hasAttribute(Attribute::OptimizeForSize))
+    if (MF->getFunction()->getAttributes().
+          hasAttribute(AttributeSet::FunctionIndex,
+                       Attribute::OptimizeForSize))
       --Latency;
   }
   return Latency;
@@ -326,7 +326,8 @@ needsStackRealignment(const MachineFunction &MF) const {
   unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
   bool requiresRealignment =
     ((MFI->getMaxAlignment() > StackAlign) ||
-     F->getFnAttributes().hasAttribute(Attribute::StackAlignment));
+     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                     Attribute::StackAlignment));
 
   return requiresRealignment && canRealignStack(MF);
 }
@@ -1152,7 +1152,8 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) {
     return;
 
   // Naked functions don't spill callee-saved registers.
-  if (MF.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked))
+  if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                                     Attribute::Naked))
     return;
 
   // We are planning to use NEON instructions vst1 / vld1.
@@ -1616,8 +1616,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 
   // FIXME: handle tail calls differently.
   unsigned CallOpc;
-  bool HasMinSizeAttr = MF.getFunction()->getFnAttributes().
-    hasAttribute(Attribute::MinSize);
+  bool HasMinSizeAttr = MF.getFunction()->getAttributes().
+    hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
   if (Subtarget->isThumb()) {
     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
       CallOpc = ARMISD::CALL_NOLINK;
@@ -6689,8 +6689,9 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
     UnitSize = 2;
   } else {
     // Check whether we can use NEON instructions.
-    if (!MF->getFunction()->getFnAttributes().
-          hasAttribute(Attribute::NoImplicitFloat) &&
+    if (!MF->getFunction()->getAttributes().
+          hasAttribute(AttributeSet::FunctionIndex,
+                       Attribute::NoImplicitFloat) &&
         Subtarget->hasNEON()) {
       if ((Align % 16 == 0) && SizeVal >= 16) {
         ldrOpc = ARM::VLD1q32wb_fixed;
@@ -9458,7 +9459,8 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
     // See if we can use NEON instructions for this...
     if ((!IsMemset || ZeroMemset) &&
         Subtarget->hasNEON() &&
-        !F->getFnAttributes().hasAttribute(Attribute::NoImplicitFloat)) {
+        !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                         Attribute::NoImplicitFloat)) {
       bool Fast;
       if (Size >= 16 &&
           (memOpAlign(SrcAlign, DstAlign, 16) ||
@@ -961,9 +961,11 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
   STI = &TM.getSubtarget<ARMSubtarget>();
 
   // Optimizing / minimizing size?
-  Attribute FnAttrs = MF.getFunction()->getFnAttributes();
-  OptimizeSize = FnAttrs.hasAttribute(Attribute::OptimizeForSize);
-  MinimizeSize = FnAttrs.hasAttribute(Attribute::MinSize);
+  AttributeSet FnAttrs = MF.getFunction()->getAttributes();
+  OptimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::OptimizeForSize);
+  MinimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::MinSize);
 
   bool Modified = false;
   for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
@@ -198,8 +198,8 @@ void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const {
   // to adjust the stack pointer (we fit in the Red Zone). For 64-bit
   // SVR4, we also require a stack frame if we need to spill the CR,
   // since this spill area is addressed relative to the stack pointer.
-  bool DisableRedZone = MF.getFunction()->getFnAttributes().
-    hasAttribute(Attribute::NoRedZone);
+  bool DisableRedZone = MF.getFunction()->getAttributes().
+    hasAttribute(AttributeSet::FunctionIndex, Attribute::NoRedZone);
   // FIXME SVR4 The 32-bit SVR4 ABI has no red zone. However, it can
   // still generate stackless code if all local vars are reg-allocated.
   // Try: (FrameSize <= 224
@@ -261,7 +261,8 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
 
   // Naked functions have no stack frame pushed, so we don't have a frame
   // pointer.
-  if (MF.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked))
+  if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                                     Attribute::Naked))
     return false;
 
   return MF.getTarget().Options.DisableFramePointerElim(MF) ||
@@ -6822,8 +6822,8 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
   bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) ||
                MFI->hasVarSizedObjects()) &&
               MFI->getStackSize() &&
-              !MF.getFunction()->getFnAttributes().
-                hasAttribute(Attribute::Naked);
+              !MF.getFunction()->getAttributes().
+                hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked);
   unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
                                 (is31 ? PPC::R31 : PPC::R1);
   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
@@ -597,7 +597,8 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   // to Offset to get the correct offset.
   // Naked functions have stack size 0, although getStackSize may not reflect that
   // because we didn't call all the pieces that compute it for naked functions.
-  if (!MF.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked))
+  if (!MF.getFunction()->getAttributes().
+        hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked))
     Offset += MFI->getStackSize();
 
   // If we can, encode the offset directly into the instruction. If this is a
@@ -691,7 +691,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
   // pointer, calls, or dynamic alloca then we do not need to adjust the
   // stack pointer (we fit in the Red Zone). We also check that we don't
   // push and pop from the stack.
-  if (Is64Bit && !Fn->getFnAttributes().hasAttribute(Attribute::NoRedZone) &&
+  if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                                   Attribute::NoRedZone) &&
       !RegInfo->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->adjustsStack() &&                      // No calls.
@@ -431,8 +431,8 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
 
 void X86DAGToDAGISel::PreprocessISelDAG() {
   // OptForSize is used in pattern predicates that isel is matching.
-  OptForSize = MF->getFunction()->getFnAttributes().
-    hasAttribute(Attribute::OptimizeForSize);
+  OptForSize = MF->getFunction()->getAttributes().
+    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
 
   for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
        E = CurDAG->allnodes_end(); I != E; ) {
@@ -1389,7 +1389,8 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                        MachineFunction &MF) const {
   const Function *F = MF.getFunction();
   if ((!IsMemset || ZeroMemset) &&
-      !F->getFnAttributes().hasAttribute(Attribute::NoImplicitFloat)) {
+      !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                       Attribute::NoImplicitFloat)) {
     if (Size >= 16 &&
         (Subtarget->isUnalignedMemAccessFast() ||
          ((DstAlign == 0 || DstAlign >= 16) &&
@@ -2068,8 +2069,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
       unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
                                                        TotalNumIntRegs);
 
-      bool NoImplicitFloatOps = Fn->getFnAttributes().
-        hasAttribute(Attribute::NoImplicitFloat);
+      bool NoImplicitFloatOps = Fn->getAttributes().
+        hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
       assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
              "SSE register cannot be used when SSE is disabled!");
       assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat &&
@@ -2547,8 +2548,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
         OpFlags = X86II::MO_DARWIN_STUB;
     } else if (Subtarget->isPICStyleRIPRel() &&
                isa<Function>(GV) &&
-               cast<Function>(GV)->getFnAttributes().
-                 hasAttribute(Attribute::NonLazyBind)) {
+               cast<Function>(GV)->getAttributes().
+                 hasAttribute(AttributeSet::FunctionIndex,
+                              Attribute::NonLazyBind)) {
       // If the function is marked as non-lazy, generate an indirect call
       // which loads from the GOT directly. This avoids runtime overhead
      // at the cost of eager binding (and one extra byte of encoding).
@@ -6734,8 +6736,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
   bool HasFp256 = Subtarget->hasFp256();
   bool HasInt256 = Subtarget->hasInt256();
   MachineFunction &MF = DAG.getMachineFunction();
-  bool OptForSize = MF.getFunction()->getFnAttributes().
-    hasAttribute(Attribute::OptimizeForSize);
+  bool OptForSize = MF.getFunction()->getAttributes().
+    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
 
   assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
 
@@ -10103,8 +10105,9 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
     // Sanity Check: Make sure using fp_offset makes sense.
     assert(!getTargetMachine().Options.UseSoftFloat &&
            !(DAG.getMachineFunction()
-             .getFunction()->getFnAttributes()
-             .hasAttribute(Attribute::NoImplicitFloat)) &&
+             .getFunction()->getAttributes()
+             .hasAttribute(AttributeSet::FunctionIndex,
+                           Attribute::NoImplicitFloat)) &&
            Subtarget->hasSSE1());
   }
 
@@ -16574,8 +16577,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     return SDValue();
 
   const Function *F = DAG.getMachineFunction().getFunction();
-  bool NoImplicitFloatOps = F->getFnAttributes().
-    hasAttribute(Attribute::NoImplicitFloat);
+  bool NoImplicitFloatOps = F->getAttributes().
+    hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
   bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
                      && Subtarget->hasSSE2();
   if ((VT.isVector() ||
@@ -3864,8 +3864,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
 
   // Unless optimizing for size, don't fold to avoid partial
   // register update stalls
-  if (!MF.getFunction()->getFnAttributes().
-        hasAttribute(Attribute::OptimizeForSize) &&
+  if (!MF.getFunction()->getAttributes().
+        hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
       hasPartialRegUpdate(MI->getOpcode()))
     return 0;
 
@@ -3906,8 +3906,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
 
   // Unless optimizing for size, don't fold to avoid partial
   // register update stalls
-  if (!MF.getFunction()->getFnAttributes().
-        hasAttribute(Attribute::OptimizeForSize) &&
+  if (!MF.getFunction()->getAttributes().
+        hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
      hasPartialRegUpdate(MI->getOpcode()))
    return 0;
@@ -419,7 +419,8 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
   unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
   bool requiresRealignment =
     ((MFI->getMaxAlignment() > StackAlign) ||
-     F->getFnAttributes().hasAttribute(Attribute::StackAlignment));
+     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                     Attribute::StackAlignment));
 
   // If we've requested that we force align the stack do so now.
   if (ForceStackAlign)
@@ -87,7 +87,8 @@ InlineCost AlwaysInliner::getInlineCost(CallSite CS) {
   // that are viable for inlining. FIXME: We shouldn't even get here for
   // declarations.
   if (Callee && !Callee->isDeclaration() &&
-      Callee->getFnAttributes().hasAttribute(Attribute::AlwaysInline) &&
+      Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                           Attribute::AlwaysInline) &&
       CA.isInlineViable(*Callee))
     return InlineCost::getAlways();
 
@@ -93,10 +93,13 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
 
   // If the inlined function had a higher stack protection level than the
   // calling function, then bump up the caller's stack protection level.
-  if (Callee->getFnAttributes().hasAttribute(Attribute::StackProtectReq))
+  if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                           Attribute::StackProtectReq))
     Caller->addFnAttr(Attribute::StackProtectReq);
-  else if (Callee->getFnAttributes().hasAttribute(Attribute::StackProtect) &&
-           !Caller->getFnAttributes().hasAttribute(Attribute::StackProtectReq))
+  else if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                                Attribute::StackProtect) &&
+           !Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                                 Attribute::StackProtectReq))
     Caller->addFnAttr(Attribute::StackProtect);
 
   // Look at all of the allocas that we inlined through this call site. If we
@@ -209,7 +212,8 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
   // would decrease the threshold.
   Function *Caller = CS.getCaller();
   bool OptSize = Caller && !Caller->isDeclaration() &&
-    Caller->getFnAttributes().hasAttribute(Attribute::OptimizeForSize);
+    Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                         Attribute::OptimizeForSize);
   if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
       OptSizeThreshold < thres)
     thres = OptSizeThreshold;
@@ -218,9 +222,11 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
   // and the caller does not need to minimize its size.
   Function *Callee = CS.getCalledFunction();
   bool InlineHint = Callee && !Callee->isDeclaration() &&
-    Callee->getFnAttributes().hasAttribute(Attribute::InlineHint);
+    Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                         Attribute::InlineHint);
   if (InlineHint && HintThreshold > thres
-      && !Caller->getFnAttributes().hasAttribute(Attribute::MinSize))
+      && !Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                               Attribute::MinSize))
     thres = HintThreshold;
 
   return thres;
@@ -536,7 +542,8 @@ bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
     // about always-inline functions. This is a bit of a hack to share code
     // between here and the InlineAlways pass.
     if (AlwaysInlineOnly &&
-        !F->getFnAttributes().hasAttribute(Attribute::AlwaysInline))
+        !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                         Attribute::AlwaysInline))
       continue;
 
     // If the only remaining users of the function are dead constants, remove
@@ -1018,7 +1018,8 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   // If needed, insert __asan_init before checking for AddressSafety attr.
   maybeInsertAsanInitAtFunctionEntry(F);
 
-  if (!F.getFnAttributes().hasAttribute(Attribute::AddressSafety))
+  if (!F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::AddressSafety))
     return false;
 
   if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
@@ -150,7 +150,8 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
   TLInfo = &getAnalysis<TargetLibraryInfo>();
   DT = getAnalysisIfAvailable<DominatorTree>();
   PFI = getAnalysisIfAvailable<ProfileInfo>();
-  OptSize = F.getFnAttributes().hasAttribute(Attribute::OptimizeForSize);
+  OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                           Attribute::OptimizeForSize);
 
   /// This optimization identifies DIV instructions that can be
   /// profitably bypassed and carried out with a shorter, faster divide.
@@ -146,8 +146,9 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
   // not user specified.
   unsigned Threshold = CurrentThreshold;
   if (!UserThreshold &&
-      Header->getParent()->getFnAttributes().
-        hasAttribute(Attribute::OptimizeForSize))
+      Header->getParent()->getAttributes().
+        hasAttribute(AttributeSet::FunctionIndex,
+                     Attribute::OptimizeForSize))
     Threshold = OptSizeUnrollThreshold;
 
   // Find trip count and trip multiple if count is not available
@@ -646,7 +646,8 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
 
   // Do not do non-trivial unswitch while optimizing for size.
   if (OptimizeForSize ||
-      F->getFnAttributes().hasAttribute(Attribute::OptimizeForSize))
+      F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+                                      Attribute::OptimizeForSize))
     return false;
 
   UnswitchNontrivialCondition(LoopCond, Val, currentLoop);
@@ -95,7 +95,8 @@ struct LoopVectorize : public LoopPass {
     // optimized for size.
     Function *F = L->getHeader()->getParent();
     Attribute::AttrKind SzAttr= Attribute::OptimizeForSize;
-    bool OptForSize = F->getFnAttributes().hasAttribute(SzAttr);
+    bool OptForSize =
+      F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, SzAttr);
 
     unsigned VF = CM.selectVectorizationFactor(OptForSize, VectorizationFactor);
 
@@ -1601,9 +1601,8 @@ void AssemblyWriter::printFunction(const Function *F) {
   Out << ')';
   if (F->hasUnnamedAddr())
     Out << " unnamed_addr";
-  Attribute FnAttrs = Attrs.getFnAttributes();
-  if (FnAttrs.hasAttributes())
-    Out << ' ' << Attrs.getFnAttributes().getAsString();
+  if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
+    Out << ' ' << Attrs.getAsString(AttributeSet::FunctionIndex);
   if (F->hasSection()) {
     Out << " section \"";
     PrintEscapedString(F->getSection(), Out);
@@ -1875,8 +1874,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
       writeParamOperand(CI->getArgOperand(op), PAL.getParamAttributes(op + 1));
     }
     Out << ')';
-    if (PAL.getFnAttributes().hasAttributes())
-      Out << ' ' << PAL.getFnAttributes().getAsString();
+    if (PAL.hasAttributes(AttributeSet::FunctionIndex))
+      Out << ' ' << PAL.getAsString(AttributeSet::FunctionIndex);
   } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
     Operand = II->getCalledValue();
     PointerType *PTy = cast<PointerType>(Operand->getType());
@@ -1915,8 +1914,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
     }
 
     Out << ')';
-    if (PAL.getFnAttributes().hasAttributes())
-      Out << ' ' << PAL.getFnAttributes().getAsString();
+    if (PAL.hasAttributes(AttributeSet::FunctionIndex))
+      Out << ' ' << PAL.getAsString(AttributeSet::FunctionIndex);
 
     Out << "\n to ";
     writeOperand(II->getNormalDest(), true);
@@ -87,7 +87,7 @@ public:
 /// \class
 /// \brief This class represents a set of attributes.
 class AttributeSetImpl : public FoldingSetNode {
-  // AttributesList is uniqued, these should not be publicly available.
+  // AttributesSet is uniqued, these should not be publicly available.
   void operator=(const AttributeSetImpl &) LLVM_DELETED_FUNCTION;
   AttributeSetImpl(const AttributeSetImpl &) LLVM_DELETED_FUNCTION;
 public:
@@ -451,6 +451,27 @@ const AttributeWithIndex &AttributeSet::getSlot(unsigned Slot) const {
   return AttrList->Attrs[Slot];
 }
 
+bool AttributeSet::hasAttribute(unsigned Index, Attribute::AttrKind Kind) const{
+  return getAttributes(Index).hasAttribute(Kind);
+}
+
+bool AttributeSet::hasAttributes(unsigned Index) const {
+  return getAttributes(Index).hasAttributes();
+}
+
+std::string AttributeSet::getAsString(unsigned Index) const {
+  return getAttributes(Index).getAsString();
+}
+
+unsigned AttributeSet::getStackAlignment(unsigned Index) const {
+  return getAttributes(Index).getStackAlignment();
+}
+
+uint64_t AttributeSet::getBitMask(unsigned Index) const {
+  // FIXME: Remove this.
+  return getAttributes(Index).getBitMask();
+}
+
 /// getAttributes - The attributes for the specified index are returned.
 /// Attribute for the result are denoted with Idx = 0. Function notes are
 /// denoted with idx = ~0.
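Each of these definitions is a thin forwarder: it fetches the Attribute object stored at the given index and delegates the query, so the semantics of existing checks are unchanged; only the spelling at call sites moves to the index-taking form. A minimal usage sketch (illustrative only; assumes a Function &F in scope and llvm/Support/raw_ostream.h for errs()):

AttributeSet AS = F.getAttributes();
if (AS.hasAttributes(AttributeSet::FunctionIndex))          // any fn attrs at all?
  errs() << AS.getAsString(AttributeSet::FunctionIndex);    // e.g. "nounwind optsize"
unsigned FnStackAlign = AS.getStackAlignment(AttributeSet::FunctionIndex);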
@@ -1401,8 +1401,7 @@ void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) {
 LLVMAttribute LLVMGetFunctionAttr(LLVMValueRef Fn) {
   Function *Func = unwrap<Function>(Fn);
   const AttributeSet PAL = Func->getAttributes();
-  Attribute attr = PAL.getFnAttributes();
-  return (LLVMAttribute)attr.getBitMask();
+  return (LLVMAttribute)PAL.getBitMask(AttributeSet::FunctionIndex);
 }
 
 /*--.. Operations on parameters ............................................--*/