Allow invokable sub-classes of IntrinsicInst

It used to be that all of our intrinsics were call instructions, but over time we've added more and more invokable intrinsics; according to the verifier, we're up to 8 right now. Because IntrinsicInst was a subclass of CallInst, this put us in an awkward spot where the idiomatic means of checking for an intrinsic had a false negative whenever the intrinsic was invoked rather than called.

This change switches IntrinsicInst from being a subclass of CallInst to being a subclass of CallBase. Invoked intrinsics can now be instances of IntrinsicInst, at the cost of a few more casts to CallInst in places where the intrinsic really is known to be a call, not an invoke.

Planned cleanups after this lands and has baked for a couple of days:
- Make GCStatepointInst an IntrinsicInst subclass.
- Merge intrinsic handling in InstCombine and use the idiomatic visitIntrinsicInst entry point for InstVisitor.
- Do the same in SelectionDAG.
- Do the same in FastISel.

Differential Revision: https://reviews.llvm.org/D99976
parent 080d48f279
commit d87b9b81cc
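To make the false negative concrete, here is a minimal sketch of the checking pattern this change enables. The helper handleIntrinsic and its body are illustrative only, not code from the patch:

// Illustrative sketch: dispatching on an intrinsic that may be either
// called or invoked. With IntrinsicInst rooted at CallInst, the
// dyn_cast below failed whenever the intrinsic was invoked; with it
// rooted at CallBase, it succeeds for both instruction kinds.
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

static void handleIntrinsic(Instruction &I) {
  if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
    // Uniform handling now works for calls and invokes alike.
    Intrinsic::ID ID = II->getIntrinsicID();
    (void)ID;
    // Where a transform genuinely requires a call (no unwind edge), an
    // explicit cast is needed -- the pattern the diff below applies in
    // FastISel, the interpreter, and elsewhere.
    if (auto *CI = dyn_cast<CallInst>(II))
      (void)CI; // call-only handling goes here
  }
}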
@@ -154,8 +154,8 @@ public:
   // instruction to a special delegation helper.
 #define HANDLE_INST(NUM, OPCODE, CLASS) \
     RetTy visit##OPCODE(CLASS &I) { \
-      if (NUM == Instruction::Call) \
-        return delegateCallInst(I); \
+      if (NUM == Instruction::Call || NUM == Instruction::Invoke) \
+        return delegateCallBase(I); \
       else \
         DELEGATE(CLASS); \
     }
@@ -215,7 +215,12 @@ public:
   RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); }
   RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); }
   RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); }
-  RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
+  RetTy visitIntrinsicInst(IntrinsicInst &I) {
+    if (isa<CallInst>(I))
+      return static_cast<SubClass*>(this)->visitCallInst(cast<CallInst>(I));
+    else
+      return static_cast<SubClass*>(this)->visitInvokeInst(cast<InvokeInst>(I));
+  }
   RetTy visitCallInst(CallInst &I) { DELEGATE(CallBase); }
   RetTy visitInvokeInst(InvokeInst &I) { DELEGATE(CallBase); }
   RetTy visitCallBrInst(CallBrInst &I) { DELEGATE(CallBase); }
@@ -280,7 +285,7 @@ public:
 
 private:
   // Special helper function to delegate to CallInst subclass visitors.
-  RetTy delegateCallInst(CallInst &I) {
+  RetTy delegateCallBase(CallBase &I) {
     if (const Function *F = I.getCalledFunction()) {
       switch (F->getIntrinsicID()) {
       default: DELEGATE(IntrinsicInst);
@@ -296,13 +301,16 @@ private:
       case Intrinsic::not_intrinsic: break;
       }
     }
-    DELEGATE(CallInst);
+    if (isa<CallInst>(I))
+      return static_cast<SubClass*>(this)->visitCallInst(cast<CallInst>(I));
+    else
+      return static_cast<SubClass*>(this)->visitInvokeInst(cast<InvokeInst>(I));
   }
 
   // An overload that will never actually be called, it is used only from dead
   // code in the dispatching from opcodes to instruction subclasses.
-  RetTy delegateCallInst(Instruction &I) {
-    llvm_unreachable("delegateCallInst called for non-CallInst");
+  RetTy delegateCallBase(Instruction &I) {
+    llvm_unreachable("delegateCallBase called for non-CallBase");
   }
 };
@@ -13,10 +13,10 @@
 //      if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst))
 //        ... MCI->getDest() ... MCI->getSource() ...
 //
-// All intrinsic function calls are instances of the call instruction, so these
-// are all subclasses of the CallInst class. Note that none of these classes
-// has state or virtual methods, which is an important part of this gross/neat
-// hack working.
+// All intrinsic function calls are instances of the call or invoke instruction,
+// so these are all subclasses of the CallBase class. Note that none of these
+// classes has state or virtual methods, which is an important part of this
+// gross/neat hack working.
 //
 //===----------------------------------------------------------------------===//
@@ -42,7 +42,7 @@ namespace llvm {
 /// A wrapper class for inspecting calls to intrinsic functions.
 /// This allows the standard isa/dyncast/cast functionality to work with calls
 /// to intrinsic functions.
-class IntrinsicInst : public CallInst {
+class IntrinsicInst : public CallBase {
 public:
   IntrinsicInst() = delete;
   IntrinsicInst(const IntrinsicInst &) = delete;
@@ -107,13 +107,13 @@ public:
   }
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
-  static bool classof(const CallInst *I) {
+  static bool classof(const CallBase *I) {
     if (const Function *CF = I->getCalledFunction())
       return CF->isIntrinsic();
     return false;
   }
   static bool classof(const Value *V) {
-    return isa<CallInst>(V) && classof(cast<CallInst>(V));
+    return isa<CallBase>(V) && classof(cast<CallBase>(V));
   }
 };
@@ -83,7 +83,7 @@ void llvm::findDevirtualizableCallsForTypeTest(
   // Find llvm.assume intrinsics for this llvm.type.test call.
   for (const Use &CIU : CI->uses())
     if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
-      Assumes.push_back(Assume);
+      Assumes.push_back(cast<CallInst>(Assume));
 
   // If we found any, search for virtual calls based on %p and add them to
   // DevirtCalls.
@@ -1338,15 +1338,15 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
     return true;
   }
   case Intrinsic::experimental_stackmap:
-    return selectStackmap(II);
+    return selectStackmap(cast<CallInst>(II));
   case Intrinsic::experimental_patchpoint_void:
   case Intrinsic::experimental_patchpoint_i64:
-    return selectPatchpoint(II);
+    return selectPatchpoint(cast<CallInst>(II));
 
   case Intrinsic::xray_customevent:
-    return selectXRayCustomEvent(II);
+    return selectXRayCustomEvent(cast<CallInst>(II));
   case Intrinsic::xray_typedevent:
-    return selectXRayTypedEvent(II);
+    return selectXRayTypedEvent(cast<CallInst>(II));
   }
 
   return fastLowerIntrinsicCall(II);
@@ -244,7 +244,7 @@ void ShadowStackGCLowering::CollectRoots(Function &F) {
       if (Function *F = CI->getCalledFunction())
         if (F->getIntrinsicID() == Intrinsic::gcroot) {
           std::pair<CallInst *, AllocaInst *> Pair = std::make_pair(
-              CI,
+              cast<CallInst>(CI),
               cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
           if (IsNullValue(CI->getArgOperand(1)))
             Roots.push_back(Pair);
@@ -255,7 +255,7 @@ static const CallInst *findStackProtectorIntrinsic(Function &F) {
     for (const Instruction &I : BB)
       if (const auto *II = dyn_cast<IntrinsicInst>(&I))
         if (II->getIntrinsicID() == Intrinsic::stackprotector)
-          return II;
+          return cast<CallInst>(II);
   return nullptr;
 }
@@ -1143,7 +1143,7 @@ void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
   bool atBegin(Parent->begin() == Me);
   if (!atBegin)
     --Me;
-  IL->LowerIntrinsicCall(&I);
+  IL->LowerIntrinsicCall(cast<CallInst>(&I));
 
   // Restore the CurInst pointer to the first instruction newly inserted, if
   // any.
@@ -3482,7 +3482,8 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       return false;
 
     const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
-    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), IntrMemName,
+                       II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -3498,7 +3499,8 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       // address spaces.
       return false;
 
-    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memset",
+                       II->getNumArgOperands() - 1);
   }
   case Intrinsic::sin:
   case Intrinsic::cos:
@@ -1660,7 +1660,7 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (!MTI->getLength()->getType()->isIntegerTy(32))
       return false;
     const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
-    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), IntrMemName, II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -1669,7 +1669,7 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       return false;
     if (!MSI->getLength()->getType()->isIntegerTy(32))
       return false;
-    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memset", II->getNumArgOperands() - 1);
   }
   }
   return false;
@@ -2738,7 +2738,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
       return false;
 
-    return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memcpy", II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -2753,7 +2753,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (MSI->getDestAddressSpace() > 255)
       return false;
 
-    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memset", II->getNumArgOperands() - 1);
   }
   case Intrinsic::stackprotector: {
     // Emit code to store the stack guard onto the stack.
@@ -140,6 +140,7 @@ public:
   Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
   Instruction *foldItoFPtoI(CastInst &FI);
   Instruction *visitSelectInst(SelectInst &SI);
+  Instruction *visitCallBase(CallBase &Call);
   Instruction *visitCallInst(CallInst &CI);
   Instruction *visitInvokeInst(InvokeInst &II);
   Instruction *visitCallBrInst(CallBrInst &CBI);
@@ -222,7 +223,6 @@ private:
                            Instruction &CtxI, Value *&OperationResult,
                            Constant *&OverflowResult);
 
-  Instruction *visitCallBase(CallBase &Call);
   Instruction *tryOptimizeCall(CallInst *CI);
   bool transformConstExprCastCall(CallBase &Call);
   Instruction *transformCallThroughTrampoline(CallBase &Call,
@@ -4142,7 +4142,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
   Value *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<CallInst*, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
 
   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
@@ -4351,7 +4351,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
@@ -4405,7 +4405,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
-  SmallVector<CallInst*, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
 
   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
@@ -4494,7 +4494,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
@@ -4533,7 +4533,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<CallInst*, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
 
   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
@@ -4688,7 +4688,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
     // Instrument va_start, copy va_list shadow from the backup copy of
     // the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
 
       Value *VAListTag = OrigInst->getArgOperand(0);
@@ -4783,7 +4783,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
-  SmallVector<CallInst*, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
 
   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                         MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
@@ -4932,7 +4932,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
@@ -4972,7 +4972,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
   Value *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst *, 16> VAStartInstrumentationList;
 
   enum class ArgKind {
     GeneralPurpose,
@@ -5255,7 +5255,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
          VaStartNo < VaStartNum; VaStartNo++) {
-      CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
+      auto *OrigInst = VAStartInstrumentationList[VaStartNo];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       copyRegSaveArea(IRB, VAListTag);