[Transforms] Migrate from getNumArgOperands to arg_size (NFC)

Note that getNumArgOperands is considered a legacy name.  See
llvm/include/llvm/IR/InstrTypes.h for details.
Kazu Hirata 2021-10-01 09:57:40 -07:00
parent 5b44c716ee
commit 4f0225f6d2
26 changed files with 67 additions and 71 deletions
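For reference, the rename is purely mechanical: CallBase (declared in llvm/include/llvm/IR/InstrTypes.h) documents getNumArgOperands() as a legacy synonym for arg_size(), so both return the number of call-argument operands. A minimal before/after sketch, assuming the LLVM C++ API; the visitArgs helper is hypothetical:

#include "llvm/IR/InstrTypes.h" // llvm::CallBase

// Hypothetical helper showing the pattern this commit rewrites throughout.
void visitArgs(llvm::CallBase &CB) {
  // Before: for (unsigned I = 0, E = CB.getNumArgOperands(); I != E; ++I)
  // After: arg_size() is the preferred spelling of the same count.
  for (unsigned I = 0, E = CB.arg_size(); I != E; ++I) {
    llvm::Value *Arg = CB.getArgOperand(I);
    (void)Arg; // inspect or transform the argument here
  }
  // Where no index is needed, a range-based loop over CB.args() is equivalent.
}

Because both accessors yield the same value, the change is NFC (no functional change).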


@@ -1355,7 +1355,7 @@ struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
}
void visitCallBase(CallBase &CB) {
- for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
+ for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
PI.setEscaped(&CB);
handleMayWrite(CB);


@@ -638,7 +638,7 @@ public:
void checkWellFormed() const;
Function *getMustTailCallFunction() const {
- if (getNumArgOperands() < 3)
+ if (arg_size() < 3)
return nullptr;
return cast<Function>(


@@ -627,7 +627,7 @@ static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
auto Slot = getSwiftErrorSlot(ValueTy);
MappedResult = Builder.CreateLoad(ValueTy, Slot);
} else {
- assert(Op->getNumArgOperands() == 1);
+ assert(Op->arg_size() == 1);
auto Value = MappedOp->getArgOperand(0);
auto ValueTy = Value->getType();
auto Slot = getSwiftErrorSlot(ValueTy);


@@ -722,7 +722,7 @@ void CoroAsyncEndInst::checkWellFormed() const {
return;
auto *FnTy =
cast<FunctionType>(MustTailCallFunc->getType()->getPointerElementType());
- if (FnTy->getNumParams() != (getNumArgOperands() - 3))
+ if (FnTy->getNumParams() != (arg_size() - 3))
fail(this,
"llvm.coro.end.async must tail call function argument type must "
"match the tail arguments",


@@ -2592,7 +2592,7 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
getOrCreateAAFor<AAValueSimplify>(CBRetPos);
}
- for (int I = 0, E = CB.getNumArgOperands(); I < E; ++I) {
+ for (int I = 0, E = CB.arg_size(); I < E; ++I) {
IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);


@@ -2515,7 +2515,7 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
Function *Callee = CB.getCalledFunction();
if (!Callee)
return true;
- for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
+ for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
// If current argument is known to be simplified to null pointer and the
// corresponding argument position is known to have nonnull attribute,
// the argument is poison. Furthermore, if the argument is poison and
@@ -3183,8 +3183,7 @@ struct AANoAliasCallSiteArgument final : AANoAliasImpl {
// value passed at this call site.
// TODO: AbstractCallSite
const auto &CB = cast<CallBase>(getAnchorValue());
- for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
-      OtherArgNo++)
+ for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
return false;
@@ -6516,7 +6515,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
CallBase *DC = cast<CallBase>(ACS.getInstruction());
int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
- assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
+ assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
"Expected a direct call operand for callback call operand");
LLVM_DEBUG({
@@ -7733,7 +7732,7 @@ void AAMemoryLocationImpl::categorizePtrValue(
void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
bool &Changed) {
- for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
+ for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
// Skip non-pointer arguments.
const Value *ArgOp = CB.getArgOperand(ArgNo);


@@ -593,7 +593,7 @@ struct ArgumentUsesTracker : public CaptureTracker {
assert(UseIndex < CB->data_operands_size() &&
"Indirect function calls should have been filtered above!");
- if (UseIndex >= CB->getNumArgOperands()) {
+ if (UseIndex >= CB->arg_size()) {
// Data operand, but not a argument operand -- must be a bundle operand
assert(CB->hasOperandBundles() && "Must be!");
@@ -728,7 +728,7 @@ determinePointerReadAttrs(Argument *A,
assert(UseIndex < CB.data_operands_size() &&
"Data operand use expected!");
- bool IsOperandBundleUse = UseIndex >= CB.getNumArgOperands();
+ bool IsOperandBundleUse = UseIndex >= CB.arg_size();
if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
assert(F->isVarArg() && "More params than args in non-varargs call");


@@ -2100,11 +2100,11 @@ bool LowerTypeTestsModule::lower() {
auto CI = cast<CallInst>(U.getUser());
std::vector<GlobalTypeMember *> Targets;
- if (CI->getNumArgOperands() % 2 != 1)
+ if (CI->arg_size() % 2 != 1)
report_fatal_error("number of arguments should be odd");
GlobalClassesTy::member_iterator CurSet;
- for (unsigned I = 1; I != CI->getNumArgOperands(); I += 2) {
+ for (unsigned I = 1; I != CI->arg_size(); I += 2) {
int64_t Offset;
auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
CI->getOperand(I), Offset, M.getDataLayout()));


@@ -1077,8 +1077,8 @@ private:
Args.clear();
Args.push_back(OutlinedFn->getArg(0));
Args.push_back(OutlinedFn->getArg(1));
- for (unsigned U = CallbackFirstArgOperand, E = CI->getNumArgOperands();
-      U < E; ++U)
+ for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
+      ++U)
Args.push_back(CI->getArgOperand(U));
CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
@@ -1086,8 +1086,8 @@ private:
NewCI->setDebugLoc(CI->getDebugLoc());
// Forward parameter attributes from the callback to the callee.
- for (unsigned U = CallbackFirstArgOperand, E = CI->getNumArgOperands();
-      U < E; ++U)
+ for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
+      ++U)
for (const Attribute &A : CI->getAttributes().getParamAttrs(U))
NewCI->addParamAttr(
U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);
@@ -1608,7 +1608,7 @@ private:
// TODO: Use dominance to find a good position instead.
auto CanBeMoved = [this](CallBase &CB) {
- unsigned NumArgs = CB.getNumArgOperands();
+ unsigned NumArgs = CB.arg_size();
if (NumArgs == 0)
return true;
if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)


@@ -656,8 +656,8 @@ static Value *simplifyNeonTbl1(const IntrinsicInst &II,
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
unsigned NumOperands) {
- assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
- assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
+ assert(I.arg_size() >= NumOperands && "Not enough operands");
+ assert(E.arg_size() >= NumOperands && "Not enough operands");
for (unsigned i = 0; i < NumOperands; i++)
if (I.getArgOperand(i) != E.getArgOperand(i))
return false;
@@ -686,7 +686,7 @@ removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
I->getIntrinsicID() == EndI.getIntrinsicID())
continue;
if (IsStart(*I)) {
- if (haveSameOperands(EndI, *I, EndI.getNumArgOperands())) {
+ if (haveSameOperands(EndI, *I, EndI.arg_size())) {
IC.eraseInstFromFunction(*I);
IC.eraseInstFromFunction(EndI);
return true;
@@ -710,7 +710,7 @@ Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
}
static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
- assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
+ assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
Call.setArgOperand(0, Arg1);
@@ -2517,7 +2517,7 @@ static IntrinsicInst *findInitTrampoline(Value *Callee) {
}
void InstCombinerImpl::annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
- unsigned NumArgs = Call.getNumArgOperands();
+ unsigned NumArgs = Call.arg_size();
ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
ConstantInt *Op1C =
(NumArgs == 1) ? nullptr : dyn_cast<ConstantInt>(Call.getOperand(1));


@@ -962,14 +962,14 @@ static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
assert(canConstantFoldCallTo(II, cast<Function>(II->getCalledOperand())) &&
"Expected constant-foldable intrinsic");
Intrinsic::ID IID = II->getIntrinsicID();
- if (II->getNumArgOperands() == 1)
+ if (II->arg_size() == 1)
return Builder.CreateUnaryIntrinsic(IID, SO);
// This works for real binary ops like min/max (where we always expect the
// constant operand to be canonicalized as op1) and unary ops with a bonus
// constant argument like ctlz/cttz.
// TODO: Handle non-commutative binary intrinsics as below for binops.
- assert(II->getNumArgOperands() == 2 && "Expected binary intrinsic");
+ assert(II->arg_size() == 2 && "Expected binary intrinsic");
assert(isa<Constant>(II->getArgOperand(1)) && "Expected constant operand");
return Builder.CreateBinaryIntrinsic(IID, SO, II->getArgOperand(1));
}


@@ -1535,7 +1535,7 @@ void AddressSanitizer::getInterestingMemoryOperands(
Value *Mask = CI->getOperand(2 + OpOffset);
Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
} else {
- for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
+ for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
ignoreAccess(CI->getArgOperand(ArgNo)))
continue;
@@ -2982,7 +2982,8 @@ bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
if (LongSize != 32) return false;
CallInst *CI = dyn_cast<CallInst>(I);
if (!CI || !CI->isInlineAsm()) return false;
- if (CI->getNumArgOperands() <= 5) return false;
+ if (CI->arg_size() <= 5)
+   return false;
// We have inline assembly with quite a few arguments.
return true;
}


@@ -839,7 +839,7 @@ void HWAddressSanitizer::getInterestingMemoryOperands(
Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
XCHG->getCompareOperand()->getType(), None);
} else if (auto CI = dyn_cast<CallInst>(I)) {
- for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
+ for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
ignoreAccess(I, CI->getArgOperand(ArgNo)))
continue;


@@ -2674,7 +2674,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
RetTy->isX86_MMXTy()))
return false;
- unsigned NumArgOperands = I.getNumArgOperands();
+ unsigned NumArgOperands = I.arg_size();
for (unsigned i = 0; i < NumArgOperands; ++i) {
Type *Ty = I.getArgOperand(i)->getType();
if (Ty != RetTy)
@@ -2701,7 +2701,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// We special-case intrinsics where this approach fails. See llvm.bswap
/// handling as an example of that.
bool handleUnknownIntrinsic(IntrinsicInst &I) {
- unsigned NumArgOperands = I.getNumArgOperands();
+ unsigned NumArgOperands = I.arg_size();
if (NumArgOperands == 0)
return false;
@@ -2775,10 +2775,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *CopyOp, *ConvertOp;
assert((!HasRoundingMode ||
-      isa<ConstantInt>(I.getArgOperand(I.getNumArgOperands() - 1))) &&
+      isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
"Invalid rounding mode");
- switch (I.getNumArgOperands() - HasRoundingMode) {
+ switch (I.arg_size() - HasRoundingMode) {
case 2:
CopyOp = I.getArgOperand(0);
ConvertOp = I.getArgOperand(1);
@@ -2867,7 +2867,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// size, and the rest is ignored. Behavior is defined even if shift size is
// greater than register (or field) width.
void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
- assert(I.getNumArgOperands() == 2);
+ assert(I.arg_size() == 2);
IRBuilder<> IRB(&I);
// If any of the S2 bits are poisoned, the whole thing is poisoned.
// Otherwise perform the same shift on S1.
@@ -2932,7 +2932,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
// EltSizeInBits is used only for x86mmx arguments.
void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
- assert(I.getNumArgOperands() == 2);
+ assert(I.arg_size() == 2);
bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
IRBuilder<> IRB(&I);
Value *S1 = getShadow(&I, 0);


@@ -293,7 +293,7 @@ static unsigned getHashValueImpl(SimpleValue Val) {
// TODO: Extend this to handle intrinsics with >2 operands where the 1st
// 2 operands are commutative.
auto *II = dyn_cast<IntrinsicInst>(Inst);
- if (II && II->isCommutative() && II->getNumArgOperands() == 2) {
+ if (II && II->isCommutative() && II->arg_size() == 2) {
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
if (LHS > RHS)
std::swap(LHS, RHS);
@@ -363,7 +363,7 @@ static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
auto *LII = dyn_cast<IntrinsicInst>(LHSI);
auto *RII = dyn_cast<IntrinsicInst>(RHSI);
if (LII && RII && LII->getIntrinsicID() == RII->getIntrinsicID() &&
-      LII->isCommutative() && LII->getNumArgOperands() == 2) {
+      LII->isCommutative() && LII->arg_size() == 2) {
return LII->getArgOperand(0) == RII->getArgOperand(1) &&
LII->getArgOperand(1) == RII->getArgOperand(0);
}


@@ -421,13 +421,12 @@ uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
// a normal load or store instruction.
CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
- if (!local_cdep ||
-      local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
+ if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
- for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
+ for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
if (c_vn != cd_vn) {
@@ -477,11 +476,11 @@ uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
return nextValueNumber++;
}
- if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
+ if (cdep->arg_size() != C->arg_size()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
- for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
+ for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
if (c_vn != cd_vn) {


@@ -530,7 +530,7 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) {
return false;
unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
- unsigned NumArgs = CI.getNumArgOperands();
+ unsigned NumArgs = CI.arg_size();
ValueVector ScalarOperands(NumArgs);
SmallVector<Scatterer, 8> Scattered(NumArgs);


@@ -667,7 +667,7 @@ bool TailRecursionEliminator::eliminateCall(CallInst *CI) {
createTailRecurseLoopHeader(CI);
// Copy values of ByVal operands into local temporarily variables.
- for (unsigned I = 0, E = CI->getNumArgOperands(); I != E; ++I) {
+ for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
if (CI->isByValArgument(I))
copyByValueOperandIntoLocalTemp(CI, I);
}
@@ -675,7 +675,7 @@ bool TailRecursionEliminator::eliminateCall(CallInst *CI) {
// Ok, now that we know we have a pseudo-entry block WITH all of the
// required PHI nodes, add entries into the PHI node for the actual
// parameters passed into the tail-recursive call.
- for (unsigned I = 0, E = CI->getNumArgOperands(); I != E; ++I) {
+ for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
if (CI->isByValArgument(I)) {
copyLocalTempOfByValueOperandIntoArguments(CI, I);
ArgumentPHIs[I]->addIncoming(F.getArg(I), BB);


@@ -284,7 +284,7 @@ bool Evaluator::getFormalParams(CallBase &CB, Function *F,
return false;
auto *FTy = F->getFunctionType();
- if (FTy->getNumParams() > CB.getNumArgOperands()) {
+ if (FTy->getNumParams() > CB.arg_size()) {
LLVM_DEBUG(dbgs() << "Too few arguments for function.\n");
return false;
}


@@ -94,8 +94,8 @@ static void addMappingsFromTLI(const TargetLibraryInfo &TLI, CallInst &CI) {
const std::string TLIName =
std::string(TLI.getVectorizedFunction(ScalarName, VF));
if (!TLIName.empty()) {
- std::string MangledName = VFABI::mangleTLIVectorName(
-      TLIName, ScalarName, CI.getNumArgOperands(), VF);
+ std::string MangledName =
+      VFABI::mangleTLIVectorName(TLIName, ScalarName, CI.arg_size(), VF);
if (!OriginalSetOfMappings.count(MangledName)) {
Mappings.push_back(MangledName);
++NumCallInjected;


@@ -2110,7 +2110,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
SmallVector<Value*,4> VarArgsToForward;
SmallVector<AttributeSet, 4> VarArgsAttrs;
for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
-      i < CB.getNumArgOperands(); i++) {
+      i < CB.arg_size(); i++) {
VarArgsToForward.push_back(CB.getArgOperand(i));
VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
}


@@ -3254,7 +3254,7 @@ bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
if (CB.isBundleOperand(OpIdx))
return false;
- if (OpIdx < CB.getNumArgOperands()) {
+ if (OpIdx < CB.arg_size()) {
// Some variadic intrinsics require constants in the variadic arguments,
// which currently aren't markable as immarg.
if (isa<IntrinsicInst>(CB) &&


@@ -1295,13 +1295,13 @@ Value *LibCallSimplifier::optimizeCAbs(CallInst *CI, IRBuilderBase &B) {
B.setFastMathFlags(CI->getFastMathFlags());
Value *Real, *Imag;
- if (CI->getNumArgOperands() == 1) {
+ if (CI->arg_size() == 1) {
Value *Op = CI->getArgOperand(0);
assert(Op->getType()->isArrayTy() && "Unexpected signature for cabs!");
Real = B.CreateExtractValue(Op, 0, "real");
Imag = B.CreateExtractValue(Op, 1, "imag");
} else {
- assert(CI->getNumArgOperands() == 2 && "Unexpected signature for cabs!");
+ assert(CI->arg_size() == 2 && "Unexpected signature for cabs!");
Real = CI->getArgOperand(0);
Imag = CI->getArgOperand(1);
}
@@ -2298,7 +2298,7 @@ static bool isReportingError(Function *Callee, CallInst *CI, int StreamArg) {
// These functions might be considered cold, but only if their stream
// argument is stderr.
- if (StreamArg >= (int)CI->getNumArgOperands())
+ if (StreamArg >= (int)CI->arg_size())
return false;
LoadInst *LI = dyn_cast<LoadInst>(CI->getArgOperand(StreamArg));
if (!LI)
@@ -2330,7 +2330,7 @@ Value *LibCallSimplifier::optimizePrintFString(CallInst *CI, IRBuilderBase &B) {
return emitPutChar(B.getInt32(FormatStr[0]), B, TLI);
// Try to remove call or emit putchar/puts.
- if (FormatStr == "%s" && CI->getNumArgOperands() > 1) {
+ if (FormatStr == "%s" && CI->arg_size() > 1) {
StringRef OperandStr;
if (!getConstantStringInfo(CI->getOperand(1), OperandStr))
return nullptr;
@@ -2361,12 +2361,12 @@ Value *LibCallSimplifier::optimizePrintFString(CallInst *CI, IRBuilderBase &B) {
// Optimize specific format strings.
// printf("%c", chr) --> putchar(chr)
- if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
+ if (FormatStr == "%c" && CI->arg_size() > 1 &&
CI->getArgOperand(1)->getType()->isIntegerTy())
return emitPutChar(CI->getArgOperand(1), B, TLI);
// printf("%s\n", str) --> puts(str)
- if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
+ if (FormatStr == "%s\n" && CI->arg_size() > 1 &&
CI->getArgOperand(1)->getType()->isPointerTy())
return emitPutS(CI->getArgOperand(1), B, TLI);
return nullptr;
@@ -2418,7 +2418,7 @@ Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI,
// If we just have a format string (nothing else crazy) transform it.
Value *Dest = CI->getArgOperand(0);
- if (CI->getNumArgOperands() == 2) {
+ if (CI->arg_size() == 2) {
// Make sure there's no % in the constant array. We could try to handle
// %% -> % in the future if we cared.
if (FormatStr.find('%') != StringRef::npos)
@@ -2434,8 +2434,7 @@ Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI,
// The remaining optimizations require the format string to be "%s" or "%c"
// and have an extra operand.
- if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
-      CI->getNumArgOperands() < 3)
+ if (FormatStr.size() != 2 || FormatStr[0] != '%' || CI->arg_size() < 3)
return nullptr;
// Decode the second character of the format string.
@@ -2546,7 +2545,7 @@ Value *LibCallSimplifier::optimizeSnPrintFString(CallInst *CI,
return nullptr;
// If we just have a format string (nothing else crazy) transform it.
- if (CI->getNumArgOperands() == 3) {
+ if (CI->arg_size() == 3) {
// Make sure there's no % in the constant array. We could try to handle
// %% -> % in the future if we cared.
if (FormatStr.find('%') != StringRef::npos)
@@ -2568,8 +2567,7 @@ Value *LibCallSimplifier::optimizeSnPrintFString(CallInst *CI,
// The remaining optimizations require the format string to be "%s" or "%c"
// and have an extra operand.
- if (FormatStr.size() == 2 && FormatStr[0] == '%' &&
-      CI->getNumArgOperands() == 4) {
+ if (FormatStr.size() == 2 && FormatStr[0] == '%' && CI->arg_size() == 4) {
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
@@ -2637,7 +2635,7 @@ Value *LibCallSimplifier::optimizeFPrintFString(CallInst *CI,
return nullptr;
// fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
- if (CI->getNumArgOperands() == 2) {
+ if (CI->arg_size() == 2) {
// Could handle %% -> % if we cared.
if (FormatStr.find('%') != StringRef::npos)
return nullptr; // We found a format specifier.
@@ -2650,8 +2648,7 @@ Value *LibCallSimplifier::optimizeFPrintFString(CallInst *CI,
// The remaining optimizations require the format string to be "%s" or "%c"
// and have an extra operand.
- if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
-      CI->getNumArgOperands() < 3)
+ if (FormatStr.size() != 2 || FormatStr[0] != '%' || CI->arg_size() < 3)
return nullptr;
// Decode the second character of the format string.


@@ -749,7 +749,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
if (CI) {
auto *SE = PSE.getSE();
Intrinsic::ID IntrinID = getVectorIntrinsicIDForCall(CI, TLI);
- for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
+ for (unsigned i = 0, e = CI->arg_size(); i != e; ++i)
if (hasVectorInstrinsicScalarOpd(IntrinID, i)) {
if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(i)), TheLoop)) {
reportVectorizationFailure("Found unvectorizable intrinsic",


@@ -8904,7 +8904,7 @@ VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
return nullptr;
- ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
+ ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
}


@@ -528,7 +528,7 @@ static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
case Instruction::Call: {
CallInst *CI = cast<CallInst>(UserInst);
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
- for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
+ for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
if (hasVectorInstrinsicScalarOpd(ID, i))
return (CI->getArgOperand(i) == Scalar);
}
@@ -3841,7 +3841,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
return;
}
Function *F = CI->getCalledFunction();
- unsigned NumArgs = CI->getNumArgOperands();
+ unsigned NumArgs = CI->arg_size();
SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
for (unsigned j = 0; j != NumArgs; ++j)
if (hasVectorInstrinsicScalarOpd(ID, j))
@@ -3893,7 +3893,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
ReuseShuffleIndicies);
TE->setOperandsInOrder();
- for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
+ for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
ValueList Operands;
// Prepare the operand vector.
for (Value *V : VL) {
@@ -6179,7 +6179,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
std::vector<Value *> OpVecs;
SmallVector<Type *, 2> TysForDecl =
{FixedVectorType::get(CI->getType(), E->Scalars.size())};
- for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
+ for (int j = 0, e = CI->arg_size(); j < e; ++j) {
ValueList OpVL;
// Some intrinsics have scalar arguments. This argument should not be
// vectorized.