[APInt] Normalize naming on key constructors / predicate methods.

This renames the primary method for creating a zero value to `getZero`
instead of `getNullValue`, and renames predicates like `isAllOnesValue`
to simply `isAllOnes`.  This achieves two things:

1) This starts standardizing predicates across the LLVM codebase,
   following (in this case) ConstantInt.  The word "Value" doesn't
   convey anything of merit, and it is already missing from some of
   the related APIs anyway.

2) Calling an integer "null" doesn't make any sense.  The original sin
   here is mine and I've regretted it for years.  This moves us to calling
   it "zero" instead, which is correct!

APInt is widely used and I don't think anyone is keen to take massive source
breakage on anything so core, at least not all in one go.  As such, this
doesn't actually delete any entrypoints; it "soft deprecates" them with a
comment.
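
As a rough sketch of what client code looks like after migrating (illustrative
only; the function name below is hypothetical, and the soft-deprecated spellings
keep compiling because they simply forward to the new names, per the APInt.h
hunks further down):

  #include "llvm/ADT/APInt.h"
  using namespace llvm;

  void migrationExample() {
    APInt Zero = APInt::getZero(32);        // was APInt::getNullValue(32)
    APInt Mask = APInt::getAllOnes(32);     // was APInt::getAllOnesValue(32)

    bool A = Zero.isZero();                 // was isNullValue()
    bool B = Mask.isAllOnes();              // was isAllOnesValue()
    bool C = APInt(32, 1).isOne();          // was isOneValue()

    // The old names still compile; they are soft-deprecated forwarders.
    APInt Legacy = APInt::getNullValue(32);
    (void)A; (void)B; (void)C; (void)Legacy;
  }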

Included in this patch are changes to a large chunk of the codebase, but there
is more to do.  We should normalize SelectionDAG and other APIs as well, which
would make the remaining API changes more mechanical.

Differential Revision: https://reviews.llvm.org/D109483
Chris Lattner 2021-09-08 22:13:13 -07:00
parent 6355234660
commit 735f46715d
84 changed files with 405 additions and 420 deletions


@ -2675,7 +2675,7 @@ static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
QualType EltTy = VecTy->castAs<VectorType>()->getElementType();
unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
Res = llvm::APInt::getNullValue(VecSize);
Res = llvm::APInt::getZero(VecSize);
for (unsigned i = 0; i < SVal.getVectorLength(); i++) {
APValue &Elt = SVal.getVectorElt(i);
llvm::APInt EltAsInt;


@ -1642,7 +1642,7 @@ static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
} else {
assert(NumPositiveBits <= Bitwidth);
End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
Min = llvm::APInt(Bitwidth, 0);
Min = llvm::APInt::getZero(Bitwidth);
}
}
return true;


@ -2247,8 +2247,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
}
IntegerLiteral AllocationSizeLiteral(
Context,
AllocationSize.getValueOr(llvm::APInt::getNullValue(SizeTyWidth)),
Context, AllocationSize.getValueOr(llvm::APInt::getZero(SizeTyWidth)),
SizeTy, SourceLocation());
// Otherwise, if we failed to constant-fold the allocation size, we'll
// just give up and pass-in something opaque, that isn't a null pointer.
@ -2593,10 +2592,9 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// FIXME: Should the Sema create the expression and embed it in the syntax
// tree? Or should the consumer just recalculate the value?
// FIXME: Using a dummy value will interact poorly with attribute enable_if.
IntegerLiteral Size(Context, llvm::APInt::getNullValue(
Context.getTargetInfo().getPointerWidth(0)),
Context.getSizeType(),
SourceLocation());
IntegerLiteral Size(
Context, llvm::APInt::getZero(Context.getTargetInfo().getPointerWidth(0)),
Context.getSizeType(), SourceLocation());
AllocArgs.push_back(&Size);
QualType AlignValT = Context.VoidTy;


@ -17153,7 +17153,7 @@ static bool actOnOMPReductionKindClause(
} else if (Type->isScalarType()) {
uint64_t Size = Context.getTypeSize(Type);
QualType IntTy = Context.getIntTypeForBitwidth(Size, /*Signed=*/0);
llvm::APInt InitValue = llvm::APInt::getAllOnesValue(Size);
llvm::APInt InitValue = llvm::APInt::getAllOnes(Size);
Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc);
}
if (Init && OrigType->isAnyComplexType()) {
@ -18361,7 +18361,7 @@ Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
Expr::EvalResult Result;
if (Length && !Length->isValueDependent() &&
Length->EvaluateAsInt(Result, Context) &&
Result.Val.getInt().isNullValue()) {
Result.Val.getInt().isZero()) {
Diag(ELoc,
diag::err_omp_depend_zero_length_array_section_not_allowed)
<< SimpleExpr->getSourceRange();
@ -18754,7 +18754,7 @@ public:
Expr::EvalResult Result;
if (!AE->getIdx()->isValueDependent() &&
AE->getIdx()->EvaluateAsInt(Result, SemaRef.getASTContext()) &&
!Result.Val.getInt().isNullValue()) {
!Result.Val.getInt().isZero()) {
SemaRef.Diag(AE->getIdx()->getExprLoc(),
diag::err_omp_invalid_map_this_expr);
SemaRef.Diag(AE->getIdx()->getExprLoc(),
@ -18842,7 +18842,7 @@ public:
if (OASE->getLowerBound() && !OASE->getLowerBound()->isValueDependent() &&
OASE->getLowerBound()->EvaluateAsInt(ResultL,
SemaRef.getASTContext()) &&
!ResultL.Val.getInt().isNullValue()) {
!ResultL.Val.getInt().isZero()) {
SemaRef.Diag(OASE->getLowerBound()->getExprLoc(),
diag::err_omp_invalid_map_this_expr);
SemaRef.Diag(OASE->getLowerBound()->getExprLoc(),


@ -789,7 +789,7 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
const llvm::APInt &Size = CAT->getSize();
if (Size.isNullValue())
if (Size.isZero())
return true;
const AnalyzerOptions &Opts = SVB.getAnalyzerOptions();


@ -2946,9 +2946,9 @@ bool EmulateInstructionMIPS::Emulate_MSA_Branch_V(llvm::MCInst &insn,
bool bnz) {
bool success = false;
int32_t target = 0;
llvm::APInt wr_val = llvm::APInt::getNullValue(128);
llvm::APInt wr_val = llvm::APInt::getZero(128);
llvm::APInt fail_value = llvm::APInt::getMaxValue(128);
llvm::APInt zero_value = llvm::APInt::getNullValue(128);
llvm::APInt zero_value = llvm::APInt::getZero(128);
RegisterValue reg_value;
uint32_t wt = m_reg_info->getEncodingValue(insn.getOperand(0).getReg());


@ -2258,9 +2258,9 @@ bool EmulateInstructionMIPS64::Emulate_MSA_Branch_V(llvm::MCInst &insn,
bool bnz) {
bool success = false;
int64_t target = 0;
llvm::APInt wr_val = llvm::APInt::getNullValue(128);
llvm::APInt wr_val = llvm::APInt::getZero(128);
llvm::APInt fail_value = llvm::APInt::getMaxValue(128);
llvm::APInt zero_value = llvm::APInt::getNullValue(128);
llvm::APInt zero_value = llvm::APInt::getZero(128);
RegisterValue reg_value;
uint32_t wt = m_reg_info->getEncodingValue(insn.getOperand(0).getReg());


@ -714,7 +714,7 @@ Status Scalar::SetValueFromData(const DataExtractor &data,
return Status("insufficient data");
m_type = e_int;
m_integer =
APSInt(APInt::getNullValue(8 * byte_size), encoding == eEncodingUint);
APSInt(APInt::getZero(8 * byte_size), encoding == eEncodingUint);
if (data.GetByteOrder() == endian::InlHostByteOrder()) {
llvm::LoadIntFromMemory(m_integer, data.GetDataStart(), byte_size);
} else {


@ -173,10 +173,11 @@ public:
/// \name Value Generators
/// @{
/// Get the '0' value.
///
/// \returns the '0' value for an APInt of the specified bit-width.
static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); }
/// Get the '0' value for the specified bit-width.
static APInt getZero(unsigned numBits) { return APInt(numBits, 0); }
/// NOTE: This is soft-deprecated. Please use `getZero()` instead.
static APInt getNullValue(unsigned numBits) { return getZero(numBits); }
/// Gets maximum unsigned value of APInt for specific bit width.
static APInt getMaxValue(unsigned numBits) {
@ -208,13 +209,14 @@ public:
return getSignedMinValue(BitWidth);
}
/// Get the all-ones value.
///
/// \returns the all-ones value for an APInt of the specified bit-width.
static APInt getAllOnesValue(unsigned numBits) {
/// Return an APInt of a specified width with all bits set.
static APInt getAllOnes(unsigned numBits) {
return APInt(numBits, WORDTYPE_MAX, true);
}
/// NOTE: This is soft-deprecated. Please use `getAllOnes()` instead.
static APInt getAllOnesValue(unsigned numBits) { return getAllOnes(numBits); }
/// Return an APInt with exactly one bit set in the result.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
APInt Res(numBits, 0);
@ -340,42 +342,46 @@ public:
/// that 0 is not a positive value.
///
/// \returns true if this APInt is positive.
bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); }
bool isStrictlyPositive() const { return isNonNegative() && !isZero(); }
/// Determine if this APInt Value is non-positive (<= 0).
///
/// \returns true if this APInt is non-positive.
bool isNonPositive() const { return !isStrictlyPositive(); }
/// Determine if all bits are set
///
/// This checks to see if the value has all bits of the APInt are set or not.
bool isAllOnesValue() const {
/// Determine if all bits are set.
bool isAllOnes() const {
if (isSingleWord())
return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth);
return countTrailingOnesSlowCase() == BitWidth;
}
/// Determine if all bits are clear
///
/// This checks to see if the value has all bits of the APInt are clear or
/// not.
bool isNullValue() const { return !*this; }
/// NOTE: This is soft-deprecated. Please use `isAllOnes()` instead.
bool isAllOnesValue() const { return isAllOnes(); }
/// Determine if this value is zero, i.e. all bits are clear.
bool isZero() const { return !*this; }
/// NOTE: This is soft-deprecated. Please use `isZero()` instead.
bool isNullValue() const { return isZero(); }
/// Determine if this is a value of 1.
///
/// This checks to see if the value of this APInt is one.
bool isOneValue() const {
bool isOne() const {
if (isSingleWord())
return U.VAL == 1;
return countLeadingZerosSlowCase() == BitWidth - 1;
}
/// NOTE: This is soft-deprecated. Please use `isOne()` instead.
bool isOneValue() const { return isOne(); }
/// Determine if this is the largest unsigned value.
///
/// This checks to see if the value of this APInt is the maximum unsigned
/// value for the APInt's bit width.
bool isMaxValue() const { return isAllOnesValue(); }
bool isMaxValue() const { return isAllOnes(); }
/// Determine if this is the largest signed value.
///
@ -391,7 +397,7 @@ public:
///
/// This checks to see if the value of this APInt is the minimum unsigned
/// value for the APInt's bit width.
bool isMinValue() const { return isNullValue(); }
bool isMinValue() const { return isZero(); }
/// Determine if this is the smallest signed value.
///


@ -58,7 +58,7 @@ public:
/// that 0 is not a positive value.
///
/// \returns true if this APSInt is positive.
bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); }
bool isStrictlyPositive() const { return isNonNegative() && !isZero(); }
APSInt &operator=(APInt RHS) {
// Retain our current sign.


@ -697,7 +697,7 @@ public:
bool Extract) {
auto *Ty = cast<FixedVectorType>(InTy);
APInt DemandedElts = APInt::getAllOnesValue(Ty->getNumElements());
APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}


@ -127,7 +127,7 @@ public:
return false;
LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
LLT SrcTy = MRI.getType(SrcReg);
APInt MaskVal = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
APInt MaskVal = APInt::getAllOnes(SrcTy.getScalarSizeInBits());
auto Mask = Builder.buildConstant(
DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
if (SextSrc && (DstTy != MRI.getType(SextSrc)))


@ -621,8 +621,8 @@ public:
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
bool IsOpaque = false) {
return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
VT, IsTarget, IsOpaque);
return getConstant(APInt::getAllOnes(VT.getScalarSizeInBits()), DL, VT,
IsTarget, IsOpaque);
}
SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,


@ -191,13 +191,13 @@ public:
/// This is just a convenience method to make client code smaller for a
/// common code. It also correctly performs the comparison without the
/// potential for an assertion from getZExtValue().
bool isZero() const { return Val.isNullValue(); }
bool isZero() const { return Val.isZero(); }
/// This is just a convenience method to make client code smaller for a
/// common case. It also correctly performs the comparison without the
/// potential for an assertion from getZExtValue().
/// Determine if the value is one.
bool isOne() const { return Val.isOneValue(); }
bool isOne() const { return Val.isOne(); }
/// This function will return true iff every bit in this constant is set
/// to true.


@ -515,7 +515,7 @@ inline cst_pred_ty<is_one> m_One() {
}
struct is_zero_int {
bool isValue(const APInt &C) { return C.isNullValue(); }
bool isValue(const APInt &C) { return C.isZero(); }
};
/// Match an integer 0 or a vector with all elements equal to 0.
/// For vectors, this includes constants with undefined elements.


@ -60,7 +60,7 @@ public:
}
/// Returns true if we don't know any bits.
bool isUnknown() const { return Zero.isNullValue() && One.isNullValue(); }
bool isUnknown() const { return Zero.isZero() && One.isZero(); }
/// Resets the known state of all bits.
void resetAll() {
@ -99,10 +99,12 @@ public:
bool isNonNegative() const { return Zero.isSignBitSet(); }
/// Returns true if this value is known to be non-zero.
bool isNonZero() const { return !One.isNullValue(); }
bool isNonZero() const { return !One.isZero(); }
/// Returns true if this value is known to be positive.
bool isStrictlyPositive() const { return Zero.isSignBitSet() && !One.isNullValue(); }
bool isStrictlyPositive() const {
return Zero.isSignBitSet() && !One.isZero();
}
/// Make this value negative.
void makeNegative() {


@ -165,16 +165,16 @@ public:
switch (Pred) {
case ICmpInst::ICMP_SLT: // True if LHS s< 0
TrueIfSigned = true;
return RHS.isNullValue();
return RHS.isZero();
case ICmpInst::ICMP_SLE: // True if LHS s<= -1
TrueIfSigned = true;
return RHS.isAllOnesValue();
return RHS.isAllOnes();
case ICmpInst::ICMP_SGT: // True if LHS s> -1
TrueIfSigned = false;
return RHS.isAllOnesValue();
return RHS.isAllOnes();
case ICmpInst::ICMP_SGE: // True if LHS s>= 0
TrueIfSigned = false;
return RHS.isNullValue();
return RHS.isZero();
case ICmpInst::ICMP_UGT:
// True if LHS u> RHS and RHS == sign-bit-mask - 1
TrueIfSigned = true;


@ -362,7 +362,7 @@ void DemandedBits::performAnalysis() {
if (Instruction *J = dyn_cast<Instruction>(OI)) {
Type *T = J->getType();
if (T->isIntOrIntVectorTy())
AliveBits[J] = APInt::getAllOnesValue(T->getScalarSizeInBits());
AliveBits[J] = APInt::getAllOnes(T->getScalarSizeInBits());
else
Visited.insert(J);
Worklist.insert(J);
@ -407,7 +407,7 @@ void DemandedBits::performAnalysis() {
Type *T = OI->getType();
if (T->isIntOrIntVectorTy()) {
unsigned BitWidth = T->getScalarSizeInBits();
APInt AB = APInt::getAllOnesValue(BitWidth);
APInt AB = APInt::getAllOnes(BitWidth);
if (InputIsKnownDead) {
AB = APInt(BitWidth, 0);
} else {
@ -417,7 +417,7 @@ void DemandedBits::performAnalysis() {
Known, Known2, KnownBitsComputed);
// Keep track of uses which have no demanded bits.
if (AB.isNullValue())
if (AB.isZero())
DeadUses.insert(&OI);
else
DeadUses.erase(&OI);
@ -448,8 +448,7 @@ APInt DemandedBits::getDemandedBits(Instruction *I) {
return Found->second;
const DataLayout &DL = I->getModule()->getDataLayout();
return APInt::getAllOnesValue(
DL.getTypeSizeInBits(I->getType()->getScalarType()));
return APInt::getAllOnes(DL.getTypeSizeInBits(I->getType()->getScalarType()));
}
APInt DemandedBits::getDemandedBits(Use *U) {
@ -461,7 +460,7 @@ APInt DemandedBits::getDemandedBits(Use *U) {
// We only track integer uses, everything else produces a mask with all bits
// set
if (!T->isIntOrIntVectorTy())
return APInt::getAllOnesValue(BitWidth);
return APInt::getAllOnes(BitWidth);
if (isUseDead(U))
return APInt(BitWidth, 0);
@ -469,7 +468,7 @@ APInt DemandedBits::getDemandedBits(Use *U) {
performAnalysis();
APInt AOut = getDemandedBits(UserI);
APInt AB = APInt::getAllOnesValue(BitWidth);
APInt AB = APInt::getAllOnes(BitWidth);
KnownBits Known, Known2;
bool KnownBitsComputed = false;
@ -504,7 +503,7 @@ bool DemandedBits::isUseDead(Use *U) {
// is dead. These uses might not be explicitly present in the DeadUses map.
if (UserI->getType()->isIntOrIntVectorTy()) {
auto Found = AliveBits.find(UserI);
if (Found != AliveBits.end() && Found->second.isNullValue())
if (Found != AliveBits.end() && Found->second.isZero())
return true;
}


@ -2325,7 +2325,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
LLVM_DEBUG(dbgs() << "starting gcd\n");
++GCDapplications;
unsigned BitWidth = SE->getTypeSizeInBits(Src->getType());
APInt RunningGCD = APInt::getNullValue(BitWidth);
APInt RunningGCD = APInt::getZero(BitWidth);
// Examine Src coefficients.
// Compute running GCD and record source constant.
@ -2365,7 +2365,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
}
const SCEV *DstConst = Coefficients;
APInt ExtraGCD = APInt::getNullValue(BitWidth);
APInt ExtraGCD = APInt::getZero(BitWidth);
const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
LLVM_DEBUG(dbgs() << " Delta = " << *Delta << "\n");
const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Delta);


@ -1370,7 +1370,7 @@ bool CallAnalyzer::visitPHI(PHINode &I) {
// Or could we skip the getPointerSizeInBits call completely? As far as I can
// see the ZeroOffset is used as a dummy value, so we can probably use any
// bit width for the ZeroOffset?
APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
APInt ZeroOffset = APInt::getZero(DL.getPointerSizeInBits(0));
bool CheckSROA = I.getType()->isPointerTy();
// Track the constant or pointer with constant offset we've seen so far.
@ -2462,7 +2462,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
unsigned AS = V->getType()->getPointerAddressSpace();
unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
APInt Offset = APInt::getNullValue(IntPtrWidth);
APInt Offset = APInt::getZero(IntPtrWidth);
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.


@ -699,7 +699,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
assert(V->getType()->isPtrOrPtrVectorTy());
Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());
APInt Offset = APInt::getZero(IntIdxTy->getIntegerBitWidth());
V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
// As that strip may trace through `addrspacecast`, need to sext or trunc
@ -1765,7 +1765,7 @@ static Value *simplifyAndOrOfICmpsWithLimitConst(ICmpInst *Cmp0, ICmpInst *Cmp1,
if (match(Cmp0->getOperand(1), m_APInt(C)))
MinMaxC = HasNotOp ? ~*C : *C;
else if (isa<ConstantPointerNull>(Cmp0->getOperand(1)))
MinMaxC = APInt::getNullValue(8);
MinMaxC = APInt::getZero(8);
else
return nullptr;


@ -846,7 +846,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueSelect(
}
if (SPR.Flavor == SPF_NABS) {
ConstantRange Zero(APInt::getNullValue(TrueCR.getBitWidth()));
ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
if (LHS == SI->getTrueValue())
return ValueLatticeElement::getRange(
Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
@ -1122,7 +1122,7 @@ static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
unsigned BitWidth = Ty->getIntegerBitWidth();
return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
APInt::getOneBitSet(BitWidth, Mask->countTrailingZeros()),
APInt::getNullValue(BitWidth)));
APInt::getZero(BitWidth)));
}
}


@ -610,7 +610,7 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
Zero = APInt::getNullValue(IntTyBits);
Zero = APInt::getZero(IntTyBits);
V = V->stripPointerCasts();
if (Instruction *I = dyn_cast<Instruction>(V)) {


@ -170,7 +170,7 @@ ConstantRange getStaticAllocaSizeRange(const AllocaInst &AI) {
if (Overflow)
return R;
}
R = ConstantRange(APInt::getNullValue(PointerSize), APSize);
R = ConstantRange(APInt::getZero(PointerSize), APSize);
assert(!isUnsafe(R));
return R;
}
@ -299,8 +299,8 @@ ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr, Value *Base,
APInt APSize(PointerSize, Size.getFixedSize(), true);
if (APSize.isNegative())
return UnknownRange;
return getAccessRange(
Addr, Base, ConstantRange(APInt::getNullValue(PointerSize), APSize));
return getAccessRange(Addr, Base,
ConstantRange(APInt::getZero(PointerSize), APSize));
}
ConstantRange StackSafetyLocalAnalysis::getMemIntrinsicAccessRange(
@ -323,8 +323,7 @@ ConstantRange StackSafetyLocalAnalysis::getMemIntrinsicAccessRange(
if (Sizes.getUpper().isNegative() || isUnsafe(Sizes))
return UnknownRange;
Sizes = Sizes.sextOrTrunc(PointerSize);
ConstantRange SizeRange(APInt::getNullValue(PointerSize),
Sizes.getUpper() - 1);
ConstantRange SizeRange(APInt::getZero(PointerSize), Sizes.getUpper() - 1);
return getAccessRange(U, Base, SizeRange);
}


@ -165,7 +165,7 @@ static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
int NumElts =
cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
if (DemandedElts.isNullValue())
return true;
// Simple case of a shuffle with zeroinitializer.
@ -206,7 +206,7 @@ static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
APInt DemandedElts =
FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
computeKnownBits(V, DemandedElts, Known, Depth, Q);
}
@ -378,7 +378,7 @@ static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
APInt DemandedElts =
FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}
@ -582,7 +582,7 @@ static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
return false;
ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
return !TrueValues.contains(APInt::getNullValue(C->getBitWidth()));
return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
}
static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
@ -1210,7 +1210,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
// (dependent on endian) to form the full result of known bits.
unsigned NumElts = DemandedElts.getBitWidth();
unsigned SubScale = BitWidth / SubBitWidth;
APInt SubDemandedElts = APInt::getNullValue(NumElts * SubScale);
APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
for (unsigned i = 0; i != NumElts; ++i) {
if (DemandedElts[i])
SubDemandedElts.setBit(i * SubScale);
@ -1763,7 +1763,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
break;
}
unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
APInt DemandedVecElts = APInt::getAllOnes(NumElts);
if (CIdx && CIdx->getValue().ult(NumElts))
DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
@ -2532,7 +2532,7 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
auto *CIdx = dyn_cast<ConstantInt>(Idx);
if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
unsigned NumElts = VecTy->getNumElements();
APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
APInt DemandedVecElts = APInt::getAllOnes(NumElts);
if (CIdx && CIdx->getValue().ult(NumElts))
DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
@ -2559,7 +2559,7 @@ bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
APInt DemandedElts =
FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
return isKnownNonZero(V, DemandedElts, Depth, Q);
}
@ -6746,7 +6746,7 @@ static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
case Instruction::LShr:
if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
// 'lshr x, C' produces [0, UINT_MAX >> C].
Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
} else if (match(BO.getOperand(0), m_APInt(C))) {
// 'lshr C, x' produces [C >> (Width-1), C].
unsigned ShiftAmount = Width - 1;
@ -6956,7 +6956,7 @@ static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
// If the negation part of the abs (in RHS) has the NSW flag,
// then the result of abs(X) is [0..SIGNED_MAX],
// otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
Lower = APInt::getNullValue(BitWidth);
Lower = APInt::getZero(BitWidth);
if (match(RHS, m_Neg(m_Specific(LHS))) &&
IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
Upper = APInt::getSignedMaxValue(BitWidth) + 1;


@ -946,7 +946,7 @@ APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
const unsigned VWidth =
cast<FixedVectorType>(Mask->getType())->getNumElements();
APInt DemandedElts = APInt::getAllOnesValue(VWidth);
APInt DemandedElts = APInt::getAllOnes(VWidth);
if (auto *CV = dyn_cast<ConstantVector>(Mask))
for (unsigned i = 0; i < VWidth; i++)
if (CV->getAggregateElement(i)->isNullValue())


@ -2652,14 +2652,14 @@ bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
//
// Check if we can replace AndDst with the LHS of the G_AND
if (canReplaceReg(AndDst, LHS, MRI) &&
(LHSBits.Zero | RHSBits.One).isAllOnesValue()) {
(LHSBits.Zero | RHSBits.One).isAllOnes()) {
Replacement = LHS;
return true;
}
// Check if we can replace AndDst with the RHS of the G_AND
if (canReplaceReg(AndDst, RHS, MRI) &&
(LHSBits.One | RHSBits.Zero).isAllOnesValue()) {
(LHSBits.One | RHSBits.Zero).isAllOnes()) {
Replacement = RHS;
return true;
}


@ -57,7 +57,7 @@ KnownBits GISelKnownBits::getKnownBits(MachineInstr &MI) {
KnownBits GISelKnownBits::getKnownBits(Register R) {
const LLT Ty = MRI.getType(R);
APInt DemandedElts =
Ty.isVector() ? APInt::getAllOnesValue(Ty.getNumElements()) : APInt(1, 1);
Ty.isVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
return getKnownBits(R, DemandedElts);
}
@ -198,8 +198,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
case TargetOpcode::COPY:
case TargetOpcode::G_PHI:
case TargetOpcode::PHI: {
Known.One = APInt::getAllOnesValue(BitWidth);
Known.Zero = APInt::getAllOnesValue(BitWidth);
Known.One = APInt::getAllOnes(BitWidth);
Known.Zero = APInt::getAllOnes(BitWidth);
// Destination registers should not have subregisters at this
// point of the pipeline, otherwise the main live-range will be
// defined more than once, which is against SSA.
@ -688,9 +688,8 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
unsigned GISelKnownBits::computeNumSignBits(Register R, unsigned Depth) {
LLT Ty = MRI.getType(R);
APInt DemandedElts = Ty.isVector()
? APInt::getAllOnesValue(Ty.getNumElements())
: APInt(1, 1);
APInt DemandedElts =
Ty.isVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
return computeNumSignBits(R, DemandedElts, Depth);
}


@ -2670,7 +2670,7 @@ static Register getBitcastWiderVectorElementOffset(MachineIRBuilder &B,
// Now figure out the amount we need to shift to get the target bits.
auto OffsetMask = B.buildConstant(
IdxTy, ~(APInt::getAllOnesValue(IdxTy.getSizeInBits()) << Log2EltRatio));
IdxTy, ~(APInt::getAllOnes(IdxTy.getSizeInBits()) << Log2EltRatio));
auto OffsetIdx = B.buildAnd(IdxTy, Idx, OffsetMask);
return B.buildShl(IdxTy, OffsetIdx,
B.buildConstant(IdxTy, Log2_32(OldEltSize))).getReg(0);
@ -6287,7 +6287,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOUI(MachineInstr &MI) {
APInt TwoPExpInt = APInt::getSignMask(DstTy.getSizeInBits());
APFloat TwoPExpFP(SrcTy.getSizeInBits() == 32 ? APFloat::IEEEsingle()
: APFloat::IEEEdouble(),
APInt::getNullValue(SrcTy.getSizeInBits()));
APInt::getZero(SrcTy.getSizeInBits()));
TwoPExpFP.convertFromAPInt(TwoPExpInt, false, APFloat::rmNearestTiesToEven);
MachineInstrBuilder FPTOSI = MIRBuilder.buildFPTOSI(DstTy, Src);


@ -313,7 +313,7 @@ public:
}
// Multiplying by zero removes the coefficient B and defines all bits.
if (C.isNullValue()) {
if (C.isZero()) {
ErrorMSBs = 0;
deleteB();
}
@ -464,7 +464,7 @@ public:
return *this;
}
if (C.isNullValue())
if (C.isZero())
return *this;
// Test if the result will be zero
@ -571,7 +571,7 @@ public:
bool isProvenEqualTo(const Polynomial &o) {
// Subtract both polynomials and test if it is fully defined and zero.
Polynomial r = *this - o;
return (r.ErrorMSBs == 0) && (!r.isFirstOrder()) && (r.A.isNullValue());
return (r.ErrorMSBs == 0) && (!r.isFirstOrder()) && (r.A.isZero());
}
/// Print the polynomial into a stream.


@ -319,7 +319,7 @@ namespace {
/// If so, return true.
bool SimplifyDemandedBits(SDValue Op) {
unsigned BitWidth = Op.getScalarValueSizeInBits();
APInt DemandedBits = APInt::getAllOnesValue(BitWidth);
APInt DemandedBits = APInt::getAllOnes(BitWidth);
return SimplifyDemandedBits(Op, DemandedBits);
}
@ -345,7 +345,7 @@ namespace {
return false;
unsigned NumElts = Op.getValueType().getVectorNumElements();
APInt DemandedElts = APInt::getAllOnesValue(NumElts);
APInt DemandedElts = APInt::getAllOnes(NumElts);
return SimplifyDemandedVectorElts(Op, DemandedElts);
}
@ -5640,11 +5640,11 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// fold (and x, 0) -> 0, vector edition
if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
// do not return N0, because undef node may exist in N0
return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()),
return DAG.getConstant(APInt::getZero(N0.getScalarValueSizeInBits()),
SDLoc(N), N0.getValueType());
if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
// do not return N1, because undef node may exist in N1
return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()),
return DAG.getConstant(APInt::getZero(N1.getScalarValueSizeInBits()),
SDLoc(N), N1.getValueType());
// fold (and x, -1) -> x, vector edition
@ -5695,8 +5695,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// if (and x, c) is known to be zero, return 0
unsigned BitWidth = VT.getScalarSizeInBits();
if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
APInt::getAllOnesValue(BitWidth)))
if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), APInt::getAllOnes(BitWidth)))
return DAG.getConstant(0, SDLoc(N), VT);
if (SDValue NewSel = foldBinOpIntoSelect(N))
@ -5758,7 +5757,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// Get the constant (if applicable) the zero'th operand is being ANDed with.
// This can be a pure constant or a vector splat, in which case we treat the
// vector as a scalar and use the splat value.
APInt Constant = APInt::getNullValue(1);
APInt Constant = APInt::getZero(1);
if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
Constant = C->getAPIntValue();
} else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) {
@ -5789,7 +5788,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// Make sure that variable 'Constant' is only set if 'SplatBitSize' is a
// multiple of 'BitWidth'. Otherwise, we could propagate a wrong value.
if ((SplatBitSize % EltBitWidth) == 0) {
Constant = APInt::getAllOnesValue(EltBitWidth);
Constant = APInt::getAllOnes(EltBitWidth);
for (unsigned i = 0, n = (SplatBitSize / EltBitWidth); i < n; ++i)
Constant &= SplatValue.extractBits(EltBitWidth, i * EltBitWidth);
}
@ -7915,7 +7914,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
// shift has been simplified to undef.
uint64_t ShiftAmt = ShiftC->getLimitedValue();
if (ShiftAmt < BitWidth) {
APInt Ones = APInt::getAllOnesValue(BitWidth);
APInt Ones = APInt::getAllOnes(BitWidth);
Ones = N0Opcode == ISD::SHL ? Ones.shl(ShiftAmt) : Ones.lshr(ShiftAmt);
if (XorC->getAPIntValue() == Ones) {
// If the xor constant is a shifted -1, do a 'not' before the shift:
@ -8271,8 +8270,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
return NewSel;
// if (shl x, c) is known to be zero, return 0
if (DAG.MaskedValueIsZero(SDValue(N, 0),
APInt::getAllOnesValue(OpSizeInBits)))
if (DAG.MaskedValueIsZero(SDValue(N, 0), APInt::getAllOnes(OpSizeInBits)))
return DAG.getConstant(0, SDLoc(N), VT);
// fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
@ -8790,8 +8788,8 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return NewSel;
// if (srl x, c) is known to be zero, return 0
if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
APInt::getAllOnesValue(OpSizeInBits)))
if (N1C &&
DAG.MaskedValueIsZero(SDValue(N, 0), APInt::getAllOnes(OpSizeInBits)))
return DAG.getConstant(0, SDLoc(N), VT);
// fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
@ -16733,7 +16731,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
unsigned BitWidth = N1.getValueSizeInBits();
APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
if (Opc == ISD::AND)
Imm ^= APInt::getAllOnesValue(BitWidth);
Imm ^= APInt::getAllOnes(BitWidth);
if (Imm == 0 || Imm.isAllOnesValue())
return SDValue();
unsigned ShAmt = Imm.countTrailingZeros();
@ -16761,7 +16759,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
if ((Imm & Mask) == Imm) {
APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
if (Opc == ISD::AND)
NewImm ^= APInt::getAllOnesValue(NewBW);
NewImm ^= APInt::getAllOnes(NewBW);
uint64_t PtrOff = ShAmt / 8;
// For big endian targets, we need to adjust the offset to the pointer to
// load the correct bytes.
@ -18911,7 +18909,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
Use->getOperand(0) == VecOp &&
isa<ConstantSDNode>(Use->getOperand(1));
})) {
APInt DemandedElts = APInt::getNullValue(NumElts);
APInt DemandedElts = APInt::getZero(NumElts);
for (SDNode *Use : VecOp->uses()) {
auto *CstElt = cast<ConstantSDNode>(Use->getOperand(1));
if (CstElt->getAPIntValue().ult(NumElts))
@ -18924,7 +18922,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
AddToWorklist(N);
return SDValue(N, 0);
}
APInt DemandedBits = APInt::getAllOnesValue(VecEltBitWidth);
APInt DemandedBits = APInt::getAllOnes(VecEltBitWidth);
if (SimplifyDemandedBits(VecOp, DemandedBits, DemandedElts, true)) {
// We simplified the vector operand of this extract element. If this
// extract is not dead, visit it again so it is folded properly.


@ -3240,9 +3240,9 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
"Don't know how to expand this subtraction!");
Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
VT));
Tmp1 = DAG.getNode(
ISD::XOR, dl, VT, Node->getOperand(1),
DAG.getConstant(APInt::getAllOnes(VT.getSizeInBits()), dl, VT));
Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, dl, VT));
Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
break;


@ -257,7 +257,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FABS(SDNode *N) {
unsigned Size = NVT.getSizeInBits();
// Mask = ~(1 << (Size-1))
APInt API = APInt::getAllOnesValue(Size);
APInt API = APInt::getAllOnes(Size);
API.clearBit(Size - 1);
SDValue Mask = DAG.getConstant(API, SDLoc(N), NVT);
SDValue Op = GetSoftenedFloat(N->getOperand(0));


@ -832,7 +832,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
unsigned NewBits = PromotedType.getScalarSizeInBits();
if (Opcode == ISD::UADDSAT) {
APInt MaxVal = APInt::getAllOnesValue(OldBits).zext(NewBits);
APInt MaxVal = APInt::getAllOnes(OldBits).zext(NewBits);
SDValue SatMax = DAG.getConstant(MaxVal, dl, PromotedType);
SDValue Add =
DAG.getNode(ISD::ADD, dl, PromotedType, Op1Promoted, Op2Promoted);
@ -3698,7 +3698,7 @@ void DAGTypeLegalizer::ExpandIntRes_MULFIX(SDNode *N, SDValue &Lo,
// Saturate to signed maximum.
APInt MaxHi = APInt::getSignedMaxValue(NVTSize);
APInt MaxLo = APInt::getAllOnesValue(NVTSize);
APInt MaxLo = APInt::getAllOnes(NVTSize);
Hi = DAG.getSelect(dl, NVT, SatMax, DAG.getConstant(MaxHi, dl, NVT), Hi);
Lo = DAG.getSelect(dl, NVT, SatMax, DAG.getConstant(MaxLo, dl, NVT), Lo);
// Saturate to signed minimum.


@ -943,10 +943,10 @@ SDValue VectorLegalizer::ExpandSELECT(SDNode *Node) {
// What is the size of each element in the vector mask.
EVT BitTy = MaskTy.getScalarType();
Mask = DAG.getSelect(DL, BitTy, Mask,
DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), DL,
BitTy),
DAG.getConstant(0, DL, BitTy));
Mask = DAG.getSelect(
DL, BitTy, Mask,
DAG.getConstant(APInt::getAllOnes(BitTy.getSizeInBits()), DL, BitTy),
DAG.getConstant(0, DL, BitTy));
// Broadcast the mask so that the entire vector is all one or all zero.
if (VT.isFixedLengthVector())
@ -960,8 +960,8 @@ SDValue VectorLegalizer::ExpandSELECT(SDNode *Node) {
Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);
SDValue AllOnes = DAG.getConstant(
APInt::getAllOnesValue(BitTy.getSizeInBits()), DL, MaskTy);
SDValue AllOnes =
DAG.getConstant(APInt::getAllOnes(BitTy.getSizeInBits()), DL, MaskTy);
SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes);
Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
@ -1207,8 +1207,8 @@ SDValue VectorLegalizer::ExpandVSELECT(SDNode *Node) {
Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1);
Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);
SDValue AllOnes = DAG.getConstant(
APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL, VT);
SDValue AllOnes =
DAG.getConstant(APInt::getAllOnes(VT.getScalarSizeInBits()), DL, VT);
SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes);
Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
@ -1501,10 +1501,10 @@ void VectorLegalizer::UnrollStrictFPOp(SDNode *Node,
if (Node->getOpcode() == ISD::STRICT_FSETCC ||
Node->getOpcode() == ISD::STRICT_FSETCCS)
ScalarResult = DAG.getSelect(dl, EltVT, ScalarResult,
DAG.getConstant(APInt::getAllOnesValue
(EltVT.getSizeInBits()), dl, EltVT),
DAG.getConstant(0, dl, EltVT));
ScalarResult = DAG.getSelect(
dl, EltVT, ScalarResult,
DAG.getConstant(APInt::getAllOnes(EltVT.getSizeInBits()), dl, EltVT),
DAG.getConstant(0, dl, EltVT));
OpValues.push_back(ScalarResult);
OpChains.push_back(ScalarChain);
@ -1536,10 +1536,10 @@ SDValue VectorLegalizer::UnrollVSETCC(SDNode *Node) {
TLI.getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), TmpEltVT),
LHSElem, RHSElem, CC);
Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
DAG.getConstant(APInt::getAllOnesValue
(EltVT.getSizeInBits()), dl, EltVT),
DAG.getConstant(0, dl, EltVT));
Ops[i] = DAG.getSelect(
dl, EltVT, Ops[i],
DAG.getConstant(APInt::getAllOnes(EltVT.getSizeInBits()), dl, EltVT),
DAG.getConstant(0, dl, EltVT));
}
return DAG.getBuildVector(VT, dl, Ops);
}


@ -1349,7 +1349,7 @@ SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
EVT EltVT = VT.getScalarType();
SDValue NegOne =
getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
getConstant(APInt::getAllOnes(EltVT.getSizeInBits()), DL, VT);
return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
@ -2408,7 +2408,7 @@ SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
return SDValue();
APInt DemandedElts = VT.isVector()
? APInt::getAllOnesValue(VT.getVectorNumElements())
? APInt::getAllOnes(VT.getVectorNumElements())
: APInt(1, 1);
return GetDemandedBits(V, DemandedBits, DemandedElts);
}
@ -2503,7 +2503,7 @@ bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
switch (V.getOpcode()) {
case ISD::SPLAT_VECTOR:
UndefElts = V.getOperand(0).isUndef()
? APInt::getAllOnesValue(DemandedElts.getBitWidth())
? APInt::getAllOnes(DemandedElts.getBitWidth())
: APInt(DemandedElts.getBitWidth(), 0);
return true;
case ISD::ADD:
@ -2535,7 +2535,7 @@ bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
unsigned NumElts = VT.getVectorNumElements();
assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
UndefElts = APInt::getNullValue(NumElts);
UndefElts = APInt::getZero(NumElts);
switch (V.getOpcode()) {
case ISD::BUILD_VECTOR: {
@ -2604,7 +2604,7 @@ bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
// For now we don't support this with scalable vectors.
if (!VT.isScalableVector())
DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
return isSplatValue(V, DemandedElts, UndefElts) &&
(AllowUndefs || !UndefElts);
}
@ -2620,7 +2620,7 @@ SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
APInt DemandedElts;
if (!VT.isScalableVector())
DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
if (isSplatValue(V, DemandedElts, UndefElts)) {
if (VT.isScalableVector()) {
@ -2768,7 +2768,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
}
APInt DemandedElts = VT.isVector()
? APInt::getAllOnesValue(VT.getVectorNumElements())
? APInt::getAllOnes(VT.getVectorNumElements())
: APInt(1, 1);
return computeKnownBits(Op, DemandedElts, Depth);
}
@ -2906,7 +2906,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
APInt DemandedSrcElts = DemandedElts;
DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
Known.One.setAllBits();
Known.Zero.setAllBits();
@ -3443,7 +3443,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
// If we know the element index, just demand that vector element, else for
// an unknown element index, ignore DemandedElts and demand them all.
APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
DemandedSrcElts =
@ -3697,7 +3697,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
return 1;
APInt DemandedElts = VT.isVector()
? APInt::getAllOnesValue(VT.getVectorNumElements())
? APInt::getAllOnes(VT.getVectorNumElements())
: APInt(1, 1);
return ComputeNumSignBits(Op, DemandedElts, Depth);
}
@ -4114,7 +4114,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
// If we know the element index, just demand that vector element, else for
// an unknown element index, ignore DemandedElts and demand them all.
APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
DemandedSrcElts =
@ -4160,7 +4160,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
APInt DemandedSrcElts = DemandedElts;
DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
Tmp = std::numeric_limits<unsigned>::max();
if (!!DemandedSubElts) {
@ -4294,7 +4294,7 @@ bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly,
return false;
APInt DemandedElts = VT.isVector()
? APInt::getAllOnesValue(VT.getVectorNumElements())
? APInt::getAllOnes(VT.getVectorNumElements())
: APInt(1, 1);
return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, PoisonOnly, Depth);
}
@ -4720,7 +4720,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: {
APFloat apf(EVTToAPFloatSemantics(VT),
APInt::getNullValue(VT.getSizeInBits()));
APInt::getZero(VT.getSizeInBits()));
(void)apf.convertFromAPInt(Val,
Opcode==ISD::SINT_TO_FP,
APFloat::rmNearestTiesToEven);
@ -10799,7 +10799,7 @@ SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
}
SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
APInt DemandedElts = APInt::getAllOnes(getNumOperands());
return getSplatValue(DemandedElts, UndefElements);
}
@ -10851,7 +10851,7 @@ bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
BitVector *UndefElements) const {
APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
APInt DemandedElts = APInt::getAllOnes(getNumOperands());
return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
}


@ -537,7 +537,7 @@ bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
TargetLoweringOpt &TLO) const {
EVT VT = Op.getValueType();
APInt DemandedElts = VT.isVector()
? APInt::getAllOnesValue(VT.getVectorNumElements())
? APInt::getAllOnes(VT.getVectorNumElements())
: APInt(1, 1);
return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}
@ -621,7 +621,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
}
APInt DemandedElts = VT.isVector()
? APInt::getAllOnesValue(VT.getVectorNumElements())
? APInt::getAllOnes(VT.getVectorNumElements())
: APInt(1, 1);
return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
AssumeSingleUse);
@ -667,8 +667,8 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
DAG.getDataLayout().isLittleEndian()) {
unsigned Scale = NumDstEltBits / NumSrcEltBits;
unsigned NumSrcElts = SrcVT.getVectorNumElements();
APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
for (unsigned i = 0; i != Scale; ++i) {
unsigned Offset = i * NumSrcEltBits;
APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
@ -690,8 +690,8 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
DAG.getDataLayout().isLittleEndian()) {
unsigned Scale = NumSrcEltBits / NumDstEltBits;
unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
for (unsigned i = 0; i != NumElts; ++i)
if (DemandedElts[i]) {
unsigned Offset = (i % Scale) * NumDstEltBits;
@ -874,7 +874,7 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
unsigned Depth) const {
EVT VT = Op.getValueType();
APInt DemandedElts = VT.isVector()
? APInt::getAllOnesValue(VT.getVectorNumElements())
? APInt::getAllOnes(VT.getVectorNumElements())
: APInt(1, 1);
return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
Depth);
@ -883,7 +883,7 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
unsigned Depth) const {
APInt DemandedBits = APInt::getAllOnesValue(Op.getScalarValueSizeInBits());
APInt DemandedBits = APInt::getAllOnes(Op.getScalarValueSizeInBits());
return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
Depth);
}
@ -950,8 +950,8 @@ bool TargetLowering::SimplifyDemandedBits(
}
// If this is the root being simplified, allow it to have multiple uses,
// just set the DemandedBits/Elts to all bits.
DemandedBits = APInt::getAllOnesValue(BitWidth);
DemandedElts = APInt::getAllOnesValue(NumElts);
DemandedBits = APInt::getAllOnes(BitWidth);
DemandedElts = APInt::getAllOnes(NumElts);
} else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
// Not demanding any bits/elts from Op.
return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
@ -1046,7 +1046,7 @@ bool TargetLowering::SimplifyDemandedBits(
unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
APInt DemandedSrcElts = DemandedElts;
DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
KnownBits KnownSub, KnownSrc;
if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
@ -1064,8 +1064,8 @@ bool TargetLowering::SimplifyDemandedBits(
Known = KnownBits::commonBits(Known, KnownSrc);
// Attempt to avoid multi-use src if we don't need anything from it.
if (!DemandedBits.isAllOnesValue() || !DemandedSubElts.isAllOnesValue() ||
!DemandedSrcElts.isAllOnesValue()) {
if (!DemandedBits.isAllOnes() || !DemandedSubElts.isAllOnes() ||
!DemandedSrcElts.isAllOnes()) {
SDValue NewSub = SimplifyMultipleUseDemandedBits(
Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
@ -1094,7 +1094,7 @@ bool TargetLowering::SimplifyDemandedBits(
return true;
// Attempt to avoid multi-use src if we don't need anything from it.
if (!DemandedBits.isAllOnesValue() || !DemandedSrcElts.isAllOnesValue()) {
if (!DemandedBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) {
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
if (DemandedSrc) {
@ -1224,7 +1224,7 @@ bool TargetLowering::SimplifyDemandedBits(
assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
// Attempt to avoid multi-use ops if we don't need anything from them.
if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
@ -1271,7 +1271,7 @@ bool TargetLowering::SimplifyDemandedBits(
assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
// Attempt to avoid multi-use ops if we don't need anything from them.
if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
@ -1314,7 +1314,7 @@ bool TargetLowering::SimplifyDemandedBits(
assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
// Attempt to avoid multi-use ops if we don't need anything from them.
if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
@ -1663,7 +1663,7 @@ bool TargetLowering::SimplifyDemandedBits(
Known.One.setHighBits(ShAmt);
// Attempt to avoid multi-use ops if we don't need anything from them.
if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
if (!InDemandedMask.isAllOnes() || !DemandedElts.isAllOnes()) {
SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
if (DemandedOp0) {
@ -2079,7 +2079,7 @@ bool TargetLowering::SimplifyDemandedBits(
// Demand the bits from every vector element without a constant index.
unsigned NumSrcElts = SrcEltCnt.getFixedValue();
APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
if (CIdx->getAPIntValue().ult(NumSrcElts))
DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue());
@ -2095,8 +2095,7 @@ bool TargetLowering::SimplifyDemandedBits(
return true;
// Attempt to avoid multi-use ops if we don't need anything from them.
if (!DemandedSrcBits.isAllOnesValue() ||
!DemandedSrcElts.isAllOnesValue()) {
if (!DemandedSrcBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) {
if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {
SDValue NewOp =
@ -2146,8 +2145,8 @@ bool TargetLowering::SimplifyDemandedBits(
TLO.DAG.getDataLayout().isLittleEndian()) {
unsigned Scale = BitWidth / NumSrcEltBits;
unsigned NumSrcElts = SrcVT.getVectorNumElements();
APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
for (unsigned i = 0; i != Scale; ++i) {
unsigned Offset = i * NumSrcEltBits;
APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
@ -2172,8 +2171,8 @@ bool TargetLowering::SimplifyDemandedBits(
TLO.DAG.getDataLayout().isLittleEndian()) {
unsigned Scale = NumSrcEltBits / BitWidth;
unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
for (unsigned i = 0; i != NumElts; ++i)
if (DemandedElts[i]) {
unsigned Offset = (i % Scale) * BitWidth;
@ -2230,7 +2229,7 @@ bool TargetLowering::SimplifyDemandedBits(
}
// Attempt to avoid multi-use ops if we don't need anything from them.
if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
if (!LoMask.isAllOnes() || !DemandedElts.isAllOnes()) {
SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
@ -2254,7 +2253,7 @@ bool TargetLowering::SimplifyDemandedBits(
ConstantSDNode *C = isConstOrConstSplat(Op1);
APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ);
if (C && !C->isAllOnesValue() && !C->isOne() &&
(C->getAPIntValue() | HighMask).isAllOnesValue()) {
(C->getAPIntValue() | HighMask).isAllOnes()) {
SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT);
// Disable the nsw and nuw flags. We can no longer guarantee that we
// won't wrap after simplification.
@ -2352,7 +2351,7 @@ static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
return SDValue();
};
APInt KnownUndef = APInt::getNullValue(NumElts);
APInt KnownUndef = APInt::getZero(NumElts);
for (unsigned i = 0; i != NumElts; ++i) {
// If both inputs for this element are either constant or undef and match
// the element type, compute the constant/undef result for this element of
@ -2379,7 +2378,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
unsigned NumElts = DemandedElts.getBitWidth();
assert(VT.isVector() && "Expected vector op");
KnownUndef = KnownZero = APInt::getNullValue(NumElts);
KnownUndef = KnownZero = APInt::getZero(NumElts);
// TODO: For now we assume we know nothing about scalable vectors.
if (VT.isScalableVector())
@ -2472,7 +2471,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
KnownZero, TLO, Depth + 1);
APInt SrcZero, SrcUndef;
APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts);
APInt SrcDemandedElts = APInt::getZero(NumSrcElts);
// Bitcast from 'large element' src vector to 'small element' vector, we
// must demand a source element if any DemandedElt maps to it.
@ -2491,7 +2490,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
// TODO - bigendian once we have test coverage.
if (TLO.DAG.getDataLayout().isLittleEndian()) {
unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits();
APInt SrcDemandedBits = APInt::getNullValue(SrcEltSizeInBits);
APInt SrcDemandedBits = APInt::getZero(SrcEltSizeInBits);
for (unsigned i = 0; i != NumElts; ++i)
if (DemandedElts[i]) {
unsigned Ofs = (i % Scale) * EltSizeInBits;
@ -2533,9 +2532,9 @@ bool TargetLowering::SimplifyDemandedVectorElts(
// the output element will be as well, assuming it was demanded.
for (unsigned i = 0; i != NumElts; ++i) {
if (DemandedElts[i]) {
if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue())
if (SrcZero.extractBits(Scale, i * Scale).isAllOnes())
KnownZero.setBit(i);
if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue())
if (SrcUndef.extractBits(Scale, i * Scale).isAllOnes())
KnownUndef.setBit(i);
}
}
@ -2544,7 +2543,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
}
case ISD::BUILD_VECTOR: {
// Check all elements and simplify any unused elements with UNDEF.
if (!DemandedElts.isAllOnesValue()) {
if (!DemandedElts.isAllOnes()) {
// Don't simplify BROADCASTS.
if (llvm::any_of(Op->op_values(),
[&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
@ -2597,7 +2596,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
APInt DemandedSrcElts = DemandedElts;
DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
APInt SubUndef, SubZero;
if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
@ -2617,8 +2616,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
KnownZero.insertBits(SubZero, Idx);
// Attempt to avoid multi-use ops if we don't need anything from them.
if (!DemandedSrcElts.isAllOnesValue() ||
!DemandedSubElts.isAllOnesValue()) {
if (!DemandedSrcElts.isAllOnes() || !DemandedSubElts.isAllOnes()) {
SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
Src, DemandedSrcElts, TLO.DAG, Depth + 1);
SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
@ -2650,7 +2648,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
KnownZero = SrcZero.extractBits(NumElts, Idx);
// Attempt to avoid multi-use ops if we don't need anything from them.
if (!DemandedElts.isAllOnesValue()) {
if (!DemandedElts.isAllOnes()) {
SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
Src, DemandedSrcElts, TLO.DAG, Depth + 1);
if (NewSrc) {
@ -2850,7 +2848,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
// Attempt to avoid multi-use ops if we don't need anything from them.
// TODO - use KnownUndef to relax the demandedelts?
if (!DemandedElts.isAllOnesValue())
if (!DemandedElts.isAllOnes())
if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
return true;
break;
@ -2877,7 +2875,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
// Attempt to avoid multi-use ops if we don't need anything from them.
// TODO - use KnownUndef to relax the demandedelts?
if (!DemandedElts.isAllOnesValue())
if (!DemandedElts.isAllOnes())
if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
return true;
break;
@ -2905,7 +2903,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
// Attempt to avoid multi-use ops if we don't need anything from them.
// TODO - use KnownUndef to relax the demandedelts?
if (!DemandedElts.isAllOnesValue())
if (!DemandedElts.isAllOnes())
if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
return true;
break;
@ -2931,7 +2929,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
return true;
} else {
KnownBits Known;
APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits);
APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known,
TLO, Depth, AssumeSingleUse))
return true;
@ -3121,7 +3119,7 @@ bool TargetLowering::isConstTrueVal(const SDNode *N) const {
case ZeroOrOneBooleanContent:
return CVal.isOneValue();
case ZeroOrNegativeOneBooleanContent:
return CVal.isAllOnesValue();
return CVal.isAllOnes();
}
llvm_unreachable("Invalid boolean contents");
@ -4020,7 +4018,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (Cond == ISD::SETEQ || Cond == ISD::SETNE) {
// (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
if (C1.isNullValue())
if (C1.isZero())
if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
VT, N0, N1, Cond, DCI, dl))
return CC;
@ -4030,7 +4028,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0
// all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1
bool CmpZero = N1C->getAPIntValue().isNullValue();
bool CmpNegOne = N1C->getAPIntValue().isAllOnesValue();
bool CmpNegOne = N1C->getAPIntValue().isAllOnes();
if ((CmpZero || CmpNegOne) && N0.hasOneUse()) {
// Match or(lo,shl(hi,bw/2)) pattern.
auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) {
@ -5157,7 +5155,7 @@ static mu magicu(const APInt &d, unsigned LeadingZeros = 0) {
APInt nc, delta, q1, r1, q2, r2;
struct mu magu;
magu.a = 0; // initialize "add" indicator
APInt allOnes = APInt::getAllOnesValue(d.getBitWidth()).lshr(LeadingZeros);
APInt allOnes = APInt::getAllOnes(d.getBitWidth()).lshr(LeadingZeros);
APInt signedMin = APInt::getSignedMinValue(d.getBitWidth());
APInt signedMax = APInt::getSignedMaxValue(d.getBitWidth());
@ -5285,7 +5283,7 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
int NumeratorFactor = 0;
int ShiftMask = -1;
if (Divisor.isOneValue() || Divisor.isAllOnesValue()) {
if (Divisor.isOneValue() || Divisor.isAllOnes()) {
// If d is +1/-1, we just multiply the numerator by +1/-1.
NumeratorFactor = Divisor.getSExtValue();
magics.m = 0;
@ -5456,7 +5454,7 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT));
NPQFactors.push_back(
DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
: APInt::getNullValue(EltBits),
: APInt::getZero(EltBits),
dl, SVT));
PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT));
UseNPQ |= SelNPQ;
@ -5687,14 +5685,14 @@ TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
// Q = floor((2^W - 1) u/ D)
// R = ((2^W - 1) u% D)
APInt Q, R;
APInt::udivrem(APInt::getAllOnesValue(W), D, Q, R);
APInt::udivrem(APInt::getAllOnes(W), D, Q, R);
// If we are comparing with zero, then that comparison constant is okay,
// else it may need to be one less than that.
if (Cmp.ugt(R))
Q -= 1;
assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
"We are expecting that K is always less than all-ones for ShSVT");
// If the lane is tautological the result can be constant-folded.
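
A standalone arithmetic check of the identity behind the Q computed above, under the simplifying assumptions of an odd divisor and a comparison against zero (W = 8 and D = 5 are arbitrary illustrative choices, not values from the patch): x u% D == 0 exactly when x times the modular inverse of D, reduced mod 2^W, is u<= Q = floor((2^W - 1) u/ D).

#include <cassert>
#include <cstdint>

int main() {
  const unsigned W = 8;
  const uint8_t D = 5;                 // odd divisor
  const uint8_t Q = uint8_t(0xFF / D); // 51 == floor((2^W - 1) u/ D), the "Q" above
  uint8_t Inv = 1;                     // modular inverse of D mod 2^W, by brute force
  while (uint8_t(Inv * D) != 1)
    ++Inv;                             // Inv == 205 for D == 5
  for (unsigned X = 0; X < (1u << W); ++X) {
    bool RemIsZero = X % D == 0;
    bool FoldSaysZero = uint8_t(X * Inv) <= Q;
    assert(RemIsZero == FoldSaysZero); // multiply-and-compare agrees with urem
  }
  return 0;
}
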
@ -5943,9 +5941,9 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
// Q = floor((2 * A) / (2^K))
APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));
assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) &&
assert(APInt::getAllOnes(SVT.getSizeInBits()).ugt(A) &&
"We are expecting that A is always less than all-ones for SVT");
assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
"We are expecting that K is always less than all-ones for ShSVT");
// If the divisor is 1 the result can be constant-folded. Likewise, we
@ -6076,7 +6074,7 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
SDValue IntMax = DAG.getConstant(
APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT);
SDValue Zero =
DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT);
DAG.getConstant(APInt::getZero(SVT.getScalarSizeInBits()), DL, VT);
// Which lanes had INT_MIN divisors? Divisor is constant, so const-folded.
SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ);
@ -6902,7 +6900,7 @@ bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
// the destination signmask can't be represented by the float, so we can
// just use FP_TO_SINT directly.
const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT);
APFloat APF(APFSem, APInt::getNullValue(SrcVT.getScalarSizeInBits()));
APFloat APF(APFSem, APInt::getZero(SrcVT.getScalarSizeInBits()));
APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits());
if (APFloat::opOverflow &
APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
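
The renames visible in the hunks above (getNullValue to getZero, getAllOnesValue to getAllOnes, isNullValue to isZero, isAllOnesValue to isAllOnes) repeat mechanically through the files below. A minimal usage sketch, illustration only and not part of the diff; the 32-bit width is an arbitrary choice:

#include "llvm/ADT/APInt.h"

void apintNamingSketch() {
  llvm::APInt Zero = llvm::APInt::getZero(32);    // was APInt::getNullValue(32)
  llvm::APInt Ones = llvm::APInt::getAllOnes(32); // was APInt::getAllOnesValue(32)
  (void)Zero.isZero();                            // was Zero.isNullValue()
  (void)Ones.isAllOnes();                         // was Ones.isAllOnesValue()
}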


@ -405,7 +405,7 @@ bool SwitchCG::SwitchLowering::buildBitTests(CaseClusterVector &Clusters,
if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
// Optimize the case where all the case values fit in a word without having
// to subtract minValue. In this case, we can optimize away the subtraction.
LowBound = APInt::getNullValue(Low.getBitWidth());
LowBound = APInt::getZero(Low.getBitWidth());
CmpRange = High;
ContiguousRange = false;
} else {


@ -1945,7 +1945,7 @@ static std::string APIntToHexString(const APInt &AI) {
static std::string scalarConstantToHexString(const Constant *C) {
Type *Ty = C->getType();
if (isa<UndefValue>(C)) {
return APIntToHexString(APInt::getNullValue(Ty->getPrimitiveSizeInBits()));
return APIntToHexString(APInt::getZero(Ty->getPrimitiveSizeInBits()));
} else if (const auto *CFP = dyn_cast<ConstantFP>(C)) {
return APIntToHexString(CFP->getValueAPF().bitcastToAPInt());
} else if (const auto *CI = dyn_cast<ConstantInt>(C)) {


@ -364,7 +364,7 @@ bool TypePromotion::isSafeWrap(Instruction *I) {
Total += OverflowConst->getValue().getBitWidth() < 32 ?
OverflowConst->getValue().abs().zext(32) : OverflowConst->getValue().abs();
APInt Max = APInt::getAllOnesValue(TypePromotion::TypeSize);
APInt Max = APInt::getAllOnes(TypePromotion::TypeSize);
if (Total.getBitWidth() > Max.getBitWidth()) {
if (Total.ugt(Max.zext(Total.getBitWidth())))


@ -479,7 +479,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
const APInt &api = CI->getValue();
APFloat apf(DestTy->getFltSemantics(),
APInt::getNullValue(DestTy->getPrimitiveSizeInBits()));
APInt::getZero(DestTy->getPrimitiveSizeInBits()));
apf.convertFromAPInt(api, opc==Instruction::SIToFP,
APFloat::rmNearestTiesToEven);
return ConstantFP::get(V->getContext(), apf);


@ -110,7 +110,7 @@ ConstantRange ConstantRange::makeAllowedICmpRegion(CmpInst::Predicate Pred,
APInt UMin(CR.getUnsignedMin());
if (UMin.isMaxValue())
return getEmpty(W);
return ConstantRange(std::move(UMin) + 1, APInt::getNullValue(W));
return ConstantRange(std::move(UMin) + 1, APInt::getZero(W));
}
case CmpInst::ICMP_SGT: {
APInt SMin(CR.getSignedMin());
@ -119,7 +119,7 @@ ConstantRange ConstantRange::makeAllowedICmpRegion(CmpInst::Predicate Pred,
return ConstantRange(std::move(SMin) + 1, APInt::getSignedMinValue(W));
}
case CmpInst::ICMP_UGE:
return getNonEmpty(CR.getUnsignedMin(), APInt::getNullValue(W));
return getNonEmpty(CR.getUnsignedMin(), APInt::getZero(W));
case CmpInst::ICMP_SGE:
return getNonEmpty(CR.getSignedMin(), APInt::getSignedMinValue(W));
}
@ -248,8 +248,7 @@ ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
case Instruction::Add: {
if (Unsigned)
return getNonEmpty(APInt::getNullValue(BitWidth),
-Other.getUnsignedMax());
return getNonEmpty(APInt::getZero(BitWidth), -Other.getUnsignedMax());
APInt SignedMinVal = APInt::getSignedMinValue(BitWidth);
APInt SMin = Other.getSignedMin(), SMax = Other.getSignedMax();
@ -291,7 +290,7 @@ ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
// to be at most bitwidth-1, which results in most conservative range.
APInt ShAmtUMax = ShAmt.getUnsignedMax();
if (Unsigned)
return getNonEmpty(APInt::getNullValue(BitWidth),
return getNonEmpty(APInt::getZero(BitWidth),
APInt::getMaxValue(BitWidth).lshr(ShAmtUMax) + 1);
return getNonEmpty(APInt::getSignedMinValue(BitWidth).ashr(ShAmtUMax),
APInt::getSignedMaxValue(BitWidth).ashr(ShAmtUMax) + 1);
@ -316,7 +315,7 @@ bool ConstantRange::isEmptySet() const {
}
bool ConstantRange::isWrappedSet() const {
return Lower.ugt(Upper) && !Upper.isNullValue();
return Lower.ugt(Upper) && !Upper.isZero();
}
bool ConstantRange::isUpperWrapped() const {
@ -595,7 +594,7 @@ ConstantRange ConstantRange::unionWith(const ConstantRange &CR,
APInt L = CR.Lower.ult(Lower) ? CR.Lower : Lower;
APInt U = (CR.Upper - 1).ugt(Upper - 1) ? CR.Upper : Upper;
if (L.isNullValue() && U.isNullValue())
if (L.isZero() && U.isZero())
return getFull();
return ConstantRange(std::move(L), std::move(U));
@ -1113,13 +1112,13 @@ ConstantRange::umin(const ConstantRange &Other) const {
ConstantRange
ConstantRange::udiv(const ConstantRange &RHS) const {
if (isEmptySet() || RHS.isEmptySet() || RHS.getUnsignedMax().isNullValue())
if (isEmptySet() || RHS.isEmptySet() || RHS.getUnsignedMax().isZero())
return getEmpty();
APInt Lower = getUnsignedMin().udiv(RHS.getUnsignedMax());
APInt RHS_umin = RHS.getUnsignedMin();
if (RHS_umin.isNullValue()) {
if (RHS_umin.isZero()) {
// We want the lowest value in RHS excluding zero. Usually that would be 1
// except for a range in the form of [X, 1) in which case it would be X.
if (RHS.getUpper() == 1)
@ -1136,7 +1135,7 @@ ConstantRange ConstantRange::sdiv(const ConstantRange &RHS) const {
// We split up the LHS and RHS into positive and negative components
// and then also compute the positive and negative components of the result
// separately by combining division results with the appropriate signs.
APInt Zero = APInt::getNullValue(getBitWidth());
APInt Zero = APInt::getZero(getBitWidth());
APInt SignedMin = APInt::getSignedMinValue(getBitWidth());
ConstantRange PosFilter(APInt(getBitWidth(), 1), SignedMin);
ConstantRange NegFilter(SignedMin, Zero);
@ -1159,7 +1158,7 @@ ConstantRange ConstantRange::sdiv(const ConstantRange &RHS) const {
// (For APInts the operation is well-defined and yields SignedMin.) We
// handle this by dropping either SignedMin from the LHS or -1 from the RHS.
APInt Lo = (NegL.Upper - 1).sdiv(NegR.Lower);
if (NegL.Lower.isMinSignedValue() && NegR.Upper.isNullValue()) {
if (NegL.Lower.isMinSignedValue() && NegR.Upper.isZero()) {
// Remove -1 from the LHS. Skip if it's the only element, as this would
// leave us with an empty set.
if (!NegR.Lower.isAllOnesValue()) {
@ -1218,12 +1217,12 @@ ConstantRange ConstantRange::sdiv(const ConstantRange &RHS) const {
}
ConstantRange ConstantRange::urem(const ConstantRange &RHS) const {
if (isEmptySet() || RHS.isEmptySet() || RHS.getUnsignedMax().isNullValue())
if (isEmptySet() || RHS.isEmptySet() || RHS.getUnsignedMax().isZero())
return getEmpty();
if (const APInt *RHSInt = RHS.getSingleElement()) {
// UREM by null is UB.
if (RHSInt->isNullValue())
if (RHSInt->isZero())
return getEmpty();
// Use APInt's implementation of UREM for single element ranges.
if (const APInt *LHSInt = getSingleElement())
@ -1236,7 +1235,7 @@ ConstantRange ConstantRange::urem(const ConstantRange &RHS) const {
// L % R is <= L and < R.
APInt Upper = APIntOps::umin(getUnsignedMax(), RHS.getUnsignedMax() - 1) + 1;
return getNonEmpty(APInt::getNullValue(getBitWidth()), std::move(Upper));
return getNonEmpty(APInt::getZero(getBitWidth()), std::move(Upper));
}
ConstantRange ConstantRange::srem(const ConstantRange &RHS) const {
@ -1245,7 +1244,7 @@ ConstantRange ConstantRange::srem(const ConstantRange &RHS) const {
if (const APInt *RHSInt = RHS.getSingleElement()) {
// SREM by null is UB.
if (RHSInt->isNullValue())
if (RHSInt->isZero())
return getEmpty();
// Use APInt's implementation of SREM for single element ranges.
if (const APInt *LHSInt = getSingleElement())
@ -1257,10 +1256,10 @@ ConstantRange ConstantRange::srem(const ConstantRange &RHS) const {
APInt MaxAbsRHS = AbsRHS.getUnsignedMax();
// Modulus by zero is UB.
if (MaxAbsRHS.isNullValue())
if (MaxAbsRHS.isZero())
return getEmpty();
if (MinAbsRHS.isNullValue())
if (MinAbsRHS.isZero())
++MinAbsRHS;
APInt MinLHS = getSignedMin(), MaxLHS = getSignedMax();
@ -1272,7 +1271,7 @@ ConstantRange ConstantRange::srem(const ConstantRange &RHS) const {
// L % R is <= L and < R.
APInt Upper = APIntOps::umin(MaxLHS, MaxAbsRHS - 1) + 1;
return ConstantRange(APInt::getNullValue(getBitWidth()), std::move(Upper));
return ConstantRange(APInt::getZero(getBitWidth()), std::move(Upper));
}
// Same basic logic as above, but the result is negative.
@ -1291,7 +1290,7 @@ ConstantRange ConstantRange::srem(const ConstantRange &RHS) const {
}
ConstantRange ConstantRange::binaryNot() const {
return ConstantRange(APInt::getAllOnesValue(getBitWidth())).sub(*this);
return ConstantRange(APInt::getAllOnes(getBitWidth())).sub(*this);
}
ConstantRange
@ -1306,7 +1305,7 @@ ConstantRange::binaryAnd(const ConstantRange &Other) const {
// TODO: replace this with something less conservative
APInt umin = APIntOps::umin(Other.getUnsignedMax(), getUnsignedMax());
return getNonEmpty(APInt::getNullValue(getBitWidth()), std::move(umin) + 1);
return getNonEmpty(APInt::getZero(getBitWidth()), std::move(umin) + 1);
}
ConstantRange
@ -1321,7 +1320,7 @@ ConstantRange::binaryOr(const ConstantRange &Other) const {
// TODO: replace this with something less conservative
APInt umax = APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin());
return getNonEmpty(std::move(umax), APInt::getNullValue(getBitWidth()));
return getNonEmpty(std::move(umax), APInt::getZero(getBitWidth()));
}
ConstantRange ConstantRange::binaryXor(const ConstantRange &Other) const {
@ -1352,7 +1351,7 @@ ConstantRange::shl(const ConstantRange &Other) const {
// If we are shifting by a maximum amount of zero, the shift is a no-op;
// return the original range.
if (Other_umax.isNullValue())
if (Other_umax.isZero())
return *this;
// there's overflow!
if (Other_umax.ugt(max.countLeadingZeros()))
@ -1535,7 +1534,7 @@ ConstantRange ConstantRange::abs(bool IntMinIsPoison) const {
APInt Lo;
// Check whether the range crosses zero.
if (Upper.isStrictlyPositive() || !Lower.isStrictlyPositive())
Lo = APInt::getNullValue(getBitWidth());
Lo = APInt::getZero(getBitWidth());
else
Lo = APIntOps::umin(Lower, -Upper + 1);
@ -1565,7 +1564,7 @@ ConstantRange ConstantRange::abs(bool IntMinIsPoison) const {
return ConstantRange(-SMax, -SMin + 1);
// Range crosses zero.
return ConstantRange(APInt::getNullValue(getBitWidth()),
return ConstantRange(APInt::getZero(getBitWidth()),
APIntOps::umax(-SMin, SMax) + 1);
}
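
In the ranges built above, an upper bound of APInt::getZero(W) is the wrapped "one past the unsigned maximum" bound, so the half-open range [Lower, 0) covers Lower through UINT_MAX inclusive. A small usage sketch, illustration only; the 8-bit width and the constant 10 are arbitrary:

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"

void constantRangeSketch() {
  llvm::ConstantRange Ten(llvm::APInt(8, 10)); // the single value 10 at width 8
  llvm::ConstantRange Gt = llvm::ConstantRange::makeAllowedICmpRegion(
      llvm::CmpInst::ICMP_UGT, Ten);
  // Gt is [11, 0): every 8-bit value strictly greater than 10, i.e. 11..255.
  (void)Gt.getLower(); // 11
  (void)Gt.getUpper(); // 0, the wrapped upper bound
}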


@ -366,9 +366,8 @@ Constant *Constant::getNullValue(Type *Ty) {
return ConstantFP::get(Ty->getContext(),
APFloat::getZero(APFloat::IEEEquad()));
case Type::PPC_FP128TyID:
return ConstantFP::get(Ty->getContext(),
APFloat(APFloat::PPCDoubleDouble(),
APInt::getNullValue(128)));
return ConstantFP::get(Ty->getContext(), APFloat(APFloat::PPCDoubleDouble(),
APInt::getZero(128)));
case Type::PointerTyID:
return ConstantPointerNull::get(cast<PointerType>(Ty));
case Type::StructTyID:
@ -404,7 +403,7 @@ Constant *Constant::getIntegerValue(Type *Ty, const APInt &V) {
Constant *Constant::getAllOnesValue(Type *Ty) {
if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
return ConstantInt::get(Ty->getContext(),
APInt::getAllOnesValue(ITy->getBitWidth()));
APInt::getAllOnes(ITy->getBitWidth()));
if (Ty->isFloatingPointTy()) {
APFloat FL = APFloat::getAllOnesValue(Ty->getFltSemantics(),


@ -2271,9 +2271,9 @@ bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
return false;
// Determine which mask elements are attributed to which source.
APInt UndefElts = APInt::getNullValue(NumMaskElts);
APInt Src0Elts = APInt::getNullValue(NumMaskElts);
APInt Src1Elts = APInt::getNullValue(NumMaskElts);
APInt UndefElts = APInt::getZero(NumMaskElts);
APInt Src0Elts = APInt::getZero(NumMaskElts);
APInt Src1Elts = APInt::getZero(NumMaskElts);
bool Src0Identity = true;
bool Src1Identity = true;


@ -296,9 +296,7 @@ IntegerType *IntegerType::get(LLVMContext &C, unsigned NumBits) {
return Entry;
}
APInt IntegerType::getMask() const {
return APInt::getAllOnesValue(getBitWidth());
}
APInt IntegerType::getMask() const { return APInt::getAllOnes(getBitWidth()); }
//===----------------------------------------------------------------------===//
// FunctionType Implementation


@ -687,7 +687,7 @@ InstrBuilder::createInstruction(const MCInst &MCI) {
if (IsDepBreaking) {
// A mask of all zeroes means: explicit input operands are not
// independent.
if (Mask.isNullValue()) {
if (Mask.isZero()) {
if (!RD.isImplicitRead())
RS.setIndependentFromDef();
} else {


@ -381,7 +381,7 @@ void APFixedPoint::toString(SmallVectorImpl<char> &Str) const {
// Add 4 digits to hold the value after multiplying 10 (the radix)
unsigned Width = Val.getBitWidth() + 4;
APInt FractPart = Val.zextOrTrunc(Scale).zext(Width);
APInt FractPartMask = APInt::getAllOnesValue(Scale).zext(Width);
APInt FractPartMask = APInt::getAllOnes(Scale).zext(Width);
APInt RadixInt = APInt(Width, 10);
IntPart.toString(Str, /*Radix=*/10);


@ -4866,7 +4866,7 @@ APFloat::opStatus APFloat::convert(const fltSemantics &ToSemantics,
APFloat APFloat::getAllOnesValue(const fltSemantics &Semantics,
unsigned BitWidth) {
return APFloat(Semantics, APInt::getAllOnesValue(BitWidth));
return APFloat(Semantics, APInt::getAllOnes(BitWidth));
}
void APFloat::print(raw_ostream &OS) const {


@ -1121,7 +1121,7 @@ unsigned APInt::nearestLogBase2() const {
return U.VAL - 1;
// Handle the zero case.
if (isNullValue())
if (isZero())
return UINT32_MAX;
// The non-zero case is handled by computing:
@ -2764,7 +2764,7 @@ llvm::APIntOps::SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
<< "x + " << C << ", rw:" << RangeWidth << '\n');
// Identify 0 as a (non)solution immediately.
if (C.sextOrTrunc(RangeWidth).isNullValue() ) {
if (C.sextOrTrunc(RangeWidth).isZero()) {
LLVM_DEBUG(dbgs() << __func__ << ": zero solution\n");
return APInt(CoeffWidth, 0);
}
@ -2826,7 +2826,7 @@ llvm::APIntOps::SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
auto RoundUp = [] (const APInt &V, const APInt &A) -> APInt {
assert(A.isStrictlyPositive());
APInt T = V.abs().urem(A);
if (T.isNullValue())
if (T.isZero())
return V;
return V.isNegative() ? V+T : V+(A-T);
};
@ -2910,7 +2910,7 @@ llvm::APIntOps::SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
// can be 0, but cannot be negative.
assert(X.isNonNegative() && "Solution should be non-negative");
if (!InexactSQ && Rem.isNullValue()) {
if (!InexactSQ && Rem.isZero()) {
LLVM_DEBUG(dbgs() << __func__ << ": solution (root): " << X << '\n');
return X;
}
@ -2926,8 +2926,8 @@ llvm::APIntOps::SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
APInt VX = (A*X + B)*X + C;
APInt VY = VX + TwoA*X + A + B;
bool SignChange = VX.isNegative() != VY.isNegative() ||
VX.isNullValue() != VY.isNullValue();
bool SignChange =
VX.isNegative() != VY.isNegative() || VX.isZero() != VY.isZero();
// If the sign did not change between X and X+1, X is not a valid solution.
// This could happen when the actual (exact) roots don't have an integer
// between them, so they would both be contained between X and X+1.


@ -13525,7 +13525,7 @@ performVectorTruncateCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
unsigned ElemSizeInBits = VT.getScalarSizeInBits();
APInt CAsAPInt(ElemSizeInBits, C);
if (CAsAPInt != APInt::getAllOnesValue(ElemSizeInBits))
if (CAsAPInt != APInt::getAllOnes(ElemSizeInBits))
return SDValue();
ExtendOpA = Xor.getOperand(0);


@ -1710,7 +1710,7 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
!cast<ConstantSDNode>(Idxen)->getSExtValue() &&
!cast<ConstantSDNode>(Addr64)->getSExtValue()) {
uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
APInt::getAllOnesValue(32).getZExtValue(); // Size
APInt::getAllOnes(32).getZExtValue(); // Size
SDLoc DL(Addr);
const SITargetLowering& Lowering =


@ -12182,8 +12182,7 @@ static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
// When looking for a 0 constant, N can be zext or sext.
OtherOp = DAG.getConstant(1, dl, VT);
else
OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
VT);
OtherOp = DAG.getConstant(APInt::getAllOnes(VT.getSizeInBits()), dl, VT);
return true;
}
}
@ -19325,8 +19324,8 @@ bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode(
if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) &&
isa<ConstantSDNode>(Op->getOperand(2))) {
unsigned ShAmt = Op->getConstantOperandVal(2);
if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(
APInt::getAllOnesValue(32) << (32 - ShAmt)))
if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(APInt::getAllOnes(32)
<< (32 - ShAmt)))
return TLO.CombineTo(
Op, TLO.DAG.getNode(
ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1),


@ -1400,8 +1400,7 @@ static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, SDValue &CC,
// value is 0.
OtherOp = DAG.getConstant(0, dl, VT);
else
OtherOp =
DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, VT);
OtherOp = DAG.getConstant(APInt::getAllOnes(VT.getSizeInBits()), dl, VT);
return true;
}
}


@ -1978,7 +1978,7 @@ SDValue M68kTargetLowering::LowerSETCCCARRY(SDValue Op,
M68k::CondCode CC = TranslateIntegerM68kCC(cast<CondCodeSDNode>(Cond)->get());
EVT CarryVT = Carry.getValueType();
APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
APInt NegOne = APInt::getAllOnes(CarryVT.getScalarSizeInBits());
Carry = DAG.getNode(M68kISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), Carry,
DAG.getConstant(NegOne, DL, CarryVT));


@ -15226,7 +15226,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
int Bits = 4 /* 16 byte alignment */;
if (DAG.MaskedValueIsZero(Add->getOperand(1),
APInt::getAllOnesValue(Bits /* alignment */)
APInt::getAllOnes(Bits /* alignment */)
.zext(Add.getScalarValueSizeInBits()))) {
SDNode *BasePtr = Add->getOperand(0).getNode();
for (SDNode::use_iterator UI = BasePtr->use_begin(),


@ -6539,7 +6539,7 @@ static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
"Expected a 128/256/512-bit vector type");
APInt Ones = APInt::getAllOnesValue(32);
APInt Ones = APInt::getAllOnes(32);
unsigned NumElts = VT.getSizeInBits() / 32;
SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
return DAG.getBitcast(VT, Vec);
@ -6818,19 +6818,19 @@ static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
// Handle UNDEFs.
if (Op.isUndef()) {
APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
APInt UndefSrcElts = APInt::getAllOnes(NumElts);
SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract scalar constant bits.
if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
APInt UndefSrcElts = APInt::getNullValue(1);
APInt UndefSrcElts = APInt::getZero(1);
SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
return CastBitData(UndefSrcElts, SrcEltBits);
}
if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
APInt UndefSrcElts = APInt::getNullValue(1);
APInt UndefSrcElts = APInt::getZero(1);
APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
SmallVector<APInt, 64> SrcEltBits(1, RawBits);
return CastBitData(UndefSrcElts, SrcEltBits);
@ -7034,12 +7034,12 @@ static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
AllowPartialUndefs))
return false;
UndefElts = APInt::getNullValue(NumElts);
UndefElts = APInt::getZero(NumElts);
for (int i = 0; i != (int)NumElts; ++i) {
int M = Mask[i];
if (M < 0) {
UndefElts.setBit(i);
EltBits.push_back(APInt::getNullValue(EltSizeInBits));
EltBits.push_back(APInt::getZero(EltSizeInBits));
} else if (M < (int)NumElts) {
if (UndefElts0[M])
UndefElts.setBit(i);
@ -7136,8 +7136,8 @@ static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
int NumEltsPerLane = NumElts / NumLanes;
int NumInnerEltsPerLane = NumInnerElts / NumLanes;
DemandedLHS = APInt::getNullValue(NumInnerElts);
DemandedRHS = APInt::getNullValue(NumInnerElts);
DemandedLHS = APInt::getZero(NumInnerElts);
DemandedRHS = APInt::getZero(NumInnerElts);
// Map DemandedElts to the packed operands.
for (int Lane = 0; Lane != NumLanes; ++Lane) {
@ -7160,8 +7160,8 @@ static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
int NumEltsPerLane = NumElts / NumLanes;
int HalfEltsPerLane = NumEltsPerLane / 2;
DemandedLHS = APInt::getNullValue(NumElts);
DemandedRHS = APInt::getNullValue(NumElts);
DemandedLHS = APInt::getZero(NumElts);
DemandedRHS = APInt::getZero(NumElts);
// Map DemandedElts to the horizontal operands.
for (int Idx = 0; Idx != NumElts; ++Idx) {
@ -7508,7 +7508,7 @@ static void computeZeroableShuffleElements(ArrayRef<int> Mask,
SDValue V1, SDValue V2,
APInt &KnownUndef, APInt &KnownZero) {
int Size = Mask.size();
KnownUndef = KnownZero = APInt::getNullValue(Size);
KnownUndef = KnownZero = APInt::getZero(Size);
V1 = peekThroughBitcasts(V1);
V2 = peekThroughBitcasts(V2);
@ -7601,7 +7601,7 @@ static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
int Size = Mask.size();
SDValue V1 = Ops[0];
SDValue V2 = IsUnary ? V1 : Ops[1];
KnownUndef = KnownZero = APInt::getNullValue(Size);
KnownUndef = KnownZero = APInt::getZero(Size);
V1 = peekThroughBitcasts(V1);
V2 = peekThroughBitcasts(V2);
@ -7708,7 +7708,7 @@ static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
APInt &KnownUndef,
APInt &KnownZero) {
unsigned NumElts = Mask.size();
KnownUndef = KnownZero = APInt::getNullValue(NumElts);
KnownUndef = KnownZero = APInt::getZero(NumElts);
for (unsigned i = 0; i != NumElts; ++i) {
int M = Mask[i];
@ -8204,7 +8204,7 @@ static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
APInt KnownUndef, KnownZero;
unsigned NumElts = Op.getValueType().getVectorNumElements();
APInt DemandedElts = APInt::getAllOnesValue(NumElts);
APInt DemandedElts = APInt::getAllOnes(NumElts);
return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
KnownZero, DAG, Depth, ResolveKnownElts);
}
@ -8729,9 +8729,9 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
unsigned NumElems = Elts.size();
int LastLoadedElt = -1;
APInt LoadMask = APInt::getNullValue(NumElems);
APInt ZeroMask = APInt::getNullValue(NumElems);
APInt UndefMask = APInt::getNullValue(NumElems);
APInt LoadMask = APInt::getZero(NumElems);
APInt ZeroMask = APInt::getZero(NumElems);
APInt UndefMask = APInt::getZero(NumElems);
SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
@ -9993,7 +9993,7 @@ static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
unsigned NumElts = VT.getVectorNumElements();
APInt DemandedElts = APInt::getAllOnesValue(NumElts);
APInt DemandedElts = APInt::getAllOnes(NumElts);
for (unsigned i = 0; i != NumElts; ++i)
if (BV->getOperand(i).isUndef())
DemandedElts.clearBit(i);
@ -10568,9 +10568,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return VectorConstant;
unsigned EVTBits = EltVT.getSizeInBits();
APInt UndefMask = APInt::getNullValue(NumElems);
APInt ZeroMask = APInt::getNullValue(NumElems);
APInt NonZeroMask = APInt::getNullValue(NumElems);
APInt UndefMask = APInt::getZero(NumElems);
APInt ZeroMask = APInt::getZero(NumElems);
APInt NonZeroMask = APInt::getZero(NumElems);
bool IsAllConstants = true;
SmallSet<SDValue, 8> Values;
unsigned NumConstants = NumElems;
@ -22668,7 +22668,7 @@ static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
if (!SrcOpMap.empty() && VT != SrcOpMap.begin()->first.getValueType())
return false;
unsigned NumElts = VT.getVectorNumElements();
APInt EltCount = APInt::getNullValue(NumElts);
APInt EltCount = APInt::getZero(NumElts);
M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
SrcOps.push_back(Src);
}
@ -22772,7 +22772,7 @@ static SDValue MatchVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
// Check whether we're masking/truncating an OR-reduction result, in which
// case track the masked bits.
APInt Mask = APInt::getAllOnesValue(Op.getScalarValueSizeInBits());
APInt Mask = APInt::getAllOnes(Op.getScalarValueSizeInBits());
switch (Op.getOpcode()) {
case ISD::TRUNCATE: {
SDValue Src = Op.getOperand(0);
@ -35455,8 +35455,8 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
APInt DemandedLHS, DemandedRHS;
getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
Known.One = APInt::getAllOnesValue(BitWidth * 2);
Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
Known.One = APInt::getAllOnes(BitWidth * 2);
Known.Zero = APInt::getAllOnes(BitWidth * 2);
KnownBits Known2;
if (!!DemandedLHS) {
@ -36202,8 +36202,8 @@ static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
unsigned NumV2Elts = V2.getValueType().getVectorNumElements();
unsigned Scale1 = NumV1Elts / NumMaskElts;
unsigned Scale2 = NumV2Elts / NumMaskElts;
APInt DemandedZeroV1 = APInt::getNullValue(NumV1Elts);
APInt DemandedZeroV2 = APInt::getNullValue(NumV2Elts);
APInt DemandedZeroV1 = APInt::getZero(NumV1Elts);
APInt DemandedZeroV2 = APInt::getZero(NumV2Elts);
for (unsigned i = 0; i != NumMaskElts; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef)
@ -36973,8 +36973,8 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
if (UnaryShuffle && MaskContainsZeros && AllowVariablePerLaneMask &&
isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
APInt Zero = APInt::getZero(MaskEltSizeInBits);
APInt AllOnes = APInt::getAllOnes(MaskEltSizeInBits);
APInt UndefElts(NumMaskElts, 0);
SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
for (unsigned i = 0; i != NumMaskElts; ++i) {
@ -37483,7 +37483,7 @@ static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
APInt ZeroElts(NumMaskElts, 0);
APInt ConstantElts(NumMaskElts, 0);
SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
APInt::getNullValue(MaskSizeInBits));
APInt::getZero(MaskSizeInBits));
for (unsigned i = 0; i != NumMaskElts; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef) {
@ -37610,7 +37610,7 @@ static SDValue combineX86ShufflesRecursively(
SmallVector<int, 64> OpMask;
SmallVector<SDValue, 2> OpInputs;
APInt OpUndef, OpZero;
APInt OpDemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
APInt OpDemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
if (!getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
OpZero, DAG, Depth, false))
@ -37653,7 +37653,7 @@ static SDValue combineX86ShufflesRecursively(
// up in an infinite loop.
bool ResolveKnownZeros = true;
if (!OpZero.isNullValue()) {
APInt UsedInputs = APInt::getNullValue(OpInputs.size());
APInt UsedInputs = APInt::getZero(OpInputs.size());
for (int i = 0, e = OpMask.size(); i != e; ++i) {
int M = OpMask[i];
if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
@ -39253,7 +39253,7 @@ static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
// Simplify source operands based on shuffle mask.
// TODO - merge this into combineX86ShufflesRecursively.
APInt KnownUndef, KnownZero;
APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero,
DCI))
return SDValue(N, 0);
@ -39860,7 +39860,7 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
continue;
int Lo = Src * NumElts;
APInt SrcElts = APInt::getNullValue(NumElts);
APInt SrcElts = APInt::getZero(NumElts);
for (int i = 0; i != NumElts; ++i)
if (DemandedElts[i]) {
int M = OpMask[i] - Lo;
@ -40382,7 +40382,7 @@ SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
// Bitmask that indicates which ops have only been accessed 'inline'.
APInt IdentityOp = APInt::getAllOnesValue(NumOps);
APInt IdentityOp = APInt::getAllOnes(NumOps);
for (int i = 0; i != NumElts; ++i) {
int M = ShuffleMask[i];
if (!DemandedElts[i] || ShuffleUndef[i])
@ -41233,7 +41233,7 @@ static SDValue combineMinMaxReduction(SDNode *Extract, SelectionDAG &DAG,
else if (BinOp == ISD::SMIN)
Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
else if (BinOp == ISD::UMAX)
Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
Mask = DAG.getConstant(APInt::getAllOnes(MaskEltsBits), DL, SrcVT);
if (Mask)
MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
@ -41965,8 +41965,8 @@ static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
if (IsPextr) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.SimplifyDemandedBits(
SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
if (TLI.SimplifyDemandedBits(SDValue(N, 0),
APInt::getAllOnes(VT.getSizeInBits()), DCI))
return SDValue(N, 0);
// PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
@ -43518,7 +43518,7 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits()) {
unsigned NumShuffleElts = ShuffleMask.size();
APInt DemandedElts = APInt::getNullValue(NumShuffleElts);
APInt DemandedElts = APInt::getZero(NumShuffleElts);
for (int M : ShuffleMask) {
assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
DemandedElts.setBit(M);
@ -44658,7 +44658,7 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
APInt Undefs(NumDstElts, 0);
SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
SmallVector<APInt, 32> Bits(NumDstElts, APInt::getZero(DstBitsPerElt));
for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
@ -44688,9 +44688,9 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
if (Val.isIntN(DstBitsPerElt))
Val = Val.trunc(DstBitsPerElt);
else if (Val.isNegative())
Val = APInt::getNullValue(DstBitsPerElt);
Val = APInt::getZero(DstBitsPerElt);
else
Val = APInt::getAllOnesValue(DstBitsPerElt);
Val = APInt::getAllOnes(DstBitsPerElt);
}
Bits[Lane * NumDstEltsPerLane + Elt] = Val;
}
@ -44848,7 +44848,7 @@ static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
APInt KnownUndef, KnownZero;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
KnownZero, DCI))
return SDValue(N, 0);
@ -44950,8 +44950,8 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
}
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.SimplifyDemandedBits(SDValue(N, 0),
APInt::getAllOnesValue(NumBitsPerElt), DCI))
if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBitsPerElt),
DCI))
return SDValue(N, 0);
return SDValue();
@ -44970,7 +44970,7 @@ static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
unsigned NumBitsPerElt = VT.getScalarSizeInBits();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.SimplifyDemandedBits(SDValue(N, 0),
APInt::getAllOnesValue(NumBitsPerElt), DCI))
APInt::getAllOnes(NumBitsPerElt), DCI))
return SDValue(N, 0);
}
@ -46171,7 +46171,7 @@ static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
APInt SignedMax, SignedMin;
if (MatchPackUS) {
SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
SignedMax = APInt::getAllOnes(NumDstBits).zext(NumSrcBits);
SignedMin = APInt(NumSrcBits, 0);
} else {
SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
@ -47747,7 +47747,7 @@ static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
// SimplifyDemandedBits often relaxes sra to srl so we need to reverse it.
if (In.getOpcode() == ISD::SRL && N->isOnlyUserOf(In.getNode()))
if (const APInt *ShAmt = DAG.getValidShiftAmountConstant(
In, APInt::getAllOnesValue(VT.getVectorNumElements()))) {
In, APInt::getAllOnes(VT.getVectorNumElements()))) {
if (*ShAmt == MinSignBits) {
SDValue NewIn = DAG.getNode(ISD::SRA, DL, InVT, In->ops());
return truncateVectorWithPACK(X86ISD::PACKSS, VT, NewIn, DL, DAG,
@ -48003,7 +48003,7 @@ static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedMask(APInt::getAllOnesValue(VT.getScalarSizeInBits()));
APInt DemandedMask(APInt::getAllOnes(VT.getScalarSizeInBits()));
if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
return SDValue(N, 0);
@ -48387,7 +48387,7 @@ static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
// Simplify the inputs.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedMask(APInt::getAllOnesValue(NumBits));
APInt DemandedMask(APInt::getAllOnes(NumBits));
if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
return SDValue(N, 0);
@ -48591,7 +48591,7 @@ static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt KnownUndef, KnownZero;
APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
KnownZero, DCI))
return SDValue(N, 0);
@ -49704,7 +49704,7 @@ static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
// Simplify the inputs.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedMask(APInt::getAllOnesValue(NumBits));
APInt DemandedMask(APInt::getAllOnes(NumBits));
if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
return SDValue(N, 0);
@ -51748,7 +51748,7 @@ static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
// PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(64), DCI))
return SDValue(N, 0);
// If the input is an extend_invec and the SimplifyDemandedBits call didn't
@ -51862,7 +51862,7 @@ static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
APInt KnownUndef, KnownZero;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
KnownZero, DCI))
return SDValue(N, 0);
@ -52074,8 +52074,7 @@ static SDValue combinePDEP(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
unsigned NumBits = N->getSimpleValueType(0).getSizeInBits();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.SimplifyDemandedBits(SDValue(N, 0),
APInt::getAllOnesValue(NumBits), DCI))
if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBits), DCI))
return SDValue(N, 0);
return SDValue();


@ -476,7 +476,7 @@ static Value *simplifyX86pack(IntrinsicInst &II,
// PACKUS: Truncate signed value with unsigned saturation.
// Source values less than zero are saturated to zero.
// Source values greater than dst maxuint are saturated to maxuint.
MinValue = APInt::getNullValue(SrcScalarSizeInBits);
MinValue = APInt::getZero(SrcScalarSizeInBits);
MaxValue = APInt::getLowBitsSet(SrcScalarSizeInBits, DstScalarSizeInBits);
}


@ -3740,7 +3740,7 @@ X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
(IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
// Scalarization
APInt DemandedElts = APInt::getAllOnesValue(NumElem);
APInt DemandedElts = APInt::getAllOnes(NumElem);
InstructionCost MaskSplitCost =
getScalarizationOverhead(MaskTy, DemandedElts, false, true);
InstructionCost ScalarCompareCost = getCmpSelInstrCost(
@ -4653,7 +4653,7 @@ InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
bool VariableMask, Align Alignment,
unsigned AddressSpace) {
unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
APInt DemandedElts = APInt::getAllOnesValue(VF);
APInt DemandedElts = APInt::getAllOnes(VF);
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
InstructionCost MaskUnpackCost = 0;


@ -206,8 +206,8 @@ struct MaskOps {
bool FoundAnd1;
MaskOps(unsigned BitWidth, bool MatchAnds)
: Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
MatchAndChain(MatchAnds), FoundAnd1(false) {}
: Root(nullptr), Mask(APInt::getZero(BitWidth)), MatchAndChain(MatchAnds),
FoundAnd1(false) {}
};
/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a


@ -3456,13 +3456,13 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
// canonicalize to a 'not' before the shift to help SCEV and codegen:
// (X << C) ^ RHSC --> ~X << C
if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_APInt(C)))) &&
*RHSC == APInt::getAllOnesValue(Ty->getScalarSizeInBits()).shl(*C)) {
*RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).shl(*C)) {
Value *NotX = Builder.CreateNot(X);
return BinaryOperator::CreateShl(NotX, ConstantInt::get(Ty, *C));
}
// (X >>u C) ^ RHSC --> ~X >>u C
if (match(Op0, m_OneUse(m_LShr(m_Value(X), m_APInt(C)))) &&
*RHSC == APInt::getAllOnesValue(Ty->getScalarSizeInBits()).lshr(*C)) {
*RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).lshr(*C)) {
Value *NotX = Builder.CreateNot(X);
return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *C));
}


@ -953,7 +953,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
auto VWidth = IIFVTy->getNumElements();
APInt UndefElts(VWidth, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
if (V != II)
return replaceInstUsesWith(*II, V);


@ -3746,7 +3746,7 @@ foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
(WidestTy->getScalarSizeInBits() - 1) +
(NarrowestTy->getScalarSizeInBits() - 1);
APInt MaximalRepresentableShiftAmount =
APInt::getAllOnesValue(XShAmt->getType()->getScalarSizeInBits());
APInt::getAllOnes(XShAmt->getType()->getScalarSizeInBits());
if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
return nullptr;
@ -5042,7 +5042,7 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
const APInt *RHS;
if (!match(I.getOperand(1), m_APInt(RHS)))
return APInt::getAllOnesValue(BitWidth);
return APInt::getAllOnes(BitWidth);
// If this is a normal comparison, it demands all bits. If it is a sign bit
// comparison, it only demands the sign bit.
@ -5064,7 +5064,7 @@ static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros());
default:
return APInt::getAllOnesValue(BitWidth);
return APInt::getAllOnes(BitWidth);
}
}
@ -5228,8 +5228,7 @@ Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
Op0Known, 0))
return &I;
if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
Op1Known, 0))
if (SimplifyDemandedBits(&I, 1, APInt::getAllOnes(BitWidth), Op1Known, 0))
return &I;
// Given the known and unknown bits, compute a range that the LHS could be


@ -1265,9 +1265,9 @@ static Instruction *canonicalizeClampLike(SelectInst &Sel0, ICmpInst &Cmp0,
// We want to canonicalize it to 'ult', so we'll need to increment C0,
// which again means it must not have any all-ones elements.
if (!match(C0,
m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_NE,
APInt::getAllOnesValue(
C0->getType()->getScalarSizeInBits()))))
m_SpecificInt_ICMP(
ICmpInst::Predicate::ICMP_NE,
APInt::getAllOnes(C0->getType()->getScalarSizeInBits()))))
return nullptr; // Can't do, have all-ones element[s].
C0 = InstCombiner::AddOne(C0);
std::swap(X, Sel1);
@ -2441,7 +2441,7 @@ Instruction *InstCombinerImpl::foldVectorSelect(SelectInst &Sel) {
unsigned NumElts = VecTy->getNumElements();
APInt UndefElts(NumElts, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(NumElts));
APInt AllOnesEltMask(APInt::getAllOnes(NumElts));
if (Value *V = SimplifyDemandedVectorElts(&Sel, AllOnesEltMask, UndefElts)) {
if (V != &Sel)
return replaceInstUsesWith(Sel, V);


@ -41,7 +41,7 @@ bool canTryToConstantAddTwoShiftAmounts(Value *Sh0, Value *ShAmt0, Value *Sh1,
(Sh0->getType()->getScalarSizeInBits() - 1) +
(Sh1->getType()->getScalarSizeInBits() - 1);
APInt MaximalRepresentableShiftAmount =
APInt::getAllOnesValue(ShAmt0->getType()->getScalarSizeInBits());
APInt::getAllOnes(ShAmt0->getType()->getScalarSizeInBits());
return MaximalRepresentableShiftAmount.uge(MaximalPossibleTotalShiftAmount);
}


@ -55,7 +55,7 @@ static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
bool InstCombinerImpl::SimplifyDemandedInstructionBits(Instruction &Inst) {
unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
KnownBits Known(BitWidth);
APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
APInt DemandedMask(APInt::getAllOnes(BitWidth));
Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
0, &Inst);
@ -743,7 +743,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
case Instruction::URem: {
KnownBits Known2(BitWidth);
APInt AllOnes = APInt::getAllOnesValue(BitWidth);
APInt AllOnes = APInt::getAllOnes(BitWidth);
if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
return I;
@ -1044,8 +1044,8 @@ Value *InstCombinerImpl::simplifyShrShlDemandedBits(
Known.Zero.setLowBits(ShlAmt - 1);
Known.Zero &= DemandedMask;
APInt BitMask1(APInt::getAllOnesValue(BitWidth));
APInt BitMask2(APInt::getAllOnesValue(BitWidth));
APInt BitMask1(APInt::getAllOnes(BitWidth));
APInt BitMask2(APInt::getAllOnes(BitWidth));
bool isLshr = (Shr->getOpcode() == Instruction::LShr);
BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
@ -1111,7 +1111,7 @@ Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
return nullptr;
unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();
APInt EltMask(APInt::getAllOnesValue(VWidth));
APInt EltMask(APInt::getAllOnes(VWidth));
assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
if (match(V, m_Undef())) {
@ -1538,8 +1538,8 @@ Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
// Subtlety: If we load from a pointer, the pointer must be valid
// regardless of whether the element is demanded. Doing otherwise risks
// segfaults which didn't exist in the original program.
APInt DemandedPtrs(APInt::getAllOnesValue(VWidth)),
DemandedPassThrough(DemandedElts);
APInt DemandedPtrs(APInt::getAllOnes(VWidth)),
DemandedPassThrough(DemandedElts);
if (auto *CV = dyn_cast<ConstantVector>(II->getOperand(2)))
for (unsigned i = 0; i < VWidth; i++) {
Constant *CElt = CV->getAggregateElement(i);


@ -282,7 +282,7 @@ static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) {
unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();
// Conservatively assume that all elements are needed.
APInt UsedElts(APInt::getAllOnesValue(VWidth));
APInt UsedElts(APInt::getAllOnes(VWidth));
switch (UserInstr->getOpcode()) {
case Instruction::ExtractElement: {
@ -330,11 +330,11 @@ static APInt findDemandedEltsByAllUsers(Value *V) {
if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
UnionUsedElts |= findDemandedEltsBySingleUser(V, I);
} else {
UnionUsedElts = APInt::getAllOnesValue(VWidth);
UnionUsedElts = APInt::getAllOnes(VWidth);
break;
}
if (UnionUsedElts.isAllOnesValue())
if (UnionUsedElts.isAllOnes())
break;
}
@ -396,7 +396,7 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
// If the input vector has multiple uses, simplify it based on a union
// of all elements used.
APInt DemandedElts = findDemandedEltsByAllUsers(SrcVec);
if (!DemandedElts.isAllOnesValue()) {
if (!DemandedElts.isAllOnes()) {
APInt UndefElts(NumElts, 0);
if (Value *V = SimplifyDemandedVectorElts(
SrcVec, DemandedElts, UndefElts, 0 /* Depth */,
@ -1503,7 +1503,7 @@ Instruction *InstCombinerImpl::visitInsertElementInst(InsertElementInst &IE) {
if (auto VecTy = dyn_cast<FixedVectorType>(VecOp->getType())) {
unsigned VWidth = VecTy->getNumElements();
APInt UndefElts(VWidth, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts)) {
if (V != &IE)
return replaceInstUsesWith(IE, V);
@ -2452,7 +2452,7 @@ Instruction *InstCombinerImpl::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
return I;
APInt UndefElts(VWidth, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
if (V != &SVI)
return replaceInstUsesWith(SVI, V);


@ -1906,7 +1906,7 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
auto VWidth = GEPFVTy->getNumElements();
APInt UndefElts(VWidth, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
UndefElts)) {
if (V != &GEP)


@ -715,7 +715,7 @@ static bool narrowSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
// sdiv/srem is UB if divisor is -1 and dividend is INT_MIN, so unless we can
// prove that such a combination is impossible, we need to bump the bitwidth.
if (CRs[1]->contains(APInt::getAllOnesValue(OrigWidth)) &&
if (CRs[1]->contains(APInt::getAllOnes(OrigWidth)) &&
CRs[0]->contains(
APInt::getSignedMinValue(MinSignedBits).sextOrSelf(OrigWidth)))
++MinSignedBits;


@ -256,7 +256,7 @@ void Float2IntPass::walkForwards() {
Op = [](ArrayRef<ConstantRange> Ops) {
assert(Ops.size() == 1 && "FNeg is a unary operator!");
unsigned Size = Ops[0].getBitWidth();
auto Zero = ConstantRange(APInt::getNullValue(Size));
auto Zero = ConstantRange(APInt::getZero(Size));
return Zero.sub(Ops[0]);
};
break;


@ -140,7 +140,7 @@ XorOpnd::XorOpnd(Value *V) {
// view the operand as "V | 0"
SymbolicPart = V;
ConstPart = APInt::getNullValue(V->getType()->getScalarSizeInBits());
ConstPart = APInt::getZero(V->getType()->getScalarSizeInBits());
isOr = true;
}
@ -1361,7 +1361,7 @@ bool ReassociatePass::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1,
APInt C3((~C1) ^ C2);
// Do not increase code size!
if (!C3.isNullValue() && !C3.isAllOnesValue()) {
if (!C3.isZero() && !C3.isAllOnes()) {
int NewInstNum = ConstOpnd.getBoolValue() ? 1 : 2;
if (NewInstNum > DeadInstNum)
return false;
@ -1377,7 +1377,7 @@ bool ReassociatePass::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1,
APInt C3 = C1 ^ C2;
// Do not increase code size
if (!C3.isNullValue() && !C3.isAllOnesValue()) {
if (!C3.isZero() && !C3.isAllOnes()) {
int NewInstNum = ConstOpnd.getBoolValue() ? 1 : 2;
if (NewInstNum > DeadInstNum)
return false;


@ -3179,7 +3179,7 @@ bool llvm::recognizeBSwapOrBitReverseIdiom(
// Now, is the bit permutation correct for a bswap or a bitreverse? We can
// only byteswap values with an even number of bytes.
APInt DemandedMask = APInt::getAllOnesValue(DemandedBW);
APInt DemandedMask = APInt::getAllOnes(DemandedBW);
bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
bool OKForBitReverse = MatchBitReversals;
for (unsigned BitIdx = 0;


@ -2468,7 +2468,7 @@ Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
Value *StartValue = expandCodeForImpl(Start, ARTy, Loc, false);
ConstantInt *Zero =
ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));
ConstantInt::get(Loc->getContext(), APInt::getZero(DstBits));
Builder.SetInsertPoint(Loc);
// Compute |Step|


@ -6898,7 +6898,7 @@ int LoopVectorizationCostModel::computePredInstDiscount(
if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
ScalarCost += TTI.getScalarizationOverhead(
cast<VectorType>(ToVectorTy(I->getType(), VF)),
APInt::getAllOnesValue(VF.getFixedValue()), true, false);
APInt::getAllOnes(VF.getFixedValue()), true, false);
ScalarCost +=
VF.getFixedValue() *
TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
@ -6917,7 +6917,7 @@ int LoopVectorizationCostModel::computePredInstDiscount(
else if (needsExtract(J, VF)) {
ScalarCost += TTI.getScalarizationOverhead(
cast<VectorType>(ToVectorTy(J->getType(), VF)),
APInt::getAllOnesValue(VF.getFixedValue()), false, true);
APInt::getAllOnes(VF.getFixedValue()), false, true);
}
}
@ -7063,7 +7063,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
auto *Vec_i1Ty =
VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
Cost += TTI.getScalarizationOverhead(
Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
/*Insert=*/false, /*Extract=*/true);
Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
@ -7405,8 +7405,8 @@ LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
if (!RetTy->isVoidTy() &&
(!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
Cost += TTI.getScalarizationOverhead(
cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
true, false);
cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
false);
// Some targets keep addresses scalar.
if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
@@ -7657,8 +7657,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
return (
TTI.getScalarizationOverhead(
Vec_i1Ty, APInt::getAllOnesValue(VF.getFixedValue()), false,
true) +
Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
(TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
} else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
// The back-edge branch will remain, as will all scalar branches.

@@ -3899,7 +3899,7 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
unsigned const NumElts = SrcVecTy->getNumElements();
unsigned const NumScalars = VL.size();
APInt DemandedElts = APInt::getNullValue(NumElts);
APInt DemandedElts = APInt::getZero(NumElts);
// TODO: Add support for Instruction::InsertValue.
unsigned Offset = UINT_MAX;
bool IsIdentity = true;
@@ -4525,7 +4525,7 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
VF.push_back(FTy->getNumElements());
ShuffleMask.emplace_back(VF.back(), UndefMaskElem);
FirstUsers.push_back(EU.User);
DemandedElts.push_back(APInt::getNullValue(VF.back()));
DemandedElts.push_back(APInt::getZero(VF.back()));
VecId = FirstUsers.size() - 1;
} else {
VecId = std::distance(FirstUsers.begin(), It);
@@ -4759,7 +4759,7 @@ InstructionCost
BoUpSLP::getGatherCost(FixedVectorType *Ty,
const DenseSet<unsigned> &ShuffledIndices) const {
unsigned NumElts = Ty->getNumElements();
APInt DemandedElts = APInt::getNullValue(NumElts);
APInt DemandedElts = APInt::getZero(NumElts);
for (unsigned I = 0; I < NumElts; ++I)
if (!ShuffledIndices.count(I))
DemandedElts.setBit(I);

@@ -452,10 +452,10 @@ struct ConstModifier: public Modifier {
switch (getRandom() % 7) {
case 0:
return PT->push_back(ConstantInt::get(
Ty, APInt::getAllOnesValue(Ty->getPrimitiveSizeInBits())));
Ty, APInt::getAllOnes(Ty->getPrimitiveSizeInBits())));
case 1:
return PT->push_back(ConstantInt::get(
Ty, APInt::getNullValue(Ty->getPrimitiveSizeInBits())));
return PT->push_back(
ConstantInt::get(Ty, APInt::getZero(Ty->getPrimitiveSizeInBits())));
case 2:
case 3:
case 4:

@@ -26,7 +26,7 @@ TEST(APIntTest, ValueInit) {
// Test that APInt shift left works when bitwidth > 64 and shiftamt == 0
TEST(APIntTest, ShiftLeftByZero) {
APInt One = APInt::getNullValue(65) + 1;
APInt One = APInt::getZero(65) + 1;
APInt Shl = One.shl(0);
EXPECT_TRUE(Shl[0]);
EXPECT_FALSE(Shl[1]);
@@ -102,7 +102,7 @@ TEST(APIntTest, i65_Count) {
}
TEST(APIntTest, i128_PositiveCount) {
APInt u128max = APInt::getAllOnesValue(128);
APInt u128max = APInt::getAllOnes(128);
EXPECT_EQ(128u, u128max.countLeadingOnes());
EXPECT_EQ(0u, u128max.countLeadingZeros());
EXPECT_EQ(128u, u128max.getActiveBits());
@@ -2323,7 +2323,7 @@ TEST(APIntTest, getHiBits) {
}
TEST(APIntTest, clearLowBits) {
APInt i64hi32 = APInt::getAllOnesValue(64);
APInt i64hi32 = APInt::getAllOnes(64);
i64hi32.clearLowBits(32);
EXPECT_EQ(32u, i64hi32.countLeadingOnes());
EXPECT_EQ(0u, i64hi32.countLeadingZeros());
@@ -2332,7 +2332,7 @@ TEST(APIntTest, clearLowBits) {
EXPECT_EQ(0u, i64hi32.countTrailingOnes());
EXPECT_EQ(32u, i64hi32.countPopulation());
APInt i128hi64 = APInt::getAllOnesValue(128);
APInt i128hi64 = APInt::getAllOnes(128);
i128hi64.clearLowBits(64);
EXPECT_EQ(64u, i128hi64.countLeadingOnes());
EXPECT_EQ(0u, i128hi64.countLeadingZeros());
@@ -2341,7 +2341,7 @@ TEST(APIntTest, clearLowBits) {
EXPECT_EQ(0u, i128hi64.countTrailingOnes());
EXPECT_EQ(64u, i128hi64.countPopulation());
APInt i128hi24 = APInt::getAllOnesValue(128);
APInt i128hi24 = APInt::getAllOnes(128);
i128hi24.clearLowBits(104);
EXPECT_EQ(24u, i128hi24.countLeadingOnes());
EXPECT_EQ(0u, i128hi24.countLeadingZeros());
@@ -2350,7 +2350,7 @@ TEST(APIntTest, clearLowBits) {
EXPECT_EQ(0u, i128hi24.countTrailingOnes());
EXPECT_EQ(24u, i128hi24.countPopulation());
APInt i128hi104 = APInt::getAllOnesValue(128);
APInt i128hi104 = APInt::getAllOnes(128);
i128hi104.clearLowBits(24);
EXPECT_EQ(104u, i128hi104.countLeadingOnes());
EXPECT_EQ(0u, i128hi104.countLeadingZeros());
@@ -2359,7 +2359,7 @@ TEST(APIntTest, clearLowBits) {
EXPECT_EQ(0u, i128hi104.countTrailingOnes());
EXPECT_EQ(104u, i128hi104.countPopulation());
APInt i128hi0 = APInt::getAllOnesValue(128);
APInt i128hi0 = APInt::getAllOnes(128);
i128hi0.clearLowBits(128);
EXPECT_EQ(0u, i128hi0.countLeadingOnes());
EXPECT_EQ(128u, i128hi0.countLeadingZeros());
@@ -2368,7 +2368,7 @@ TEST(APIntTest, clearLowBits) {
EXPECT_EQ(0u, i128hi0.countTrailingOnes());
EXPECT_EQ(0u, i128hi0.countPopulation());
APInt i80hi1 = APInt::getAllOnesValue(80);
APInt i80hi1 = APInt::getAllOnes(80);
i80hi1.clearLowBits(79);
EXPECT_EQ(1u, i80hi1.countLeadingOnes());
EXPECT_EQ(0u, i80hi1.countLeadingZeros());
@@ -2377,7 +2377,7 @@ TEST(APIntTest, clearLowBits) {
EXPECT_EQ(0u, i80hi1.countTrailingOnes());
EXPECT_EQ(1u, i80hi1.countPopulation());
APInt i32hi16 = APInt::getAllOnesValue(32);
APInt i32hi16 = APInt::getAllOnes(32);
i32hi16.clearLowBits(16);
EXPECT_EQ(16u, i32hi16.countLeadingOnes());
EXPECT_EQ(0u, i32hi16.countLeadingZeros());
@@ -2484,7 +2484,7 @@ TEST(APIntTest, ArithmeticRightShift) {
// Ensure we handle large shifts of multi-word.
const APInt signmin32(APInt::getSignedMinValue(32));
EXPECT_TRUE(signmin32.ashr(32).isAllOnesValue());
EXPECT_TRUE(signmin32.ashr(32).isAllOnes());
// Ensure we handle large shifts of multi-word.
const APInt umax32(APInt::getSignedMaxValue(32));
@@ -2492,7 +2492,7 @@ TEST(APIntTest, ArithmeticRightShift) {
// Ensure we handle large shifts of multi-word.
const APInt signmin128(APInt::getSignedMinValue(128));
EXPECT_TRUE(signmin128.ashr(128).isAllOnesValue());
EXPECT_TRUE(signmin128.ashr(128).isAllOnes());
// Ensure we handle large shifts of multi-word.
const APInt umax128(APInt::getSignedMaxValue(128));

@@ -1109,7 +1109,7 @@ TEST_F(AArch64GISelMITest, TestMetadata) {
KnownBits Res = Info.getKnownBits(And->getOperand(1).getReg());
// We don't know what the result of the load is, so we don't know any ones.
EXPECT_TRUE(Res.One.isNullValue());
EXPECT_TRUE(Res.One.isZero());
// We know that the value is in [0, 2). So, we don't know if the first bit
// is 0 or not. However, we do know that every other bit must be 0.
@@ -1601,11 +1601,11 @@ TEST_F(AArch64GISelMITest, TestInvalidQueries) {
// We don't know what the result of the shift is, but we should not crash
EXPECT_TRUE(EqSizeRes.One.isNullValue());
EXPECT_TRUE(EqSizeRes.Zero.isNullValue());
EXPECT_TRUE(EqSizeRes.One.isZero());
EXPECT_TRUE(EqSizeRes.Zero.isZero());
EXPECT_TRUE(BiggerSizeRes.One.isNullValue());
EXPECT_TRUE(BiggerSizeRes.Zero.isNullValue());
EXPECT_TRUE(BiggerSizeRes.One.isZero());
EXPECT_TRUE(BiggerSizeRes.Zero.isZero());
}
TEST_F(AArch64GISelMITest, TestKnownBitsAssertZext) {

@@ -1016,7 +1016,7 @@ TEST_F(AArch64GISelMITest, TestVectorMetadata) {
GISelKnownBits Info(*MF);
KnownBits Res = Info.getKnownBits(And->getOperand(1).getReg());
EXPECT_TRUE(Res.One.isNullValue());
EXPECT_TRUE(Res.One.isZero());
APInt Mask(Res.getBitWidth(), 1);
Mask.flipAllBits();
@@ -1454,11 +1454,11 @@ TEST_F(AArch64GISelMITest, TestVectorInvalidQueries) {
KnownBits EqSizeRes = Info.getKnownBits(EqSizedShl);
KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);
EXPECT_TRUE(EqSizeRes.One.isNullValue());
EXPECT_TRUE(EqSizeRes.Zero.isNullValue());
EXPECT_TRUE(EqSizeRes.One.isZero());
EXPECT_TRUE(EqSizeRes.Zero.isZero());
EXPECT_TRUE(BiggerSizeRes.One.isNullValue());
EXPECT_TRUE(BiggerSizeRes.Zero.isNullValue());
EXPECT_TRUE(BiggerSizeRes.One.isZero());
EXPECT_TRUE(BiggerSizeRes.Zero.isZero());
}
TEST_F(AArch64GISelMITest, TestKnownBitsVectorAssertZext) {

@@ -1969,7 +1969,7 @@ TEST_F(ConstantRangeTest, UnsignedAddOverflow) {
EXPECT_MAY_OVERFLOW(Empty.unsignedAddMayOverflow(Some));
// Never overflow despite one full/wrap set.
ConstantRange Zero(APInt::getNullValue(16));
ConstantRange Zero(APInt::getZero(16));
EXPECT_NEVER_OVERFLOWS(Full.unsignedAddMayOverflow(Zero));
EXPECT_NEVER_OVERFLOWS(Wrap.unsignedAddMayOverflow(Zero));
EXPECT_NEVER_OVERFLOWS(Zero.unsignedAddMayOverflow(Full));
@@ -2003,8 +2003,8 @@ TEST_F(ConstantRangeTest, UnsignedSubOverflow) {
EXPECT_MAY_OVERFLOW(Empty.unsignedSubMayOverflow(Some));
// Never overflow despite one full/wrap set.
ConstantRange Zero(APInt::getNullValue(16));
ConstantRange Max(APInt::getAllOnesValue(16));
ConstantRange Zero(APInt::getZero(16));
ConstantRange Max(APInt::getAllOnes(16));
EXPECT_NEVER_OVERFLOWS(Full.unsignedSubMayOverflow(Zero));
EXPECT_NEVER_OVERFLOWS(Wrap.unsignedSubMayOverflow(Zero));
EXPECT_NEVER_OVERFLOWS(Max.unsignedSubMayOverflow(Full));
@@ -2038,7 +2038,7 @@ TEST_F(ConstantRangeTest, SignedAddOverflow) {
EXPECT_MAY_OVERFLOW(Empty.signedAddMayOverflow(Some));
// Never overflow despite one full/wrap set.
ConstantRange Zero(APInt::getNullValue(16));
ConstantRange Zero(APInt::getZero(16));
EXPECT_NEVER_OVERFLOWS(Full.signedAddMayOverflow(Zero));
EXPECT_NEVER_OVERFLOWS(Wrap.signedAddMayOverflow(Zero));
EXPECT_NEVER_OVERFLOWS(Zero.signedAddMayOverflow(Full));
@@ -2090,7 +2090,7 @@ TEST_F(ConstantRangeTest, SignedSubOverflow) {
EXPECT_MAY_OVERFLOW(Empty.signedSubMayOverflow(Some));
// Never overflow despite one full/wrap set.
ConstantRange Zero(APInt::getNullValue(16));
ConstantRange Zero(APInt::getZero(16));
EXPECT_NEVER_OVERFLOWS(Full.signedSubMayOverflow(Zero));
EXPECT_NEVER_OVERFLOWS(Wrap.signedSubMayOverflow(Zero));
@@ -2474,18 +2474,14 @@ TEST_F(ConstantRangeTest, binaryNot) {
PreferSmallest);
TestUnaryOpExhaustive(
[](const ConstantRange &CR) {
return CR.binaryXor(
ConstantRange(APInt::getAllOnesValue(CR.getBitWidth())));
return CR.binaryXor(ConstantRange(APInt::getAllOnes(CR.getBitWidth())));
},
[](const APInt &N) { return ~N; },
PreferSmallest);
[](const APInt &N) { return ~N; }, PreferSmallest);
TestUnaryOpExhaustive(
[](const ConstantRange &CR) {
return ConstantRange(APInt::getAllOnesValue(CR.getBitWidth()))
.binaryXor(CR);
return ConstantRange(APInt::getAllOnes(CR.getBitWidth())).binaryXor(CR);
},
[](const APInt &N) { return ~N; },
PreferSmallest);
[](const APInt &N) { return ~N; }, PreferSmallest);
}
} // anonymous namespace

@@ -1469,8 +1469,8 @@ struct is_float_nan_pred {
TEST_F(PatternMatchTest, ConstantPredicateType) {
// Scalar integer
APInt U32Max = APInt::getAllOnesValue(32);
APInt U32Zero = APInt::getNullValue(32);
APInt U32Max = APInt::getAllOnes(32);
APInt U32Zero = APInt::getZero(32);
APInt U32DeadBeef(32, 0xDEADBEEF);
Type *U32Ty = Type::getInt32Ty(Ctx);

@@ -464,8 +464,8 @@ TEST(KnownBitsTest, SExtInReg) {
unsigned Bits = 4;
for (unsigned FromBits = 1; FromBits <= Bits; ++FromBits) {
ForeachKnownBits(Bits, [&](const KnownBits &Known) {
APInt CommonOne = APInt::getAllOnesValue(Bits);
APInt CommonZero = APInt::getAllOnesValue(Bits);
APInt CommonOne = APInt::getAllOnes(Bits);
APInt CommonZero = APInt::getAllOnes(Bits);
unsigned ExtBits = Bits - FromBits;
ForeachNumInKnownBits(Known, [&](const APInt &N) {
APInt Ext = N << ExtBits;

@@ -254,7 +254,7 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
// tosa::BitwiseNotOp
if (isa<tosa::BitwiseNotOp>(op) && elementTy.isa<IntegerType>()) {
auto allOnesAttr = rewriter.getIntegerAttr(
elementTy, APInt::getAllOnesValue(elementTy.getIntOrFloatBitWidth()));
elementTy, APInt::getAllOnes(elementTy.getIntOrFloatBitWidth()));
auto allOnes = rewriter.create<ConstantOp>(loc, allOnesAttr);
return rewriter.create<mlir::XOrOp>(loc, resultTypes, args[0], allOnes);
}
@@ -739,10 +739,10 @@ static Attribute createInitialValueForReduceOp(Operation *op, Type elementTy,
elementTy, APInt::getSignedMinValue(elementTy.getIntOrFloatBitWidth()));
if (isa<tosa::ReduceAllOp>(op) && elementTy.isInteger(1))
return rewriter.getIntegerAttr(elementTy, APInt::getAllOnesValue(1));
return rewriter.getIntegerAttr(elementTy, APInt::getAllOnes(1));
if (isa<tosa::ReduceAnyOp>(op) && elementTy.isInteger(1))
return rewriter.getIntegerAttr(elementTy, APInt::getNullValue(1));
return rewriter.getIntegerAttr(elementTy, APInt::getZero(1));
if (isa<tosa::ArgMaxOp>(op) && elementTy.isa<FloatType>())
return rewriter.getFloatAttr(

@@ -288,8 +288,7 @@ OpFoldResult AndOp::fold(ArrayRef<Attribute> operands) {
return rhs();
/// and(x, allOnes) -> x
APInt intValue;
if (matchPattern(rhs(), m_ConstantInt(&intValue)) &&
intValue.isAllOnesValue())
if (matchPattern(rhs(), m_ConstantInt(&intValue)) && intValue.isAllOnes())
return lhs();
/// and(x,x) -> x
if (lhs() == rhs())
@@ -1774,7 +1773,7 @@ OpFoldResult SignedFloorDivIOp::fold(ArrayRef<Attribute> operands) {
return a;
}
unsigned bits = a.getBitWidth();
APInt zero = APInt::getNullValue(bits);
APInt zero = APInt::getZero(bits);
if (a.sge(zero) && b.sgt(zero)) {
// Both positive (or a is zero), return a / b.
return a.sdiv_ov(b, overflowOrDiv0);
@@ -1824,7 +1823,7 @@ OpFoldResult SignedCeilDivIOp::fold(ArrayRef<Attribute> operands) {
return a;
}
unsigned bits = a.getBitWidth();
APInt zero = APInt::getNullValue(bits);
APInt zero = APInt::getZero(bits);
if (a.sgt(zero) && b.sgt(zero)) {
// Both positive, return ceil(a, b).
return signedCeilNonnegInputs(a, b, overflowOrDiv0);

@@ -1380,7 +1380,7 @@ APFloat SparseElementsAttr::getZeroAPFloat() const {
/// Get a zero APInt for the given sparse attribute.
APInt SparseElementsAttr::getZeroAPInt() const {
auto eltType = getType().getElementType().cast<IntegerType>();
return APInt::getNullValue(eltType.getWidth());
return APInt::getZero(eltType.getWidth());
}
/// Get a zero attribute for the given attribute type.