[DataLayout] Fix occurrences where the size and range of pointers are assumed to be the same.

The GEP index size can be specified in the DataLayout, introduced in D42123. However, there were still places in which getIndexSizeInBits was used interchangeably with getPointerSizeInBits. This notably caused issues with InstCombine's visitPtrToInt; but the unit tests were incorrect, so this remained undiscovered.

Differential Revision: https://reviews.llvm.org/D68328

Patch by Joseph Faulls!
commit 5f6208778f
parent 7d7789899f
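For context on D42123: a DataLayout may give pointers in an address space a storage size (and value range) different from the width used to index them. The sketch below is not part of this commit; it is a minimal standalone program (assuming an LLVM build at roughly this revision) that prints both widths for the 40-bit-pointer / 32-bit-index layout used by the new tests, and shows the getIntPtrType/getIndexType distinction this patch enforces.

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("example", Ctx);
  // p:40:64:64:32 -> 40-bit pointers, 64-bit ABI/preferred alignment,
  // and the 32-bit index width that D42123 made expressible.
  M.setDataLayout("e-m:o-p:40:64:64:32-i64:64-f80:128-n8:16:32:64-S128");
  const DataLayout &DL = M.getDataLayout();

  outs() << "pointer size: " << DL.getPointerSizeInBits(0) << "\n"; // 40
  outs() << "index size:   " << DL.getIndexSizeInBits(0) << "\n";   // 32

  // With this patch, getIntPtrType follows the pointer size (i40) and
  // getIndexType follows the index size (i32); before it, getIntPtrType
  // also returned the index-sized i32.
  Type *PtrTy = Type::getInt8PtrTy(Ctx);
  outs() << *DL.getIntPtrType(PtrTy) << "\n"; // i40
  outs() << *DL.getIndexType(PtrTy) << "\n";  // i32
  return 0;
}
```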
@@ -3264,10 +3264,10 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                      expr->getRHS()))
       return CGF.Builder.CreateIntToPtr(index, pointer->getType());
 
-  if (width != DL.getTypeSizeInBits(PtrTy)) {
+  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
     // Zero-extend or sign-extend the pointer value according to
     // whether the index is signed or not.
-    index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
+    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                       "idx.ext");
   }
@@ -222,9 +222,9 @@ public:
     // offsets on this pointer.
     // FIXME: Support a vector of pointers.
     assert(I.getType()->isPointerTy());
-    IntegerType *IntPtrTy = cast<IntegerType>(DL.getIntPtrType(I.getType()));
+    IntegerType *IntIdxTy = cast<IntegerType>(DL.getIndexType(I.getType()));
     IsOffsetKnown = true;
-    Offset = APInt(IntPtrTy->getBitWidth(), 0);
+    Offset = APInt(IntIdxTy->getBitWidth(), 0);
     PI.reset();
 
     // Enqueue the uses of this pointer.
@@ -29,15 +29,15 @@ template <typename IRBuilderTy>
 Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
                      bool NoAssumptions = false) {
   GEPOperator *GEPOp = cast<GEPOperator>(GEP);
-  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
-  Value *Result = Constant::getNullValue(IntPtrTy);
+  Type *IntIdxTy = DL.getIndexType(GEP->getType());
+  Value *Result = Constant::getNullValue(IntIdxTy);
 
   // If the GEP is inbounds, we know that none of the addressing operations will
   // overflow in a signed sense.
   bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
 
   // Build a mask for high order bits.
-  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
+  unsigned IntPtrWidth = IntIdxTy->getScalarType()->getIntegerBitWidth();
   uint64_t PtrSizeMask =
       std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
 
@@ -56,17 +56,17 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
         Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
 
         if (Size)
-          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
+          Result = Builder->CreateAdd(Result, ConstantInt::get(IntIdxTy, Size),
                                       GEP->getName()+".offs");
         continue;
       }
 
       // Splat the constant if needed.
-      if (IntPtrTy->isVectorTy() && !OpC->getType()->isVectorTy())
-        OpC = ConstantVector::getSplat(IntPtrTy->getVectorNumElements(), OpC);
+      if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
+        OpC = ConstantVector::getSplat(IntIdxTy->getVectorNumElements(), OpC);
 
-      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
-      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
+      Constant *Scale = ConstantInt::get(IntIdxTy, Size);
+      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
       Scale =
           ConstantExpr::getMul(OC, Scale, false /*NUW*/, isInBounds /*NSW*/);
       // Emit an add instruction.
 
@@ -75,15 +75,15 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
     }
 
     // Splat the index if needed.
-    if (IntPtrTy->isVectorTy() && !Op->getType()->isVectorTy())
-      Op = Builder->CreateVectorSplat(IntPtrTy->getVectorNumElements(), Op);
+    if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
+      Op = Builder->CreateVectorSplat(IntIdxTy->getVectorNumElements(), Op);
 
     // Convert to correct type.
-    if (Op->getType() != IntPtrTy)
-      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
+    if (Op->getType() != IntIdxTy)
+      Op = Builder->CreateIntCast(Op, IntIdxTy, true, Op->getName()+".c");
     if (Size != 1) {
       // We'll let instcombine(mul) convert this to a shl if possible.
-      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
+      Op = Builder->CreateMul(Op, ConstantInt::get(IntIdxTy, Size),
                               GEP->getName() + ".idx", false /*NUW*/,
                               isInBounds /*NSW*/);
     }
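To illustrate the EmitGEPOffset change above: under the same 40/32 layout, the emitted offset arithmetic is now typed with the 32-bit index type rather than a 40-bit pointer-sized integer. A hedged standalone sketch (not from the commit; the function and module names are invented for illustration):

```cpp
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("example", Ctx);
  M.setDataLayout("e-m:o-p:40:64:64:32-i64:64-f80:128-n8:16:32:64-S128");

  // Build: void @f(i32* %p, i32 %i) containing "getelementptr i32, i32* %p, i32 %i".
  auto *FTy = FunctionType::get(
      Type::getVoidTy(Ctx),
      {Type::getInt32PtrTy(Ctx), Type::getInt32Ty(Ctx)}, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);

  auto Args = F->arg_begin();
  Value *P = &*Args++;
  Value *I = &*Args;
  Value *GEP = B.CreateGEP(Type::getInt32Ty(Ctx), P, I, "gep");

  // The offset expression is typed i32 (the index type of the pointer's
  // address space) rather than a 40-bit pointer-sized integer.
  Value *Offset = EmitGEPOffset(&B, M.getDataLayout(), cast<User>(GEP));
  outs() << *Offset->getType() << "\n"; // i32
  B.CreateRetVoid();
  return 0;
}
```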
@@ -766,8 +766,8 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
 Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                          Type *ResultTy, Optional<unsigned> InRangeIndex,
                          const DataLayout &DL, const TargetLibraryInfo *TLI) {
-  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
-  Type *IntPtrScalarTy = IntPtrTy->getScalarType();
+  Type *IntIdxTy = DL.getIndexType(ResultTy);
+  Type *IntIdxScalarTy = IntIdxTy->getScalarType();
 
   bool Any = false;
   SmallVector<Constant*, 32> NewIdxs;
 
@@ -775,11 +775,11 @@ Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
     if ((i == 1 ||
          !isa<StructType>(GetElementPtrInst::getIndexedType(
              SrcElemTy, Ops.slice(1, i - 1)))) &&
-        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
+        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
       Any = true;
       Type *NewType = Ops[i]->getType()->isVectorTy()
-                          ? IntPtrTy
-                          : IntPtrTy->getScalarType();
+                          ? IntIdxTy
+                          : IntIdxScalarTy;
       NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                       true,
                                                                       NewType,
 
@@ -839,7 +839,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   if (!Ptr->getType()->isPointerTy())
     return nullptr;
 
-  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
+  Type *IntIdxTy = DL.getIndexType(Ptr->getType());
 
   // If this is a constant expr gep that is effectively computing an
   // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
 
@@ -850,7 +850,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   // "inttoptr (sub (ptrtoint Ptr), V)"
   if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
     auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
-    assert((!CE || CE->getType() == IntPtrTy) &&
+    assert((!CE || CE->getType() == IntIdxTy) &&
            "CastGEPIndices didn't canonicalize index types!");
     if (CE && CE->getOpcode() == Instruction::Sub &&
         CE->getOperand(0)->isNullValue()) {
 
@@ -865,7 +865,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
       return nullptr;
   }
 
-  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
+  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
   APInt Offset =
       APInt(BitWidth,
             DL.getIndexedOffsetInType(
 
@@ -945,7 +945,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
         // The element size is 0. This may be [0 x Ty]*, so just use a zero
         // index for this level and proceed to the next level to see if it can
         // accommodate the offset.
-        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
+        NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
       } else {
         // The element size is non-zero divide the offset by the element
         // size (rounding down), to compute the index at this level.
 
@@ -954,7 +954,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
         if (Overflow)
           break;
         Offset -= NewIdx * ElemSize;
-        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
+        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
       }
     } else {
       auto *STy = cast<StructType>(Ty);
@@ -1678,8 +1678,8 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
   } while (Visited.insert(V).second);
 
-  Type *IntPtrTy = DL.getIntPtrType(V->getContext(), AS);
-  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
+  Type *IdxPtrTy = DL.getIndexType(V->getType());
+  return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
 }
 
 /// Find dead blocks due to deleted CFG edges during inlining.
@@ -662,16 +662,16 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                 bool AllowNonInbounds = false) {
   assert(V->getType()->isPtrOrPtrVectorTy());
 
-  Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
-  APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
+  Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
+  APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());
 
   V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
   // As that strip may trace through `addrspacecast`, need to sext or trunc
   // the offset calculated.
-  IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
-  Offset = Offset.sextOrTrunc(IntPtrTy->getIntegerBitWidth());
+  IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
+  Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());
 
-  Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
+  Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
   if (V->getType()->isVectorTy())
     return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
                                     OffsetIntPtr);
 
@@ -4032,7 +4032,7 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
       // The following transforms are only safe if the ptrtoint cast
       // doesn't truncate the pointers.
       if (Ops[1]->getType()->getScalarSizeInBits() ==
-          Q.DL.getIndexSizeInBits(AS)) {
+          Q.DL.getPointerSizeInBits(AS)) {
         auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
           if (match(P, m_Zero()))
             return Constant::getNullValue(GEPTy);
@@ -150,7 +150,7 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
 
   // Require ABI alignment for loads without alignment specification
   const Align Alignment = DL.getValueOrABITypeAlignment(MA, Ty);
-  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
+  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                    DL.getTypeStoreSize(Ty));
   return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                             DT);
@@ -544,6 +544,7 @@ Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
           Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
       Value *UseZero =
           Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
+      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
       return Builder.CreateSelect(UseZero, ConstantInt::get(ResultType, 0),
                                   ResultSize);
     }
 
@@ -576,7 +577,7 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
 }
 
 SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
-  IntTyBits = DL.getPointerTypeSizeInBits(V->getType());
+  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
   Zero = APInt::getNullValue(IntTyBits);
 
   V = V->stripPointerCasts();
@@ -746,7 +747,7 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
 
 SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
   SizeOffsetType PtrData = compute(GEP.getPointerOperand());
-  APInt Offset(IntTyBits, 0);
+  APInt Offset(DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()), 0);
   if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
     return unknown();
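Why the Offset width is taken from the GEP's own pointer operand here: GEPOperator::accumulateConstantOffset expects an APInt exactly as wide as the index size of that pointer's address space, which need not match a width derived elsewhere. A standalone sketch under the same custom layout (not from the commit; global names are invented):

```cpp
#include "llvm/ADT/APInt.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "target datalayout = \"e-m:o-p:40:64:64:32-i64:64\"\n"
      "@buf = global [10 x i8] zeroinitializer\n"
      "@p = global i8* getelementptr inbounds"
      " ([10 x i8], [10 x i8]* @buf, i32 0, i32 3)\n",
      Err, Ctx);
  if (!M)
    return 1;
  const DataLayout &DL = M->getDataLayout();

  auto *GEP = cast<GEPOperator>(M->getNamedGlobal("p")->getInitializer());
  // accumulateConstantOffset asserts that the APInt is exactly as wide as
  // the index size of the GEP's address space (32 bits here), not the
  // 40-bit pointer size.
  APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
  if (GEP->accumulateConstantOffset(DL, Offset))
    outs() << "constant offset: " << Offset << "\n"; // 3
  return 0;
}
```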
@@ -834,7 +835,7 @@ ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
 
 SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
   // XXX - Are vectors of pointers possible here?
-  IntTy = cast<IntegerType>(DL.getIntPtrType(V->getType()));
+  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
   Zero = ConstantInt::get(IntTy, 0);
 
   SizeOffsetEvalType Result = compute_(V);
 
@@ -938,12 +939,12 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
   }
 
   Value *FirstArg = CS.getArgument(FnData->FstParam);
-  FirstArg = Builder.CreateZExt(FirstArg, IntTy);
+  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
   if (FnData->SndParam < 0)
     return std::make_pair(FirstArg, Zero);
 
   Value *SecondArg = CS.getArgument(FnData->SndParam);
-  SecondArg = Builder.CreateZExt(SecondArg, IntTy);
+  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
   Value *Size = Builder.CreateMul(FirstArg, SecondArg);
   return std::make_pair(Size, Zero);
@@ -3495,7 +3495,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
   const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
   // getSCEV(Base)->getType() has the same address space as Base->getType()
   // because SCEV::getType() preserves the address space.
-  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
+  Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
   // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
   // instruction to its SCEV, because the Instruction may be guarded by control
   // flow and the no-overflow bits may not be valid for the expression in any
 
@@ -3504,7 +3504,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
   SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                              : SCEV::FlagAnyWrap;
 
-  const SCEV *TotalOffset = getZero(IntPtrTy);
+  const SCEV *TotalOffset = getZero(IntIdxTy);
   // The array size is unimportant. The first thing we do on CurTy is getting
   // its element type.
   Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
 
@@ -3514,7 +3514,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
       // For a struct, add the member offset.
       ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
       unsigned FieldNo = Index->getZExtValue();
-      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
+      const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
 
       // Add the field offset to the running total offset.
       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
 
@@ -3525,9 +3525,9 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
       // Update CurTy to its element type.
       CurTy = cast<SequentialType>(CurTy)->getElementType();
       // For an array, add the element offset, explicitly scaled.
-      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
+      const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
       // Getelementptr indices are signed.
-      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);
+      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
 
       // Multiply the index by the element size to compute the element offset.
       const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
 
@@ -3786,7 +3786,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
 
 /// Return a type with the same bitwidth as the given type and which represents
 /// how SCEV will treat the given type, for which isSCEVable must return
-/// true. For pointer types, this is the pointer-sized integer type.
+/// true. For pointer types, this is the pointer index sized integer type.
 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
   assert(isSCEVable(Ty) && "Type is not SCEVable!");
 
@@ -3795,7 +3795,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
 
   // The only other support type is pointer.
   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
-  return getDataLayout().getIntPtrType(Ty);
+  return getDataLayout().getIndexType(Ty);
 }
 
 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
 
@@ -5726,6 +5726,15 @@ ScalarEvolution::getRangeRef(const SCEV *S,
       assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
              "generalize as needed!");
       unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
+      // If the pointer size is larger than the index size type, this can cause
+      // NS to be larger than BitWidth. So compensate for this.
+      if (U->getType()->isPointerTy()) {
+        unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
+        int ptrIdxDiff = ptrSize - BitWidth;
+        if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
+          NS -= ptrIdxDiff;
+      }
+
       if (NS > 1)
         ConservativeResult = ConservativeResult.intersectWith(
             ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
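A hedged restatement of the compensation added to getRangeRef above: ComputeNumSignBits counts sign bits across the full pointer width (40 bits in the new test layout), while BitWidth is the index width SCEV now uses for pointers (32 bits), so sign bits that exist only above the index width must be subtracted. Standalone model in plain C++, not LLVM code:

```cpp
#include <cassert>

// Model of the new getRangeRef logic: NS was computed on the ptrSize-wide
// pointer value, but the range is built at BitWidth (the index width), so
// sign bits living only above the index width are dropped.
unsigned compensateSignBits(unsigned NS, unsigned ptrSize, unsigned BitWidth) {
  int ptrIdxDiff = int(ptrSize) - int(BitWidth);
  if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > unsigned(ptrIdxDiff))
    NS -= ptrIdxDiff;
  return NS;
}

int main() {
  // 40-bit pointers, 32-bit indices: 8 of the counted sign bits sit above
  // bit 31 and must not be claimed for the 32-bit range.
  assert(compensateSignBits(13, 40, 32) == 5);
  assert(compensateSignBits(20, 40, 32) == 12);
  // When pointer and index sizes match, nothing changes.
  assert(compensateSignBits(5, 32, 32) == 5);
  return 0;
}
```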
@@ -414,7 +414,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   // without the other.
   SplitAddRecs(Ops, Ty, SE);
 
-  Type *IntPtrTy = DL.getIntPtrType(PTy);
+  Type *IntIdxTy = DL.getIndexType(PTy);
 
   // Descend down the pointer's type and attempt to convert the other
   // operands into GEP indices, at each level. The first index in a GEP
 
@@ -426,7 +426,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
     // array indexing.
     SmallVector<const SCEV *, 8> ScaledOps;
     if (ElTy->isSized()) {
-      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
+      const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
       if (!ElSize->isZero()) {
         SmallVector<const SCEV *, 8> NewOps;
         for (const SCEV *Op : Ops) {
@@ -90,7 +90,7 @@ static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
   if (unsigned BitWidth = Ty->getScalarSizeInBits())
     return BitWidth;
 
-  return DL.getIndexTypeSizeInBits(Ty);
+  return DL.getPointerTypeSizeInBits(Ty);
 }
 
 namespace {
 
@@ -1137,7 +1137,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
     // which fall through here.
     Type *ScalarTy = SrcTy->getScalarType();
     SrcBitWidth = ScalarTy->isPointerTy() ?
-      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
+      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
       Q.DL.getTypeSizeInBits(ScalarTy);
 
     assert(SrcBitWidth && "SrcBitWidth can't be zero");
 
@@ -1664,7 +1664,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
 
   Type *ScalarTy = V->getType()->getScalarType();
   unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
-    Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
+    Q.DL.getPointerTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
   (void)BitWidth;
   (void)ExpectedWidth;
 
@@ -2409,7 +2409,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
 
   Type *ScalarTy = V->getType()->getScalarType();
   unsigned TyBits = ScalarTy->isPointerTy() ?
-    Q.DL.getIndexTypeSizeInBits(ScalarTy) :
+    Q.DL.getPointerTypeSizeInBits(ScalarTy) :
     Q.DL.getTypeSizeInBits(ScalarTy);
 
   unsigned Tmp, Tmp2;
@@ -9322,8 +9322,8 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
   const GlobalValue *GV = nullptr;
   int64_t GVOffset = 0;
   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
-    unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
-    KnownBits Known(IdxWidth);
+    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
+    KnownBits Known(PtrWidth);
     llvm::computeKnownBits(GV, Known, getDataLayout());
     unsigned AlignBits = Known.countMinTrailingZeros();
     unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
@@ -768,13 +768,13 @@ unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {
 
 IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
                                        unsigned AddressSpace) const {
-  return IntegerType::get(C, getIndexSizeInBits(AddressSpace));
+  return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
 }
 
 Type *DataLayout::getIntPtrType(Type *Ty) const {
   assert(Ty->isPtrOrPtrVectorTy() &&
          "Expected a pointer or pointer vector type.");
-  unsigned NumBits = getIndexTypeSizeInBits(Ty);
+  unsigned NumBits = getPointerTypeSizeInBits(Ty);
   IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
   if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
     return VectorType::get(IntTy, VecTy->getNumElements());
@@ -1832,7 +1832,7 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
   Type *Ty = CI.getType();
   unsigned AS = CI.getPointerAddressSpace();
 
-  if (Ty->getScalarSizeInBits() == DL.getIndexSizeInBits(AS))
+  if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
     return commonPointerCastTransforms(CI);
 
   Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
@@ -4930,7 +4930,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
   // Get scalar or pointer size.
   unsigned BitWidth = Ty->isIntOrIntVectorTy()
                           ? Ty->getScalarSizeInBits()
-                          : DL.getIndexTypeSizeInBits(Ty->getScalarType());
+                          : DL.getPointerTypeSizeInBits(Ty->getScalarType());
 
   if (!BitWidth)
     return nullptr;
@@ -901,12 +901,12 @@ bool LoopIdiomRecognize::processLoopStridedStore(
   SCEVExpander Expander(*SE, *DL, "loop-idiom");
 
   Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
-  Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
+  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());
 
   const SCEV *Start = Ev->getStart();
   // Handle negative strided loops.
   if (NegStride)
-    Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
+    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE);
 
   // TODO: ideally we should still be able to generate memset if SCEV expander
   // is taught to generate the dependencies at the latest point.
 
@@ -934,7 +934,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
   // Okay, everything looks good, insert the memset.
 
   const SCEV *NumBytesS =
-      getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);
+      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);
 
   // TODO: ideally we should still be able to generate memset if SCEV expander
   // is taught to generate the dependencies at the latest point.
 
@@ -942,7 +942,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
     return false;
 
   Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
+      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
 
   CallInst *NewCall;
   if (SplatValue) {
 
@@ -955,7 +955,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
     Module *M = TheStore->getModule();
     StringRef FuncName = "memset_pattern16";
     FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
-                                                Int8PtrTy, Int8PtrTy, IntPtr);
+                                                Int8PtrTy, Int8PtrTy, IntIdxTy);
     inferLibFuncAttributes(M, FuncName, *TLI);
 
     // Otherwise we should form a memset_pattern16. PatternValue is known to be
 
@@ -1022,11 +1022,11 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
 
   const SCEV *StrStart = StoreEv->getStart();
   unsigned StrAS = SI->getPointerAddressSpace();
-  Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
+  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));
 
   // Handle negative strided loops.
   if (NegStride)
-    StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
+    StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE);
 
   // Okay, we have a strided store "p[i]" of a loaded value. We can turn
   // this into a memcpy in the loop preheader now if we want. However, this
 
@@ -1052,7 +1052,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
 
   // Handle negative strided loops.
   if (NegStride)
-    LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
+    LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE);
 
   // For a memcpy, we have to make sure that the input array is not being
   // mutated by the loop.
 
@@ -1074,10 +1074,10 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
   // Okay, everything is safe, we can transform this!
 
   const SCEV *NumBytesS =
-      getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);
+      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);
 
   Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
+      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
 
   CallInst *NewCall = nullptr;
   // Check whether to generate an unordered atomic memcpy:
@@ -2579,7 +2579,7 @@ void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
   if (!NewTy->isPointerTy())
     return;
 
-  unsigned BitWidth = DL.getIndexTypeSizeInBits(NewTy);
+  unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
     MDNode *NN = MDNode::get(OldLI.getContext(), None);
     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
@@ -0,0 +1,32 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+target datalayout = "e-m:o-p:40:64:64:32-i64:64-f80:128-n8:16:32:64-S128"
+
+; check that memory builtins can be handled.
+define i64 @objsize1_custom_idx(i64 %sz) {
+entry:
+  %ptr = call i8* @malloc(i64 %sz)
+  %ptr2 = getelementptr inbounds i8, i8* %ptr, i32 2
+  %calc_size = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr2, i1 false, i1 true, i1 true)
+  ret i64 %calc_size
+}
+
+%struct.V = type { [10 x i8], i32, [10 x i8] }
+
+define i32 @objsize2_custom_idx() #0 {
+entry:
+  %var = alloca %struct.V, align 4
+  %0 = bitcast %struct.V* %var to i8*
+  call void @llvm.lifetime.start.p0i8(i64 28, i8* %0) #3
+  %buf1 = getelementptr inbounds %struct.V, %struct.V* %var, i32 0, i32 0
+  %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf1, i64 0, i64 1
+  %1 = call i64 @llvm.objectsize.i64.p0i8(i8* %arrayidx, i1 false, i1 false, i1 false)
+  %conv = trunc i64 %1 to i32
+  call void @llvm.lifetime.end.p0i8(i64 28, i8* %0) #3
+  ret i32 %conv
+; CHECK: ret i32 27
+}
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare i8* @malloc(i64)
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1, i1)
@@ -164,3 +164,22 @@ define i32 @test10() {
   %B = ptrtoint double* %A to i32
   ret i32 %B
 }
+
+@X_as1 = addrspace(1) global [1000 x i8] zeroinitializer, align 16
+
+define i16 @constant_fold_custom_dl() {
+; CHECK-LABEL: @constant_fold_custom_dl(
+; CHECK: ret i16 ptrtoint
+
+entry:
+  %A = bitcast i8 addrspace(1)* getelementptr inbounds ([1000 x i8], [1000 x i8] addrspace(1)* @X_as1, i64 1, i64 0) to i8 addrspace(1)*
+  %B = bitcast i8 addrspace(1)* getelementptr inbounds ([1000 x i8], [1000 x i8] addrspace(1)* @X_as1, i64 0, i64 0) to i8 addrspace(1)*
+
+  %B2 = ptrtoint i8 addrspace(1)* %B to i16
+  %C = sub i16 0, %B2
+  %D = getelementptr i8, i8 addrspace(1)* %A, i16 %C
+  %E = ptrtoint i8 addrspace(1)* %D to i16
+
+  ret i16 %E
+}
@@ -8,8 +8,8 @@ declare i32 @test58_d(i64 )
 define i1 @test59(i8* %foo) {
 ; CHECK-LABEL: @test59(
 ; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8* [[FOO:%.*]], i32 8
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8* [[GEP1]] to i32
-; CHECK-NEXT:    [[USE:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8* [[GEP1]] to i40
+; CHECK-NEXT:    [[USE:%.*]] = zext i40 [[TMP1]] to i64
 ; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
 ; CHECK-NEXT:    ret i1 true
 ;
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+target datalayout = "e-m:o-p:40:64:64:32-i64:64-f80:128-n8:16:32:64-S128"
+%struct._IO_FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct._IO_FILE*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
+%struct._IO_marker = type { %struct._IO_marker*, %struct._IO_FILE*, i32 }
+@.str = private unnamed_addr constant [5 x i8] c"file\00", align 1
+@.str.1 = private unnamed_addr constant [2 x i8] c"w\00", align 1
+@.str.2 = private unnamed_addr constant [4 x i8] c"str\00", align 1
+
+; Check fwrite is generated with arguments of ptr size, not index size
+define internal void @fputs_test_custom_dl() {
+; CHECK-LABEL: @fputs_test_custom_dl(
+; CHECK-NEXT:    [[TMP1:%.*]] = call %struct._IO_FILE* @fopen(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i32 0, i32 0))
+;
+  %call = call %struct._IO_FILE* @fopen(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i64 0, i64 0), i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i64 0, i64 0))
+  %call1 = call i32 @fputs(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.2, i64 0, i64 0), %struct._IO_FILE* %call)
+  ret void
+}
+
+declare %struct._IO_FILE* @fopen(i8*, i8*)
+declare i32 @fputs(i8* nocapture readonly, %struct._IO_FILE* nocapture)
@@ -65,3 +65,73 @@ for.inc: ; preds = %for.body
 for.end: ; preds = %for.cond
   ret void
 }
+
+@array = weak global [101 x i32] zeroinitializer, align 32 ; <[100 x i32]*> [#uses=1]
+
+; CHECK: Loop %bb: backedge-taken count is 100
+
+define void @test_range_ref1a(i32 %x) {
+entry:
+  br label %bb
+
+bb: ; preds = %bb, %entry
+  %i.01.0 = phi i32 [ 100, %entry ], [ %tmp4, %bb ] ; <i32> [#uses=2]
+  %tmp1 = getelementptr [101 x i32], [101 x i32]* @array, i32 0, i32 %i.01.0 ; <i32*> [#uses=1]
+  store i32 %x, i32* %tmp1
+  %tmp4 = add i32 %i.01.0, -1 ; <i32> [#uses=2]
+  %tmp7 = icmp sgt i32 %tmp4, -1 ; <i1> [#uses=1]
+  br i1 %tmp7, label %bb, label %return
+
+return: ; preds = %bb
+  ret void
+}
+
+define i32 @test_loop_idiom_recogize(i32 %x, i32 %y, i32* %lam, i32* %alp) nounwind {
+bb1.thread:
+  br label %bb1
+
+bb1: ; preds = %bb1, %bb1.thread
+  %indvar = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=4]
+  %i.0.reg2mem.0 = sub i32 255, %indvar ; <i32> [#uses=2]
+  %0 = getelementptr i32, i32* %alp, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
+  %1 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
+  %2 = getelementptr i32, i32* %lam, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
+  store i32 %1, i32* %2, align 4
+  %3 = sub i32 254, %indvar ; <i32> [#uses=1]
+  %4 = icmp slt i32 %3, 0 ; <i1> [#uses=1]
+  %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
+  br i1 %4, label %bb2, label %bb1
+
+bb2: ; preds = %bb1
+  %tmp10 = mul i32 %indvar, %x ; <i32> [#uses=1]
+  %z.0.reg2mem.0 = add i32 %tmp10, %y ; <i32> [#uses=1]
+  %5 = add i32 %z.0.reg2mem.0, %x ; <i32> [#uses=1]
+  ret i32 %5
+}
+
+declare void @use(i1)
+
+declare void @llvm.experimental.guard(i1, ...)
+
+; This tests getRangeRef acts as intended with different idx size.
+; CHECK: max backedge-taken count is 318
+define void @test_range_ref1(i8 %t) {
+entry:
+  %t.ptr = inttoptr i8 %t to i8*
+  %p.42 = inttoptr i8 42 to i8*
+  %cmp1 = icmp slt i8* %t.ptr, %p.42
+  call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+  br label %loop
+
+loop:
+  %idx = phi i8* [ %t.ptr, %entry ], [ %snext, %loop ]
+  %snext = getelementptr inbounds i8, i8* %idx, i64 1
+  %c = icmp slt i8* %idx, %p.42
+  call void @use(i1 %c)
+  %be = icmp slt i8* %snext, %p.42
+  br i1 %be, label %loop, label %exit
+
+exit:
+  ret void
+}
@@ -33,10 +33,10 @@ F: ; preds = %0
 
 define void @test1_ptr(i32* %V) {
 ; CHECK-LABEL: @test1_ptr(
-; CHECK-NEXT:    [[MAGICPTR:%.*]] = ptrtoint i32* [[V:%.*]] to i32
-; CHECK-NEXT:    switch i32 [[MAGICPTR]], label [[F:%.*]] [
-; CHECK-NEXT:    i32 17, label [[T:%.*]]
-; CHECK-NEXT:    i32 4, label [[T]]
+; CHECK-NEXT:    [[MAGICPTR:%.*]] = ptrtoint i32* [[V:%.*]] to i40
+; CHECK-NEXT:    switch i40 [[MAGICPTR]], label [[F:%.*]] [
+; CHECK-NEXT:    i40 17, label [[T:%.*]]
+; CHECK-NEXT:    i40 4, label [[T]]
 ; CHECK-NEXT:    ]
 ; CHECK:       T:
 ; CHECK-NEXT:    call void @foo1()
 
@@ -59,10 +59,10 @@ F: ; preds = %0
 
 define void @test1_ptr_as1(i32 addrspace(1)* %V) {
 ; CHECK-LABEL: @test1_ptr_as1(
-; CHECK-NEXT:    [[MAGICPTR:%.*]] = ptrtoint i32 addrspace(1)* [[V:%.*]] to i32
-; CHECK-NEXT:    switch i32 [[MAGICPTR]], label [[F:%.*]] [
-; CHECK-NEXT:    i32 17, label [[T:%.*]]
-; CHECK-NEXT:    i32 4, label [[T]]
+; CHECK-NEXT:    [[MAGICPTR:%.*]] = ptrtoint i32 addrspace(1)* [[V:%.*]] to i40
+; CHECK-NEXT:    switch i40 [[MAGICPTR]], label [[F:%.*]] [
+; CHECK-NEXT:    i40 17, label [[T:%.*]]
+; CHECK-NEXT:    i40 4, label [[T]]
 ; CHECK-NEXT:    ]
 ; CHECK:       T:
 ; CHECK-NEXT:    call void @foo1()