Revert the series of commits starting with r166578, which introduced the
getIntPtrType support for multiple address spaces via a pointer type, and
also introduced a crasher bug in the constant folder reported in PR14233.

These commits also contained several problems that should really be
addressed before they are re-committed. I have avoided reverting various
cleanups to the DataLayout APIs that are reasonable to have moving
forward in order to reduce the amount of churn, and minimize the number
of commits that were reverted. I've also manually updated merge conflicts
and manually arranged for the getIntPtrType function to stay in
DataLayout and to be defined in a plausible way after this revert.

Thanks to Duncan for working through this exact strategy with me, and
Nick Lewycky for tracking down the really annoying crasher this
triggered. (Test case to follow in its own commit.)

After discussing with Duncan extensively, and based on a note from Micah,
I'm going to continue to back out some more of the more problematic
patches in this series in order to ensure we go into the LLVM 3.2 branch
with a reasonable story here. I'll send a note to llvmdev explaining
what's going on and why.

Summary of reverted revisions:

r166634: Fix a compiler warning with an unused variable.
r166607: Add some cleanup to the DataLayout changes requested by
         Chandler.
r166596: Revert "Back out r166591, not sure why this made it through
         since I cancelled the command. Bleh, sorry about this!
r166591: Delete a directory that wasn't supposed to be checked in yet.
r166578: Add in support for getIntPtrType to get the pointer type based
         on the address space.

llvm-svn: 167221
commit 7ec5085e01
parent 2d8b294b3c
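The direction of the API change is the same in nearly every hunk below: the reverted series derived the integer pointer type from a specific pointer type, or required an explicit address space; this commit moves call sites back to the LLVMContext-based form, whose address-space parameter regains its default of 0. A minimal sketch of the two call-site shapes against the 3.2-era headers (the helper names intPtrOld/intPtrNew and the choice of address space 1 are illustrative, not taken from this diff):

#include "llvm/DataLayout.h"    // 3.2-era header path, as seen in this diff
#include "llvm/DerivedTypes.h"
using namespace llvm;

// Shape required by the reverted series: the address space has to be
// spelled out (or a pointer type passed so it can be derived).
IntegerType *intPtrOld(const DataLayout &TD, LLVMContext &Ctx) {
  return TD.getIntPtrType(Ctx, /*AddressSpace=*/1);
}

// Shape restored by this revert: the context alone suffices, and
// getIntPtrType(Ctx) is equivalent to getIntPtrType(Ctx, 0).
IntegerType *intPtrNew(const DataLayout &TD, LLVMContext &Ctx) {
  return TD.getIntPtrType(Ctx);
}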
@ -168,8 +168,7 @@ class ObjectSizeOffsetVisitor
|
|||
|
||||
public:
|
||||
ObjectSizeOffsetVisitor(const DataLayout *TD, const TargetLibraryInfo *TLI,
|
||||
LLVMContext &Context, bool RoundToAlign = false,
|
||||
unsigned AS = 0);
|
||||
LLVMContext &Context, bool RoundToAlign = false);
|
||||
|
||||
SizeOffsetType compute(Value *V);
|
||||
|
||||
|
@ -230,7 +229,7 @@ class ObjectSizeOffsetEvaluator
|
|||
|
||||
public:
|
||||
ObjectSizeOffsetEvaluator(const DataLayout *TD, const TargetLibraryInfo *TLI,
|
||||
LLVMContext &Context, unsigned AS = 0);
|
||||
LLVMContext &Context);
|
||||
SizeOffsetEvalType compute(Value *V);
|
||||
|
||||
bool knownSize(SizeOffsetEvalType SizeOffset) {
|
||||
|
|
|
@ -628,7 +628,7 @@ namespace llvm {
|
|||
|
||||
/// getSizeOfExpr - Return an expression for sizeof on the given type.
|
||||
///
|
||||
const SCEV *getSizeOfExpr(Type *AllocTy, Type *IntPtrTy);
|
||||
const SCEV *getSizeOfExpr(Type *AllocTy);
|
||||
|
||||
/// getAlignOfExpr - Return an expression for alignof on the given type.
|
||||
///
|
||||
|
@ -636,8 +636,7 @@ namespace llvm {
|
|||
|
||||
/// getOffsetOfExpr - Return an expression for offsetof on the given field.
|
||||
///
|
||||
const SCEV *getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
|
||||
unsigned FieldNo);
|
||||
const SCEV *getOffsetOfExpr(StructType *STy, unsigned FieldNo);
|
||||
|
||||
/// getOffsetOfExpr - Return an expression for offsetof on the given field.
|
||||
///
|
||||
|
|
|
@ -258,14 +258,6 @@ public:
|
|||
unsigned getPointerSizeInBits(unsigned AS) const {
|
||||
return getPointerSize(AS) * 8;
|
||||
}
|
||||
/// Layout pointer size, in bits, based on the type.
|
||||
/// If this function is called with a pointer type, then
|
||||
/// the type size of the pointer is returned.
|
||||
/// If this function is called with a vector of pointers,
|
||||
/// then the type size of the pointer is returned.
|
||||
/// Otherwise the type sizeo f a default pointer is returned.
|
||||
unsigned getPointerTypeSizeInBits(Type* Ty) const;
|
||||
|
||||
/// Size examples:
|
||||
///
|
||||
/// Type SizeInBits StoreSizeInBits AllocSizeInBits[*]
|
||||
|
@ -343,7 +335,7 @@ public:
|
|||
|
||||
/// getIntPtrType - Return an integer type with size at least as big as that
|
||||
/// of a pointer in the given address space.
|
||||
IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace) const;
|
||||
IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
|
||||
|
||||
/// getIntPtrType - Return an integer (vector of integer) type with size at
|
||||
/// least as big as that of a pointer of the given pointer (vector of pointer)
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#define LLVM_INSTRUCTION_TYPES_H
|
||||
|
||||
#include "llvm/Instruction.h"
|
||||
#include "llvm/DataLayout.h"
|
||||
#include "llvm/OperandTraits.h"
|
||||
#include "llvm/DerivedTypes.h"
|
||||
#include "llvm/ADT/Twine.h"
|
||||
|
@ -577,11 +576,6 @@ public:
|
|||
Type *IntPtrTy ///< Integer type corresponding to pointer
|
||||
) const;
|
||||
|
||||
/// @brief Determine if this cast is a no-op cast.
|
||||
bool isNoopCast(
|
||||
const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
|
||||
) const;
|
||||
|
||||
/// Determine how a pair of casts can be eliminated, if they can be at all.
|
||||
/// This is a helper function for both CastInst and ConstantExpr.
|
||||
/// @returns 0 if the CastInst pair can't be eliminated, otherwise
|
||||
|
|
|
@ -179,9 +179,8 @@ static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
|
|||
template<typename IRBuilderTy>
|
||||
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
|
||||
bool NoAssumptions = false) {
|
||||
unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
|
||||
gep_type_iterator GTI = gep_type_begin(GEP);
|
||||
Type *IntPtrTy = TD.getIntPtrType(GEP->getContext(), AS);
|
||||
Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
|
||||
Value *Result = Constant::getNullValue(IntPtrTy);
|
||||
|
||||
// If the GEP is inbounds, we know that none of the addressing operations will
|
||||
|
@ -189,6 +188,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
|
|||
bool isInBounds = cast<GEPOperator>(GEP)->isInBounds() && !NoAssumptions;
|
||||
|
||||
// Build a mask for high order bits.
|
||||
unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
|
||||
unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
|
||||
uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
|
||||
|
||||
|
|
|
@ -378,7 +378,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
|
|||
|
||||
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
|
||||
if (CE->getOpcode() == Instruction::IntToPtr &&
|
||||
CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType()))
|
||||
CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
|
||||
return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
|
||||
BytesLeft, TD);
|
||||
}
|
||||
|
@ -575,7 +575,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
|
|||
Type *ResultTy, const DataLayout *TD,
|
||||
const TargetLibraryInfo *TLI) {
|
||||
if (!TD) return 0;
|
||||
Type *IntPtrTy = TD->getIntPtrType(ResultTy);
|
||||
Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
|
||||
|
||||
bool Any = false;
|
||||
SmallVector<Constant*, 32> NewIdxs;
|
||||
|
@ -629,8 +629,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
|
|||
!Ptr->getType()->isPointerTy())
|
||||
return 0;
|
||||
|
||||
unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
|
||||
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext(), AS);
|
||||
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
|
||||
|
||||
// If this is a constant expr gep that is effectively computing an
|
||||
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
|
||||
|
@ -703,8 +702,6 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
|
|||
// Also, this helps GlobalOpt do SROA on GlobalVariables.
|
||||
Type *Ty = Ptr->getType();
|
||||
assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
|
||||
assert(Ty->getPointerAddressSpace() == AS
|
||||
&& "Operand and result of GEP should be in the same address space.");
|
||||
SmallVector<Constant*, 32> NewIdxs;
|
||||
do {
|
||||
if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
|
||||
|
@ -720,7 +717,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
|
|||
|
||||
// Determine which element of the array the offset points into.
|
||||
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
|
||||
IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext(), AS);
|
||||
IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
|
||||
if (ElemSize == 0)
|
||||
// The element size is 0. This may be [0 x Ty]*, so just use a zero
|
||||
// index for this level and proceed to the next level to see if it can
|
||||
|
@ -937,7 +934,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
|
|||
// pointer, so it can't be done in ConstantExpr::getCast.
|
||||
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
|
||||
if (TD && CE->getOpcode() == Instruction::PtrToInt &&
|
||||
TD->getTypeSizeInBits(CE->getOperand(0)->getType())
|
||||
TD->getPointerSizeInBits(
|
||||
cast<PointerType>(CE->getOperand(0)->getType())->getAddressSpace())
|
||||
<= CE->getType()->getScalarSizeInBits())
|
||||
return FoldBitCast(CE->getOperand(0), DestTy, *TD);
|
||||
|
||||
|
@ -990,10 +988,9 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
|
|||
// ConstantExpr::getCompare cannot do this, because it doesn't have TD
|
||||
// around to know if bit truncation is happening.
|
||||
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
|
||||
Type *IntPtrTy = NULL;
|
||||
if (TD && Ops1->isNullValue()) {
|
||||
Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
|
||||
if (CE0->getOpcode() == Instruction::IntToPtr) {
|
||||
IntPtrTy = TD->getIntPtrType(CE0->getType());
|
||||
// Convert the integer value to the right size to ensure we get the
|
||||
// proper extension or truncation.
|
||||
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
|
||||
|
@ -1004,21 +1001,19 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
|
|||
|
||||
// Only do this transformation if the int is intptrty in size, otherwise
|
||||
// there is a truncation or extension that we aren't modeling.
|
||||
if (CE0->getOpcode() == Instruction::PtrToInt) {
|
||||
IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
|
||||
if (CE0->getType() == IntPtrTy) {
|
||||
if (CE0->getOpcode() == Instruction::PtrToInt &&
|
||||
CE0->getType() == IntPtrTy) {
|
||||
Constant *C = CE0->getOperand(0);
|
||||
Constant *Null = Constant::getNullValue(C->getType());
|
||||
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
|
||||
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
|
||||
Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
|
||||
|
||||
if (CE0->getOpcode() == Instruction::IntToPtr) {
|
||||
Type *IntPtrTy = TD->getIntPtrType(CE0->getType());
|
||||
// Convert the integer value to the right size to ensure we get the
|
||||
// proper extension or truncation.
|
||||
Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
|
||||
|
@ -1027,14 +1022,12 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
|
|||
IntPtrTy, false);
|
||||
return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
|
||||
}
|
||||
}
|
||||
|
||||
// Only do this transformation if the int is intptrty in size, otherwise
|
||||
// there is a truncation or extension that we aren't modeling.
|
||||
if (CE0->getOpcode() == Instruction::PtrToInt) {
|
||||
IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
|
||||
if (CE0->getType() == IntPtrTy &&
|
||||
CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())
|
||||
if ((CE0->getOpcode() == Instruction::PtrToInt &&
|
||||
CE0->getType() == IntPtrTy &&
|
||||
CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
|
||||
return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
|
||||
CE1->getOperand(0), TD, TLI);
|
||||
}
|
||||
|
|
|
@ -788,7 +788,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
|
|||
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
|
||||
} while (Visited.insert(V));
|
||||
|
||||
Type *IntPtrTy = TD->getIntPtrType(V->getType());
|
||||
Type *IntPtrTy = TD->getIntPtrType(V->getContext());
|
||||
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
|
||||
}
|
||||
|
||||
|
@ -828,7 +828,8 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
|
|||
// size of the byval type by the target's pointer size.
|
||||
PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
|
||||
unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
|
||||
unsigned PointerSize = TD->getTypeSizeInBits(PTy);
|
||||
unsigned AS = PTy->getAddressSpace();
|
||||
unsigned PointerSize = TD->getPointerSizeInBits(AS);
|
||||
// Ceiling division.
|
||||
unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
|
||||
|
||||
|
|
|
@ -728,7 +728,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
|
|||
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
|
||||
} while (Visited.insert(V));
|
||||
|
||||
Type *IntPtrTy = TD.getIntPtrType(V->getContext(), AS);
|
||||
Type *IntPtrTy = TD.getIntPtrType(V->getContext());
|
||||
return ConstantInt::get(IntPtrTy, Offset);
|
||||
}
|
||||
|
||||
|
@ -1880,7 +1880,9 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
|||
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
|
||||
// if the integer type is the same size as the pointer type.
|
||||
if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
|
||||
Q.TD->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
|
||||
Q.TD->getPointerSizeInBits(
|
||||
cast<PtrToIntInst>(LI)->getPointerAddressSpace()) ==
|
||||
DstTy->getPrimitiveSizeInBits()) {
|
||||
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
|
||||
// Transfer the cast to the constant.
|
||||
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
|
||||
|
|
|
@ -626,7 +626,8 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
|
|||
if (W != V)
|
||||
return findValueImpl(W, OffsetOk, Visited);
|
||||
} else if (CastInst *CI = dyn_cast<CastInst>(V)) {
|
||||
if (CI->isNoopCast(*TD))
|
||||
if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
|
||||
Type::getInt64Ty(V->getContext())))
|
||||
return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
|
||||
} else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
|
||||
if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
|
||||
|
@ -639,7 +640,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
|
|||
if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
|
||||
CE->getOperand(0)->getType(),
|
||||
CE->getType(),
|
||||
TD ? TD->getIntPtrType(CE->getType()) :
|
||||
TD ? TD->getIntPtrType(V->getContext()) :
|
||||
Type::getInt64Ty(V->getContext())))
|
||||
return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
|
||||
} else if (CE->getOpcode() == Instruction::ExtractValue) {
|
||||
|
|
|
@ -376,10 +376,9 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
|
|||
ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
|
||||
const TargetLibraryInfo *TLI,
|
||||
LLVMContext &Context,
|
||||
bool RoundToAlign,
|
||||
unsigned AS)
|
||||
bool RoundToAlign)
|
||||
: TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) {
|
||||
IntegerType *IntTy = TD->getIntPtrType(Context, AS);
|
||||
IntegerType *IntTy = TD->getIntPtrType(Context);
|
||||
IntTyBits = IntTy->getBitWidth();
|
||||
Zero = APInt::getNullValue(IntTyBits);
|
||||
}
|
||||
|
@ -562,10 +561,9 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
|
|||
|
||||
ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
|
||||
const TargetLibraryInfo *TLI,
|
||||
LLVMContext &Context,
|
||||
unsigned AS)
|
||||
LLVMContext &Context)
|
||||
: TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
|
||||
IntTy = TD->getIntPtrType(Context, AS);
|
||||
IntTy = TD->getIntPtrType(Context);
|
||||
Zero = ConstantInt::get(IntTy, 0);
|
||||
}
|
||||
|
||||
|
|
|
@ -2586,12 +2586,13 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
|
|||
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
|
||||
}
|
||||
|
||||
const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy, Type *IntPtrTy) {
|
||||
const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
|
||||
// If we have DataLayout, we can bypass creating a target-independent
|
||||
// constant expression and then folding it back into a ConstantInt.
|
||||
// This is just a compile-time optimization.
|
||||
if (TD)
|
||||
return getConstant(IntPtrTy, TD->getTypeAllocSize(AllocTy));
|
||||
return getConstant(TD->getIntPtrType(getContext()),
|
||||
TD->getTypeAllocSize(AllocTy));
|
||||
|
||||
Constant *C = ConstantExpr::getSizeOf(AllocTy);
|
||||
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
|
||||
|
@ -2610,13 +2611,13 @@ const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
|
|||
return getTruncateOrZeroExtend(getSCEV(C), Ty);
|
||||
}
|
||||
|
||||
const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
|
||||
const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
|
||||
unsigned FieldNo) {
|
||||
// If we have DataLayout, we can bypass creating a target-independent
|
||||
// constant expression and then folding it back into a ConstantInt.
|
||||
// This is just a compile-time optimization.
|
||||
if (TD)
|
||||
return getConstant(IntPtrTy,
|
||||
return getConstant(TD->getIntPtrType(getContext()),
|
||||
TD->getStructLayout(STy)->getElementOffset(FieldNo));
|
||||
|
||||
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
|
||||
|
@ -2703,7 +2704,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
|
|||
|
||||
// The only other support type is pointer.
|
||||
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
|
||||
if (TD) return TD->getIntPtrType(Ty);
|
||||
if (TD) return TD->getIntPtrType(getContext());
|
||||
|
||||
// Without DataLayout, conservatively assume pointers are 64-bit.
|
||||
return Type::getInt64Ty(getContext());
|
||||
|
@ -3156,13 +3157,13 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
|
|||
if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
|
||||
// For a struct, add the member offset.
|
||||
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
|
||||
const SCEV *FieldOffset = getOffsetOfExpr(STy, IntPtrTy, FieldNo);
|
||||
const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
|
||||
|
||||
// Add the field offset to the running total offset.
|
||||
TotalOffset = getAddExpr(TotalOffset, FieldOffset);
|
||||
} else {
|
||||
// For an array, add the element offset, explicitly scaled.
|
||||
const SCEV *ElementSize = getSizeOfExpr(*GTI, IntPtrTy);
|
||||
const SCEV *ElementSize = getSizeOfExpr(*GTI);
|
||||
const SCEV *IndexS = getSCEV(Index);
|
||||
// Getelementptr indices are signed.
|
||||
IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
|
||||
|
|
|
@ -417,9 +417,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
|
|||
// array indexing.
|
||||
SmallVector<const SCEV *, 8> ScaledOps;
|
||||
if (ElTy->isSized()) {
|
||||
Type *IntPtrTy = SE.TD ? SE.TD->getIntPtrType(PTy) :
|
||||
IntegerType::getInt64Ty(PTy->getContext());
|
||||
const SCEV *ElSize = SE.getSizeOfExpr(ElTy, IntPtrTy);
|
||||
const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
|
||||
if (!ElSize->isZero()) {
|
||||
SmallVector<const SCEV *, 8> NewOps;
|
||||
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
|
||||
|
|
|
@ -385,8 +385,8 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
|
|||
// - __tlv_bootstrap - used to make sure support exists
|
||||
// - spare pointer, used when mapped by the runtime
|
||||
// - pointer to mangled symbol above with initializer
|
||||
assert(GV->getType()->isPointerTy() && "GV must be a pointer type!");
|
||||
unsigned PtrSize = TD->getTypeSizeInBits(GV->getType())/8;
|
||||
unsigned AS = GV->getType()->getAddressSpace();
|
||||
unsigned PtrSize = TD->getPointerSizeInBits(AS)/8;
|
||||
OutStreamer.EmitSymbolValue(GetExternalSymbolSymbol("_tlv_bootstrap"),
|
||||
PtrSize, 0);
|
||||
OutStreamer.EmitIntValue(0, PtrSize, 0);
|
||||
|
@ -1481,9 +1481,9 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
|
|||
if (Offset == 0)
|
||||
return Base;
|
||||
|
||||
assert(CE->getType()->isPointerTy() && "We must have a pointer type!");
|
||||
unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
|
||||
// Truncate/sext the offset to the pointer size.
|
||||
unsigned Width = TD.getTypeSizeInBits(CE->getType());
|
||||
unsigned Width = TD.getPointerSizeInBits(AS);
|
||||
if (Width < 64)
|
||||
Offset = SignExtend64(Offset, Width);
|
||||
|
||||
|
@ -1505,7 +1505,7 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
|
|||
// Handle casts to pointers by changing them into casts to the appropriate
|
||||
// integer type. This promotes constant folding and simplifies this code.
|
||||
Constant *Op = CE->getOperand(0);
|
||||
Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CE->getType()),
|
||||
Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
|
||||
false/*ZExt*/);
|
||||
return lowerConstant(Op, AP);
|
||||
}
|
||||
|
|
|
@ -115,21 +115,21 @@ void IntrinsicLowering::AddPrototypes(Module &M) {
|
|||
Type::getInt8PtrTy(Context),
|
||||
Type::getInt8PtrTy(Context),
|
||||
Type::getInt8PtrTy(Context),
|
||||
TD.getIntPtrType(Context, 0), (Type *)0);
|
||||
TD.getIntPtrType(Context), (Type *)0);
|
||||
break;
|
||||
case Intrinsic::memmove:
|
||||
M.getOrInsertFunction("memmove",
|
||||
Type::getInt8PtrTy(Context),
|
||||
Type::getInt8PtrTy(Context),
|
||||
Type::getInt8PtrTy(Context),
|
||||
TD.getIntPtrType(Context, 0), (Type *)0);
|
||||
TD.getIntPtrType(Context), (Type *)0);
|
||||
break;
|
||||
case Intrinsic::memset:
|
||||
M.getOrInsertFunction("memset",
|
||||
Type::getInt8PtrTy(Context),
|
||||
Type::getInt8PtrTy(Context),
|
||||
Type::getInt32Ty(M.getContext()),
|
||||
TD.getIntPtrType(Context, 0), (Type *)0);
|
||||
TD.getIntPtrType(Context), (Type *)0);
|
||||
break;
|
||||
case Intrinsic::sqrt:
|
||||
EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");
|
||||
|
@ -457,7 +457,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
|
|||
break; // Strip out annotate intrinsic
|
||||
|
||||
case Intrinsic::memcpy: {
|
||||
Type *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
|
||||
Type *IntPtr = TD.getIntPtrType(Context);
|
||||
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
|
||||
/* isSigned */ false);
|
||||
Value *Ops[3];
|
||||
|
@ -468,7 +468,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
|
|||
break;
|
||||
}
|
||||
case Intrinsic::memmove: {
|
||||
Type *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
|
||||
Type *IntPtr = TD.getIntPtrType(Context);
|
||||
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
|
||||
/* isSigned */ false);
|
||||
Value *Ops[3];
|
||||
|
@ -479,7 +479,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
|
|||
break;
|
||||
}
|
||||
case Intrinsic::memset: {
|
||||
Type *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
|
||||
Type *IntPtr = TD.getIntPtrType(Context);
|
||||
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
|
||||
/* isSigned */ false);
|
||||
Value *Ops[3];
|
||||
|
|
|
@ -101,7 +101,8 @@ bool FastISel::hasTrivialKill(const Value *V) const {
|
|||
|
||||
// No-op casts are trivially coalesced by fast-isel.
|
||||
if (const CastInst *Cast = dyn_cast<CastInst>(I))
|
||||
if (Cast->isNoopCast(TD) && !hasTrivialKill(Cast->getOperand(0)))
|
||||
if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
|
||||
!hasTrivialKill(Cast->getOperand(0)))
|
||||
return false;
|
||||
|
||||
// GEPs with all zero indices are trivially coalesced by fast-isel.
|
||||
|
@ -174,7 +175,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
|
|||
// Translate this as an integer zero so that it can be
|
||||
// local-CSE'd with actual integer zeros.
|
||||
Reg =
|
||||
getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getType())));
|
||||
getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
|
||||
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
|
||||
if (CF->isNullValue()) {
|
||||
Reg = TargetMaterializeFloatZero(CF);
|
||||
|
|
|
@ -3791,8 +3791,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
|
|||
// Emit a library call.
|
||||
TargetLowering::ArgListTy Args;
|
||||
TargetLowering::ArgListEntry Entry;
|
||||
unsigned AS = SrcPtrInfo.getAddrSpace();
|
||||
Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
|
||||
Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
|
||||
Entry.Node = Dst; Args.push_back(Entry);
|
||||
Entry.Node = Src; Args.push_back(Entry);
|
||||
Entry.Node = Size; Args.push_back(Entry);
|
||||
|
@ -3847,8 +3846,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
|
|||
// Emit a library call.
|
||||
TargetLowering::ArgListTy Args;
|
||||
TargetLowering::ArgListEntry Entry;
|
||||
unsigned AS = SrcPtrInfo.getAddrSpace();
|
||||
Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
|
||||
Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
|
||||
Entry.Node = Dst; Args.push_back(Entry);
|
||||
Entry.Node = Src; Args.push_back(Entry);
|
||||
Entry.Node = Size; Args.push_back(Entry);
|
||||
|
@ -3897,8 +3895,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
|
|||
return Result;
|
||||
|
||||
// Emit a library call.
|
||||
unsigned AS = DstPtrInfo.getAddrSpace();
|
||||
Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
|
||||
Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
|
||||
TargetLowering::ArgListTy Args;
|
||||
TargetLowering::ArgListEntry Entry;
|
||||
Entry.Node = Dst; Entry.Ty = IntPtrTy;
|
||||
|
|
|
@ -155,8 +155,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
|
|||
TargetLowering::ArgListEntry Entry;
|
||||
|
||||
// First argument: data pointer
|
||||
unsigned AS = DstPtrInfo.getAddrSpace();
|
||||
Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
|
||||
Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext());
|
||||
Entry.Node = Dst;
|
||||
Entry.Ty = IntPtrTy;
|
||||
Args.push_back(Entry);
|
||||
|
|
|
@ -126,9 +126,10 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
|
|||
return Base;
|
||||
|
||||
// Truncate/sext the offset to the pointer size.
|
||||
unsigned PtrSize = TD.getPointerTypeSizeInBits(PtrVal->getType());
|
||||
if (PtrSize != 64) {
|
||||
int SExtAmount = 64-PtrSize;
|
||||
unsigned AS = PtrVal->getType()->isPointerTy() ?
|
||||
cast<PointerType>(PtrVal->getType())->getAddressSpace() : 0;
|
||||
if (TD.getPointerSizeInBits(AS) != 64) {
|
||||
int SExtAmount = 64-TD.getPointerSizeInBits(AS);
|
||||
Offset = (Offset << SExtAmount) >> SExtAmount;
|
||||
}
|
||||
|
||||
|
@ -150,7 +151,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
|
|||
// Handle casts to pointers by changing them into casts to the appropriate
|
||||
// integer type. This promotes constant folding and simplifies this code.
|
||||
Constant *Op = CE->getOperand(0);
|
||||
Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CE->getType()),
|
||||
Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
|
||||
false/*ZExt*/);
|
||||
return LowerConstant(Op, AP);
|
||||
}
|
||||
|
|
|
@ -1512,10 +1512,9 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
|
|||
|
||||
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
|
||||
bool isPPC64 = (PtrVT == MVT::i64);
|
||||
unsigned AS = 0;
|
||||
Type *IntPtrTy =
|
||||
DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
|
||||
*DAG.getContext(), AS);
|
||||
*DAG.getContext());
|
||||
|
||||
TargetLowering::ArgListTy Args;
|
||||
TargetLowering::ArgListEntry Entry;
|
||||
|
|
|
@ -64,7 +64,7 @@ unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS) {
|
|||
}
|
||||
|
||||
LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) {
|
||||
return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), 0));
|
||||
return wrap(unwrap(TD)->getIntPtrType(getGlobalContext()));
|
||||
}
|
||||
|
||||
LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) {
|
||||
|
|
|
@ -282,9 +282,8 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
|
|||
bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
|
||||
const X86AddressMode &AM) {
|
||||
// Handle 'null' like i32/i64 0.
|
||||
if (isa<ConstantPointerNull>(Val)) {
|
||||
Val = Constant::getNullValue(TD.getIntPtrType(Val->getType()));
|
||||
}
|
||||
if (isa<ConstantPointerNull>(Val))
|
||||
Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
|
||||
|
||||
// If this is a store of a simple constant, fold the constant into the store.
|
||||
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
|
||||
|
@ -895,9 +894,8 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
|
|||
if (Op0Reg == 0) return false;
|
||||
|
||||
// Handle 'null' like i32/i64 0.
|
||||
if (isa<ConstantPointerNull>(Op1)) {
|
||||
Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getType()));
|
||||
}
|
||||
if (isa<ConstantPointerNull>(Op1))
|
||||
Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
|
||||
|
||||
// We have two options: compare with register or immediate. If the RHS of
|
||||
// the compare is an immediate that we can fold into this compare, use
|
||||
|
|
|
@ -54,8 +54,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
|
|||
if (const char *bzeroEntry = V &&
|
||||
V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
|
||||
EVT IntPtr = TLI.getPointerTy();
|
||||
unsigned AS = DstPtrInfo.getAddrSpace();
|
||||
Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
|
||||
Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
|
||||
TargetLowering::ArgListTy Args;
|
||||
TargetLowering::ArgListEntry Entry;
|
||||
Entry.Node = Dst;
|
||||
|
|
|
@ -477,8 +477,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
|
|||
}
|
||||
|
||||
// Lower to a call to __misaligned_load(BasePtr).
|
||||
unsigned AS = LD->getAddressSpace();
|
||||
Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
|
||||
Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
|
||||
TargetLowering::ArgListTy Args;
|
||||
TargetLowering::ArgListEntry Entry;
|
||||
|
||||
|
@ -537,8 +536,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
|
|||
}
|
||||
|
||||
// Lower to a call to __misaligned_store(BasePtr, Value).
|
||||
unsigned AS = ST->getAddressSpace();
|
||||
Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
|
||||
Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
|
||||
TargetLowering::ArgListTy Args;
|
||||
TargetLowering::ArgListEntry Entry;
|
||||
|
||||
|
|
|
@ -1500,7 +1500,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
|
|||
unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
|
||||
if (StructType *ST = dyn_cast<StructType>(FieldTy))
|
||||
TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
|
||||
Type *IntPtrTy = TD->getIntPtrType(GV->getType());
|
||||
Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
|
||||
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
|
||||
ConstantInt::get(IntPtrTy, TypeSize),
|
||||
NElems, 0,
|
||||
|
@ -1730,7 +1730,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
|
|||
// If this is a fixed size array, transform the Malloc to be an alloc of
|
||||
// structs. malloc [100 x struct],1 -> malloc struct, 100
|
||||
if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
|
||||
Type *IntPtrTy = TD->getIntPtrType(GV->getType());
|
||||
Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
|
||||
unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
|
||||
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
|
||||
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
|
||||
|
|
|
@ -206,8 +206,9 @@ bool FunctionComparator::isEquivalentType(Type *Ty1,
|
|||
return true;
|
||||
if (Ty1->getTypeID() != Ty2->getTypeID()) {
|
||||
if (TD) {
|
||||
if (isa<PointerType>(Ty1) && Ty2 == TD->getIntPtrType(Ty1)) return true;
|
||||
if (isa<PointerType>(Ty2) && Ty1 == TD->getIntPtrType(Ty2)) return true;
|
||||
LLVMContext &Ctx = Ty1->getContext();
|
||||
if (isa<PointerType>(Ty1) && Ty2 == TD->getIntPtrType(Ctx)) return true;
|
||||
if (isa<PointerType>(Ty2) && Ty1 == TD->getIntPtrType(Ctx)) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -208,7 +208,7 @@ private:
|
|||
bool ShouldChangeType(Type *From, Type *To) const;
|
||||
Value *dyn_castNegVal(Value *V) const;
|
||||
Value *dyn_castFNegVal(Value *V) const;
|
||||
Type *FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy,
|
||||
Type *FindElementAtOffset(Type *Ty, int64_t Offset,
|
||||
SmallVectorImpl<Value*> &NewIndices);
|
||||
Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
|
||||
|
||||
|
|
|
@ -996,9 +996,9 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
|
|||
// Conversion is ok if changing from one pointer type to another or from
|
||||
// a pointer to an integer of the same size.
|
||||
!((OldRetTy->isPointerTy() || !TD ||
|
||||
OldRetTy == TD->getIntPtrType(NewRetTy)) &&
|
||||
OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
|
||||
(NewRetTy->isPointerTy() || !TD ||
|
||||
NewRetTy == TD->getIntPtrType(OldRetTy))))
|
||||
NewRetTy == TD->getIntPtrType(Caller->getContext()))))
|
||||
return false; // Cannot transform this return value.
|
||||
|
||||
if (!Caller->use_empty() &&
|
||||
|
@ -1057,13 +1057,11 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
|
|||
|
||||
// Converting from one pointer type to another or between a pointer and an
|
||||
// integer of the same size is safe even if we do not have a body.
|
||||
// FIXME: Not sure what to do here, so setting AS to 0.
|
||||
// How can the AS for a function call be outside the default?
|
||||
bool isConvertible = ActTy == ParamTy ||
|
||||
(TD && ((ParamTy->isPointerTy() ||
|
||||
ParamTy == TD->getIntPtrType(ActTy)) &&
|
||||
ParamTy == TD->getIntPtrType(Caller->getContext())) &&
|
||||
(ActTy->isPointerTy() ||
|
||||
ActTy == TD->getIntPtrType(ParamTy))));
|
||||
ActTy == TD->getIntPtrType(Caller->getContext()))));
|
||||
if (Callee->isDeclaration() && !isConvertible) return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -1301,13 +1301,13 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
|
|||
if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
|
||||
TD->getPointerSizeInBits(AS)) {
|
||||
Value *P = Builder->CreateTrunc(CI.getOperand(0),
|
||||
TD->getIntPtrType(CI.getType()));
|
||||
TD->getIntPtrType(CI.getContext()));
|
||||
return new IntToPtrInst(P, CI.getType());
|
||||
}
|
||||
if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
|
||||
TD->getPointerSizeInBits(AS)) {
|
||||
Value *P = Builder->CreateZExt(CI.getOperand(0),
|
||||
TD->getIntPtrType(CI.getType()));
|
||||
TD->getIntPtrType(CI.getContext()));
|
||||
return new IntToPtrInst(P, CI.getType());
|
||||
}
|
||||
}
|
||||
|
@ -1348,8 +1348,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
|
|||
Type *GEPIdxTy =
|
||||
cast<PointerType>(OrigBase->getType())->getElementType();
|
||||
SmallVector<Value*, 8> NewIndices;
|
||||
Type *IntPtrTy = TD->getIntPtrType(OrigBase->getType());
|
||||
if (FindElementAtOffset(GEPIdxTy, Offset, IntPtrTy, NewIndices)) {
|
||||
if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices)) {
|
||||
// If we were able to index down into an element, create the GEP
|
||||
// and bitcast the result. This eliminates one bitcast, potentially
|
||||
// two.
|
||||
|
@ -1377,12 +1376,12 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
|
|||
if (TD) {
|
||||
if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits(AS)) {
|
||||
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
|
||||
TD->getIntPtrType(CI.getContext(), AS));
|
||||
TD->getIntPtrType(CI.getContext()));
|
||||
return new TruncInst(P, CI.getType());
|
||||
}
|
||||
if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits(AS)) {
|
||||
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
|
||||
TD->getIntPtrType(CI.getContext(), AS));
|
||||
TD->getIntPtrType(CI.getContext()));
|
||||
return new ZExtInst(P, CI.getType());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -371,7 +371,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
|
|||
// an inbounds GEP because the index can't be out of range.
|
||||
if (!GEP->isInBounds() &&
|
||||
Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits(AS))
|
||||
Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext(), AS));
|
||||
Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext()));
|
||||
|
||||
// If the comparison is only true for one or two elements, emit direct
|
||||
// comparisons.
|
||||
|
@ -539,7 +539,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
|
|||
// we don't need to bother extending: the extension won't affect where the
|
||||
// computation crosses zero.
|
||||
if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
|
||||
Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext(), AS);
|
||||
Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
|
||||
VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
|
||||
}
|
||||
return VariableIdx;
|
||||
|
@ -561,7 +561,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
|
|||
return 0;
|
||||
|
||||
// Okay, we can do this evaluation. Start by converting the index to intptr.
|
||||
Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext(), AS);
|
||||
Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
|
||||
if (VariableIdx->getType() != IntPtrTy)
|
||||
VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
|
||||
true /*Signed*/);
|
||||
|
@ -1554,7 +1554,8 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
|
|||
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
|
||||
// integer type is the same size as the pointer type.
|
||||
if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
|
||||
TD->getTypeSizeInBits(DestTy) ==
|
||||
TD->getPointerSizeInBits(
|
||||
cast<PtrToIntInst>(LHSCI)->getPointerAddressSpace()) ==
|
||||
cast<IntegerType>(DestTy)->getBitWidth()) {
|
||||
Value *RHSOp = 0;
|
||||
if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
|
||||
|
@ -2250,7 +2251,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
|
|||
case Instruction::IntToPtr:
|
||||
// icmp pred inttoptr(X), null -> icmp pred X, 0
|
||||
if (RHSC->isNullValue() && TD &&
|
||||
TD->getIntPtrType(LHSI->getType()) ==
|
||||
TD->getIntPtrType(RHSC->getContext()) ==
|
||||
LHSI->getOperand(0)->getType())
|
||||
return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
|
||||
Constant::getNullValue(LHSI->getOperand(0)->getType()));
|
||||
|
|
|
@ -173,7 +173,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
|
|||
// Ensure that the alloca array size argument has type intptr_t, so that
|
||||
// any casting is exposed early.
|
||||
if (TD) {
|
||||
Type *IntPtrTy = TD->getIntPtrType(AI.getType());
|
||||
Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
|
||||
if (AI.getArraySize()->getType() != IntPtrTy) {
|
||||
Value *V = Builder->CreateIntCast(AI.getArraySize(),
|
||||
IntPtrTy, false);
|
||||
|
@ -513,7 +513,8 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
|
|||
// If the pointers point into different address spaces or if they point to
|
||||
// values with different sizes, we can't do the transformation.
|
||||
if (!IC.getDataLayout() ||
|
||||
SrcTy->getAddressSpace() != CI->getType()->getPointerAddressSpace() ||
|
||||
SrcTy->getAddressSpace() !=
|
||||
cast<PointerType>(CI->getType())->getAddressSpace() ||
|
||||
IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
|
||||
IC.getDataLayout()->getTypeSizeInBits(DestPTy))
|
||||
return 0;
|
||||
|
|
|
@ -738,7 +738,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
|
|||
/// or not there is a sequence of GEP indices into the type that will land us at
|
||||
/// the specified offset. If so, fill them into NewIndices and return the
|
||||
/// resultant element type, otherwise return null.
|
||||
Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy,
|
||||
Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
|
||||
SmallVectorImpl<Value*> &NewIndices) {
|
||||
if (!TD) return 0;
|
||||
if (!Ty->isSized()) return 0;
|
||||
|
@ -746,6 +746,7 @@ Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy
|
|||
// Start with the index over the outer type. Note that the type size
|
||||
// might be zero (even if the offset isn't zero) if the indexed type
|
||||
// is something like [0 x {int, int}]
|
||||
Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
|
||||
int64_t FirstIdx = 0;
|
||||
if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
|
||||
FirstIdx = Offset/TySize;
|
||||
|
@ -1054,7 +1055,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
|||
// by multiples of a zero size type with zero.
|
||||
if (TD) {
|
||||
bool MadeChange = false;
|
||||
Type *IntPtrTy = TD->getIntPtrType(PtrOp->getType());
|
||||
Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
|
||||
|
||||
gep_type_iterator GTI = gep_type_begin(GEP);
|
||||
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
|
||||
|
@ -1239,7 +1240,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
|||
|
||||
// Earlier transforms ensure that the index has type IntPtrType, which
|
||||
// considerably simplifies the logic by eliminating implicit casts.
|
||||
assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
|
||||
assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
|
||||
"Index not cast to pointer width?");
|
||||
|
||||
bool NSW;
|
||||
|
@ -1274,7 +1275,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
|||
|
||||
// Earlier transforms ensure that the index has type IntPtrType, which
|
||||
// considerably simplifies the logic by eliminating implicit casts.
|
||||
assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
|
||||
assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
|
||||
"Index not cast to pointer width?");
|
||||
|
||||
bool NSW;
|
||||
|
@ -1336,8 +1337,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
|||
SmallVector<Value*, 8> NewIndices;
|
||||
Type *InTy =
|
||||
cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
|
||||
Type *IntPtrTy = TD->getIntPtrType(BCI->getOperand(0)->getType());
|
||||
if (FindElementAtOffset(InTy, Offset, IntPtrTy, NewIndices)) {
|
||||
if (FindElementAtOffset(InTy, Offset, NewIndices)) {
|
||||
Value *NGEP = GEP.isInBounds() ?
|
||||
Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
|
||||
Builder->CreateGEP(BCI->getOperand(0), NewIndices);
|
||||
|
|
|
@ -933,7 +933,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
|
|||
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
|
||||
<< *MemoryInst);
|
||||
Type *IntPtrTy =
|
||||
TLI->getDataLayout()->getIntPtrType(Addr->getType());
|
||||
TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());
|
||||
|
||||
Value *Result = 0;
|
||||
|
||||
|
|
|
@ -1428,8 +1428,7 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
|
|||
/// genLoopLimit - Help LinearFunctionTestReplace by generating a value that
|
||||
/// holds the RHS of the new loop test.
|
||||
static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
|
||||
SCEVExpander &Rewriter, ScalarEvolution *SE,
|
||||
Type *IntPtrTy) {
|
||||
SCEVExpander &Rewriter, ScalarEvolution *SE) {
|
||||
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
|
||||
assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
|
||||
const SCEV *IVInit = AR->getStart();
|
||||
|
@ -1455,8 +1454,7 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
|
|||
// We could handle pointer IVs other than i8*, but we need to compensate for
|
||||
// gep index scaling. See canExpandBackedgeTakenCount comments.
|
||||
assert(SE->getSizeOfExpr(
|
||||
cast<PointerType>(GEPBase->getType())->getElementType(),
|
||||
IntPtrTy)->isOne()
|
||||
cast<PointerType>(GEPBase->getType())->getElementType())->isOne()
|
||||
&& "unit stride pointer IV must be i8*");
|
||||
|
||||
IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
|
||||
|
@ -1555,9 +1553,7 @@ LinearFunctionTestReplace(Loop *L,
|
|||
CmpIndVar = IndVar;
|
||||
}
|
||||
|
||||
Type *IntPtrTy = TD ? TD->getIntPtrType(IndVar->getType()) :
|
||||
IntegerType::getInt64Ty(IndVar->getContext());
|
||||
Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE, IntPtrTy);
|
||||
Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
|
||||
assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy()
|
||||
&& "genLoopLimit missed a cast");
|
||||
|
||||
|
|
|
@ -458,10 +458,9 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
|
|||
// Okay, we have a strided store "p[i]" of a splattable value. We can turn
|
||||
// this into a memset in the loop preheader now if we want. However, this
|
||||
// would be unsafe to do if there is anything else in the loop that may read
|
||||
// or write to the aliased location.
|
||||
assert(DestPtr->getType()->isPointerTy()
|
||||
&& "Must be a pointer type.");
|
||||
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
|
||||
// or write to the aliased location. Check for any overlap by generating the
|
||||
// base pointer and checking the region.
|
||||
unsigned AddrSpace = cast<PointerType>(DestPtr->getType())->getAddressSpace();
|
||||
Value *BasePtr =
|
||||
Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
|
||||
Preheader->getTerminator());
|
||||
|
@ -471,7 +470,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
|
|||
|
||||
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
|
||||
// pointer size if it isn't already.
|
||||
Type *IntPtr = TD->getIntPtrType(DestPtr->getType());
|
||||
Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
|
||||
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
|
||||
|
||||
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
|
||||
|
@ -587,7 +586,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
|
|||
|
||||
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
|
||||
// pointer size if it isn't already.
|
||||
Type *IntPtr = TD->getIntPtrType(SI->getType());
|
||||
Type *IntPtr = TD->getIntPtrType(SI->getContext());
|
||||
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
|
||||
|
||||
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
|
||||
|
|
|
@ -2395,9 +2395,8 @@ private:
|
|||
|
||||
Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
|
||||
assert(BeginOffset >= NewAllocaBeginOffset);
|
||||
assert(PointerTy->isPointerTy() &&
|
||||
"Type must be pointer type!");
|
||||
APInt Offset(TD.getTypeSizeInBits(PointerTy), BeginOffset - NewAllocaBeginOffset);
|
||||
unsigned AS = cast<PointerType>(PointerTy)->getAddressSpace();
|
||||
APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset);
|
||||
return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
|
||||
}
|
||||
|
||||
|
@ -2795,8 +2794,9 @@ private:
|
|||
= P.getMemTransferOffsets(II);
|
||||
|
||||
assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!");
|
||||
unsigned AS = cast<PointerType>(OldPtr->getType())->getAddressSpace();
|
||||
// Compute the relative offset within the transfer.
|
||||
unsigned IntPtrWidth = TD.getTypeSizeInBits(OldPtr->getType());
|
||||
unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
|
||||
APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
|
||||
: MTO.SourceBegin));
|
||||
|
||||
|
|
|
@ -963,7 +963,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
|
|||
if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
|
||||
SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth));
|
||||
else if (SV->getType()->isPointerTy())
|
||||
SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getType()));
|
||||
SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getContext()));
|
||||
|
||||
// Zero extend or truncate the value if needed.
|
||||
if (SV->getType() != AllocaType) {
|
||||
|
|
|
@ -311,11 +311,10 @@ struct MemCpyOpt : public LibCallOptimization {
|
|||
if (!TD) return 0;
|
||||
|
||||
FunctionType *FT = Callee->getFunctionType();
|
||||
Type *PT = FT->getParamType(0);
|
||||
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
|
||||
!FT->getParamType(0)->isPointerTy() ||
|
||||
!FT->getParamType(1)->isPointerTy() ||
|
||||
FT->getParamType(2) != TD->getIntPtrType(PT))
|
||||
FT->getParamType(2) != TD->getIntPtrType(*Context))
|
||||
return 0;
|
||||
|
||||
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
|
||||
|
@ -334,11 +333,10 @@ struct MemMoveOpt : public LibCallOptimization {
|
|||
if (!TD) return 0;
|
||||
|
||||
FunctionType *FT = Callee->getFunctionType();
|
||||
Type *PT = FT->getParamType(0);
|
||||
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
|
||||
!FT->getParamType(0)->isPointerTy() ||
|
||||
!FT->getParamType(1)->isPointerTy() ||
|
||||
FT->getParamType(2) != TD->getIntPtrType(PT))
|
||||
FT->getParamType(2) != TD->getIntPtrType(*Context))
|
||||
return 0;
|
||||
|
||||
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
|
||||
|
@ -357,11 +355,10 @@ struct MemSetOpt : public LibCallOptimization {
|
|||
if (!TD) return 0;
|
||||
|
||||
FunctionType *FT = Callee->getFunctionType();
|
||||
Type *PT = FT->getParamType(0);
|
||||
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
|
||||
!FT->getParamType(0)->isPointerTy() ||
|
||||
!FT->getParamType(1)->isIntegerTy() ||
|
||||
FT->getParamType(2) != TD->getIntPtrType(PT))
|
||||
FT->getParamType(2) != TD->getIntPtrType(*Context))
|
||||
return 0;
|
||||
|
||||
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
|
||||
|
@ -786,9 +783,8 @@ struct SPrintFOpt : public LibCallOptimization {
|
|||
if (!TD) return 0;
|
||||
|
||||
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
|
||||
Type *AT = CI->getArgOperand(0)->getType();
|
||||
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
|
||||
ConstantInt::get(TD->getIntPtrType(AT), // Copy the
|
||||
ConstantInt::get(TD->getIntPtrType(*Context), // Copy the
|
||||
FormatStr.size() + 1), 1); // nul byte.
|
||||
return ConstantInt::get(CI->getType(), FormatStr.size());
|
||||
}
|
||||
|
@ -915,9 +911,8 @@ struct FPutsOpt : public LibCallOptimization {
|
|||
uint64_t Len = GetStringLength(CI->getArgOperand(0));
|
||||
if (!Len) return 0;
|
||||
// Known to have no uses (see above).
|
||||
Type *PT = FT->getParamType(0);
|
||||
return EmitFWrite(CI->getArgOperand(0),
|
||||
ConstantInt::get(TD->getIntPtrType(PT), Len-1),
|
||||
ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
|
||||
CI->getArgOperand(1), B, TD, TLI);
|
||||
}
|
||||
};
|
||||
|
@ -942,9 +937,8 @@ struct FPrintFOpt : public LibCallOptimization {
|
|||
// These optimizations require DataLayout.
|
||||
if (!TD) return 0;
|
||||
|
||||
Type *AT = CI->getArgOperand(1)->getType();
|
||||
Value *NewCI = EmitFWrite(CI->getArgOperand(1),
|
||||
ConstantInt::get(TD->getIntPtrType(AT),
|
||||
ConstantInt::get(TD->getIntPtrType(*Context),
|
||||
FormatStr.size()),
|
||||
CI->getArgOperand(0), B, TD, TLI);
|
||||
return NewCI ? ConstantInt::get(CI->getType(), FormatStr.size()) : 0;
|
||||
|
|
|
@ -46,8 +46,9 @@ Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
|
|||
AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
|
||||
ArrayRef<Attributes::AttrVal>(AVs, 2));
|
||||
|
||||
LLVMContext &Context = B.GetInsertBlock()->getContext();
|
||||
Constant *StrLen = M->getOrInsertFunction("strlen", AttrListPtr::get(AWI),
|
||||
TD->getIntPtrType(Ptr->getType()),
|
||||
TD->getIntPtrType(Context),
|
||||
B.getInt8PtrTy(),
|
||||
NULL);
|
||||
CallInst *CI = B.CreateCall(StrLen, CastToCStr(Ptr, B), "strlen");
|
||||
|
@ -72,10 +73,11 @@ Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
|
|||
AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
|
||||
ArrayRef<Attributes::AttrVal>(AVs, 2));
|
||||
|
||||
LLVMContext &Context = B.GetInsertBlock()->getContext();
|
||||
Constant *StrNLen = M->getOrInsertFunction("strnlen", AttrListPtr::get(AWI),
|
||||
TD->getIntPtrType(Ptr->getType()),
|
||||
TD->getIntPtrType(Context),
|
||||
B.getInt8PtrTy(),
|
||||
TD->getIntPtrType(Ptr->getType()),
|
||||
TD->getIntPtrType(Context),
|
||||
NULL);
|
||||
CallInst *CI = B.CreateCall2(StrNLen, CastToCStr(Ptr, B), MaxLen, "strnlen");
|
||||
if (const Function *F = dyn_cast<Function>(StrNLen->stripPointerCasts()))
|
||||
|
@ -124,12 +126,12 @@ Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
|
|||
AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
|
||||
ArrayRef<Attributes::AttrVal>(AVs, 2));
|
||||
|
||||
LLVMContext &Context = B.GetInsertBlock()->getContext();
|
||||
Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI),
|
||||
B.getInt32Ty(),
|
||||
B.getInt8PtrTy(),
|
||||
B.getInt8PtrTy(),
|
||||
TD->getIntPtrType(Ptr1->getType()),
|
||||
NULL);
|
||||
TD->getIntPtrType(Context), NULL);
|
||||
CallInst *CI = B.CreateCall3(StrNCmp, CastToCStr(Ptr1, B),
|
||||
CastToCStr(Ptr2, B), Len, "strncmp");
|
||||
|
||||
|
@ -199,14 +201,14 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
|
|||
AttributeWithIndex AWI;
|
||||
AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
|
||||
Attributes::NoUnwind);
|
||||
LLVMContext &Context = B.GetInsertBlock()->getContext();
|
||||
Value *MemCpy = M->getOrInsertFunction("__memcpy_chk",
|
||||
AttrListPtr::get(AWI),
|
||||
B.getInt8PtrTy(),
|
||||
B.getInt8PtrTy(),
|
||||
B.getInt8PtrTy(),
|
||||
TD->getIntPtrType(Dst->getType()),
|
||||
TD->getIntPtrType(Src->getType()),
|
||||
NULL);
|
||||
TD->getIntPtrType(Context),
|
||||
TD->getIntPtrType(Context), NULL);
|
||||
Dst = CastToCStr(Dst, B);
|
||||
Src = CastToCStr(Src, B);
|
||||
CallInst *CI = B.CreateCall4(MemCpy, Dst, Src, Len, ObjSize);

@@ -228,11 +230,12 @@ Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
  Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
  AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                ArrayRef<Attributes::AttrVal>(AVs, 2));
  LLVMContext &Context = B.GetInsertBlock()->getContext();
  Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(AWI),
                                         B.getInt8PtrTy(),
                                         B.getInt8PtrTy(),
                                         B.getInt32Ty(),
                                         TD->getIntPtrType(Ptr->getType()),
                                         TD->getIntPtrType(Context),
                                         NULL);
  CallInst *CI = B.CreateCall3(MemChr, CastToCStr(Ptr, B), Val, Len, "memchr");

@@ -257,12 +260,12 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
  AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                   ArrayRef<Attributes::AttrVal>(AVs, 2));

  LLVMContext &Context = B.GetInsertBlock()->getContext();
  Value *MemCmp = M->getOrInsertFunction("memcmp", AttrListPtr::get(AWI),
                                         B.getInt32Ty(),
                                         B.getInt8PtrTy(),
                                         B.getInt8PtrTy(),
                                         TD->getIntPtrType(Ptr1->getType()),
                                         NULL);
                                         TD->getIntPtrType(Context), NULL);
  CallInst *CI = B.CreateCall3(MemCmp, CastToCStr(Ptr1, B), CastToCStr(Ptr2, B),
                               Len, "memcmp");

@@ -422,24 +425,24 @@ Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
  AWI[1] = AttributeWithIndex::get(M->getContext(), 4, Attributes::NoCapture);
  AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                   Attributes::NoUnwind);
  LLVMContext &Context = B.GetInsertBlock()->getContext();
  StringRef FWriteName = TLI->getName(LibFunc::fwrite);
  Constant *F;
  Type *PtrTy = Ptr->getType();
  if (File->getType()->isPointerTy())
    F = M->getOrInsertFunction(FWriteName, AttrListPtr::get(AWI),
                               TD->getIntPtrType(PtrTy),
                               TD->getIntPtrType(Context),
                               B.getInt8PtrTy(),
                               TD->getIntPtrType(PtrTy),
                               TD->getIntPtrType(PtrTy),
                               TD->getIntPtrType(Context),
                               TD->getIntPtrType(Context),
                               File->getType(), NULL);
  else
    F = M->getOrInsertFunction(FWriteName, TD->getIntPtrType(PtrTy),
    F = M->getOrInsertFunction(FWriteName, TD->getIntPtrType(Context),
                               B.getInt8PtrTy(),
                               TD->getIntPtrType(PtrTy),
                               TD->getIntPtrType(PtrTy),
                               TD->getIntPtrType(Context),
                               TD->getIntPtrType(Context),
                               File->getType(), NULL);
  CallInst *CI = B.CreateCall4(F, CastToCStr(Ptr, B), Size,
                               ConstantInt::get(TD->getIntPtrType(PtrTy), 1), File);
                               ConstantInt::get(TD->getIntPtrType(Context), 1), File);

  if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
    CI->setCallingConv(Fn->getCallingConv());
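
The declaration built above mirrors the C prototype size_t fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream), with every size_t mapped to the intptr type; the ConstantInt 1 passed as the second call argument pins the element size so the return value counts bytes. A small sketch of the call shape being emitted:

#include <stdio.h>

// The C-level shape of the call EmitFWrite constructs: size is fixed at 1,
// so fwrite's return value is the number of bytes actually written.
static size_t write_all(const char *buf, size_t len, FILE *f) {
  return fwrite(buf, /*size=*/1, /*nmemb=*/len, f);
}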

@@ -461,13 +464,12 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,
  IRBuilder<> B(CI);

  if (Name == "__memcpy_chk") {
    Type *PT = FT->getParamType(0);
    // Check if this has the right signature.
    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
        !FT->getParamType(0)->isPointerTy() ||
        !FT->getParamType(1)->isPointerTy() ||
        FT->getParamType(2) != TD->getIntPtrType(PT) ||
        FT->getParamType(3) != TD->getIntPtrType(PT))
        FT->getParamType(2) != TD->getIntPtrType(Context) ||
        FT->getParamType(3) != TD->getIntPtrType(Context))
      return false;

    if (isFoldable(3, 2, false)) {

@@ -486,12 +488,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,

  if (Name == "__memmove_chk") {
    // Check if this has the right signature.
    Type *PT = FT->getParamType(0);
    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
        !FT->getParamType(0)->isPointerTy() ||
        !FT->getParamType(1)->isPointerTy() ||
        FT->getParamType(2) != TD->getIntPtrType(PT) ||
        FT->getParamType(3) != TD->getIntPtrType(PT))
        FT->getParamType(2) != TD->getIntPtrType(Context) ||
        FT->getParamType(3) != TD->getIntPtrType(Context))
      return false;

    if (isFoldable(3, 2, false)) {

@@ -505,12 +506,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,

  if (Name == "__memset_chk") {
    // Check if this has the right signature.
    Type *PT = FT->getParamType(0);
    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
        !FT->getParamType(0)->isPointerTy() ||
        !FT->getParamType(1)->isIntegerTy() ||
        FT->getParamType(2) != TD->getIntPtrType(PT) ||
        FT->getParamType(3) != TD->getIntPtrType(PT))
        FT->getParamType(2) != TD->getIntPtrType(Context) ||
        FT->getParamType(3) != TD->getIntPtrType(Context))
      return false;

    if (isFoldable(3, 2, false)) {

@@ -525,12 +525,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,

  if (Name == "__strcpy_chk" || Name == "__stpcpy_chk") {
    // Check if this has the right signature.
    Type *PT = FT->getParamType(0);
    if (FT->getNumParams() != 3 ||
        FT->getReturnType() != FT->getParamType(0) ||
        FT->getParamType(0) != FT->getParamType(1) ||
        FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
        FT->getParamType(2) != TD->getIntPtrType(PT))
        FT->getParamType(2) != TD->getIntPtrType(Context))
      return 0;

@@ -552,12 +551,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,

  if (Name == "__strncpy_chk" || Name == "__stpncpy_chk") {
    // Check if this has the right signature.
    Type *PT = FT->getParamType(0);
    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
        FT->getParamType(0) != FT->getParamType(1) ||
        FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
        !FT->getParamType(2)->isIntegerTy() ||
        FT->getParamType(3) != TD->getIntPtrType(PT))
        FT->getParamType(3) != TD->getIntPtrType(Context))
      return false;

    if (isFoldable(3, 2, false)) {

@@ -806,7 +806,8 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                          const DataLayout *TD) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) : 64;
  unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
  unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 64;
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD);
  unsigned TrailZ = KnownZero.countTrailingOnes();
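
The replacement above makes the known-bits width explicit: the KnownZero/KnownOne vectors must be as wide as a pointer in V's address space, and the alignment is then read off the trailing known-zero bits. The width selection, extracted as a sketch using only calls shown in this diff:

#include "llvm/DataLayout.h"      // header path assumed for the LLVM 3.2 era
#include "llvm/DerivedTypes.h"
#include "llvm/Value.h"

using namespace llvm;

// Width for pointer known-bits analysis: the per-address-space pointer size
// from DataLayout, with a conservative 64-bit fallback when no layout is set.
static unsigned pointerBitWidth(const Value *V, const DataLayout *TD) {
  unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
  return TD ? TD->getPointerSizeInBits(AS) : 64;
}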

@@ -535,13 +535,9 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
    CV = ICI->getOperand(0);

  // Unwrap any lossless ptrtoint cast.
  if (TD && CV) {
    PtrToIntInst *PTII = NULL;
    if ((PTII = dyn_cast<PtrToIntInst>(CV)) &&
        CV->getType() == TD->getIntPtrType(CV->getContext(),
                                           PTII->getPointerAddressSpace()))
  if (TD && CV && CV->getType() == TD->getIntPtrType(CV->getContext()))
    if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV))
      CV = PTII->getOperand(0);
  }
  return CV;
}
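
The simplified form above looks through a ptrtoint feeding the equality comparison only when the cast is lossless, i.e. the integer is exactly the target's pointer-sized type, so switching on the unwrapped pointer drops no bits. Extracted as a standalone helper (a sketch; the in-tree code inlines this logic):

#include "llvm/DataLayout.h"      // header path assumed for the LLVM 3.2 era
#include "llvm/Instructions.h"

using namespace llvm;

// Look through "ptrtoint %p to iN" only when iN is the intptr type, since
// only then is the cast a bijection and equality on CV equals equality on %p.
static Value *unwrapLosslessPtrToInt(Value *CV, const DataLayout *TD) {
  if (TD && CV && CV->getType() == TD->getIntPtrType(CV->getContext()))
    if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV))
      return PTII->getOperand(0);
  return CV;
}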

@@ -988,7 +984,7 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
  // Convert pointer to int before we switch.
  if (CV->getType()->isPointerTy()) {
    assert(TD && "Cannot switch on pointer without DataLayout");
    CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()),
    CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getContext()),
                                "magicptr");
  }

@@ -2716,7 +2712,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
  if (CompVal->getType()->isPointerTy()) {
    assert(TD && "Cannot switch on pointer without DataLayout");
    CompVal = Builder.CreatePtrToInt(CompVal,
                                     TD->getIntPtrType(CompVal->getType()),
                                     TD->getIntPtrType(CompVal->getContext()),
                                     "magicptr");
  }

@@ -122,13 +122,14 @@ struct MemCpyChkOpt : public InstFortifiedLibCallOptimization {
  virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
    this->CI = CI;
    FunctionType *FT = Callee->getFunctionType();
    LLVMContext &Context = CI->getParent()->getContext();

    // Check if this has the right signature.
    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
        !FT->getParamType(0)->isPointerTy() ||
        !FT->getParamType(1)->isPointerTy() ||
        FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)) ||
        FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(1)))
        FT->getParamType(2) != TD->getIntPtrType(Context) ||
        FT->getParamType(3) != TD->getIntPtrType(Context))
      return 0;

    if (isFoldable(3, 2, false)) {

@@ -144,13 +145,14 @@ struct MemMoveChkOpt : public InstFortifiedLibCallOptimization {
  virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
    this->CI = CI;
    FunctionType *FT = Callee->getFunctionType();
    LLVMContext &Context = CI->getParent()->getContext();

    // Check if this has the right signature.
    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
        !FT->getParamType(0)->isPointerTy() ||
        !FT->getParamType(1)->isPointerTy() ||
        FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)) ||
        FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(1)))
        FT->getParamType(2) != TD->getIntPtrType(Context) ||
        FT->getParamType(3) != TD->getIntPtrType(Context))
      return 0;

    if (isFoldable(3, 2, false)) {

@@ -166,13 +168,14 @@ struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
  virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
    this->CI = CI;
    FunctionType *FT = Callee->getFunctionType();
    LLVMContext &Context = CI->getParent()->getContext();

    // Check if this has the right signature.
    if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
        !FT->getParamType(0)->isPointerTy() ||
        !FT->getParamType(1)->isIntegerTy() ||
        FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)) ||
        FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(0)))
        FT->getParamType(2) != TD->getIntPtrType(Context) ||
        FT->getParamType(3) != TD->getIntPtrType(Context))
      return 0;

    if (isFoldable(3, 2, false)) {

@@ -197,7 +200,7 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
        FT->getReturnType() != FT->getParamType(0) ||
        FT->getParamType(0) != FT->getParamType(1) ||
        FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
        FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
        FT->getParamType(2) != TD->getIntPtrType(Context))
      return 0;

    Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);

@@ -222,8 +225,8 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {

      Value *Ret =
        EmitMemCpyChk(Dst, Src,
                      ConstantInt::get(TD->getIntPtrType(Dst->getType()),
                                       Len), CI->getArgOperand(2), B, TD, TLI);
                      ConstantInt::get(TD->getIntPtrType(Context), Len),
                      CI->getArgOperand(2), B, TD, TLI);
      return Ret;
    }
    return 0;

@@ -292,7 +295,7 @@ struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
        FT->getParamType(0) != FT->getParamType(1) ||
        FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
        !FT->getParamType(2)->isIntegerTy() ||
        FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(0)))
        FT->getParamType(3) != TD->getIntPtrType(Context))
      return 0;

    if (isFoldable(3, 2, false)) {

@@ -354,8 +357,7 @@ struct StrCatOpt : public LibCallOptimization {
    // We have enough information to now generate the memcpy call to do the
    // concatenation for us.  Make a memcpy to copy the nul byte with align = 1.
    B.CreateMemCpy(CpyDst, Src,
                   ConstantInt::get(TD->getIntPtrType(Src->getType()),
                                    Len + 1), 1);
                   ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
    return Dst;
  }
};
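
The strcat fold in the hunk above is easiest to see in C: once the optimizer knows strlen(Src) as a constant Len, the concatenation is just a memcpy of Len + 1 bytes (payload plus nul terminator) to the end of Dst. A sketch of the equivalence the transformation relies on:

#include <string.h>

// What StrCatOpt emits once Len = strlen(Src) is a known constant:
// copy Len + 1 bytes so the trailing '\0' comes along, then return Dst,
// as strcat does.
char *strcat_as_memcpy(char *Dst, const char *Src, size_t Len) {
  char *CpyDst = Dst + strlen(Dst); // the pass computes this end pointer
  memcpy(CpyDst, Src, Len + 1);     // Len + 1 includes the nul byte
  return Dst;
}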

@@ -427,9 +429,8 @@ struct StrChrOpt : public LibCallOptimization {
      if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32)) // memchr needs i32.
        return 0;

      Type *PT = FT->getParamType(0);
      return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
                        ConstantInt::get(TD->getIntPtrType(PT), Len),
                        ConstantInt::get(TD->getIntPtrType(*Context), Len),
                        B, TD, TLI);
    }

@@ -523,9 +524,8 @@ struct StrCmpOpt : public LibCallOptimization {
      // These optimizations require DataLayout.
      if (!TD) return 0;

      Type *PT = FT->getParamType(0);
      return EmitMemCmp(Str1P, Str2P,
                        ConstantInt::get(TD->getIntPtrType(PT),
                        ConstantInt::get(TD->getIntPtrType(*Context),
                                         std::min(Len1, Len2)), B, TD, TLI);
    }

@@ -607,7 +607,7 @@ struct StrCpyOpt : public LibCallOptimization {
    // We have enough information to now generate the memcpy call to do the
    // copy for us.  Make a memcpy to copy the nul byte with align = 1.
    B.CreateMemCpy(Dst, Src,
                   ConstantInt::get(TD->getIntPtrType(Dst->getType()), Len), 1);
                   ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
    return Dst;
  }
};

@@ -524,14 +524,6 @@ std::string DataLayout::getStringRepresentation() const {
  return OS.str();
}

unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const
{
  if (Ty->isPointerTy()) return getTypeSizeInBits(Ty);
  if (Ty->isVectorTy()
      && cast<VectorType>(Ty)->getElementType()->isPointerTy())
    return getTypeSizeInBits(cast<VectorType>(Ty)->getElementType());
  return getPointerSizeInBits(0);
}

uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
  assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");

@@ -679,14 +671,20 @@ IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
/// least as big as that of a pointer of the given pointer (vector of pointer)
/// type.
Type *DataLayout::getIntPtrType(Type *Ty) const {
  unsigned NumBits = getPointerTypeSizeInBits(Ty);
#if 0
  // FIXME: This assert should always have been here, but the review comments
  // weren't addressed in time, and now there is lots of code "depending" on
  // this. Uncomment once this is cleaned up.
  assert(Ty->isPtrOrPtrVectorTy() &&
         "Expected a pointer or pointer vector type.");
#endif
  unsigned NumBits = getTypeSizeInBits(Ty->getScalarType());
  IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
  if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
    return VectorType::get(IntTy, VecTy->getNumElements());
  return IntTy;
}
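
After the revert, getIntPtrType(Type *) sizes the integer from the scalar type and rebuilds the vector shape, while the stricter pointer-only assert stays parked behind #if 0. A usage sketch under an assumed 64-bit-pointer datalayout:

#include "llvm/DataLayout.h"      // header path assumed for the LLVM 3.2 era
#include "llvm/DerivedTypes.h"

using namespace llvm;

// With 64-bit pointers in the layout string, a scalar i8* maps to i64 and
// <4 x i8*> maps to <4 x i64>, because the function above rebuilds the
// vector shape around IntTy.
static void intPtrTypeDemo(const DataLayout &TD, LLVMContext &C) {
  Type *I8Ptr = Type::getInt8PtrTy(C);
  Type *Scalar = TD.getIntPtrType(I8Ptr);                  // i64
  Type *Vec = TD.getIntPtrType(VectorType::get(I8Ptr, 4)); // <4 x i64>
  (void)Scalar;
  (void)Vec;
}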

uint64_t DataLayout::getIndexedOffset(Type *ptrTy,
                                      ArrayRef<Value *> Indices) const {
  Type *Ty = ptrTy;

@@ -2120,17 +2120,6 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}

/// @brief Determine if a cast is a no-op
bool CastInst::isNoopCast(const DataLayout &DL) const {
  unsigned AS = 0;
  if (getOpcode() == Instruction::PtrToInt)
    AS = getOperand(0)->getType()->getPointerAddressSpace();
  else if (getOpcode() == Instruction::IntToPtr)
    AS = getType()->getPointerAddressSpace();
  Type *IntPtrTy = DL.getIntPtrType(getContext(), AS);
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}
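
With the DataLayout overload above deleted by the revert, a caller that holds a DataLayout derives the (address-space-0) intptr type itself and uses the surviving Type*-based overload. A caller-side sketch:

#include "llvm/DataLayout.h"      // header path assumed for the LLVM 3.2 era
#include "llvm/InstrTypes.h"

using namespace llvm;

// Post-revert equivalent of the removed overload for the common
// address-space-0 case.
static bool isNoopCastWithDL(const CastInst *CI, const DataLayout &DL) {
  Type *IntPtrTy = DL.getIntPtrType(CI->getContext());
  return CI->isNoopCast(IntPtrTy);
}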

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:

@@ -215,12 +215,7 @@ unsigned Type::getVectorNumElements() const {
}

unsigned Type::getPointerAddressSpace() const {
  if (isPointerTy())
    return cast<PointerType>(this)->getAddressSpace();
  if (isVectorTy())
    return getSequentialElementType()->getPointerAddressSpace();
  llvm_unreachable("Should never reach here!");
  return 0;
}

@@ -1,43 +0,0 @@
; RUN: opt -instcombine %s | llvm-dis | FileCheck %s
target datalayout = "e-p:32:32:32-p1:64:64:64-p2:8:8:8-p3:16:16:16-p4:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"

define i32 @test_as0(i32 addrspace(0)* %A) {
entry:
; CHECK: %arrayidx = getelementptr i32* %A, i32 1
  %arrayidx = getelementptr i32 addrspace(0)* %A, i64 1
  %y = load i32 addrspace(0)* %arrayidx, align 4
  ret i32 %y
}

define i32 @test_as1(i32 addrspace(1)* %A) {
entry:
; CHECK: %arrayidx = getelementptr i32 addrspace(1)* %A, i64 1
  %arrayidx = getelementptr i32 addrspace(1)* %A, i32 1
  %y = load i32 addrspace(1)* %arrayidx, align 4
  ret i32 %y
}

define i32 @test_as2(i32 addrspace(2)* %A) {
entry:
; CHECK: %arrayidx = getelementptr i32 addrspace(2)* %A, i8 1
  %arrayidx = getelementptr i32 addrspace(2)* %A, i32 1
  %y = load i32 addrspace(2)* %arrayidx, align 4
  ret i32 %y
}

define i32 @test_as3(i32 addrspace(3)* %A) {
entry:
; CHECK: %arrayidx = getelementptr i32 addrspace(3)* %A, i16 1
  %arrayidx = getelementptr i32 addrspace(3)* %A, i32 1
  %y = load i32 addrspace(3)* %arrayidx, align 4
  ret i32 %y
}

define i32 @test_as4(i32 addrspace(4)* %A) {
entry:
; CHECK: %arrayidx = getelementptr i32 addrspace(4)* %A, i96 1
  %arrayidx = getelementptr i32 addrspace(4)* %A, i32 1
  %y = load i32 addrspace(4)* %arrayidx, align 4
  ret i32 %y
}
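
The deleted test above pinned down the behaviour being reverted: instcombine canonicalized each GEP index to the pointer width of its address space, as declared by the pN entries of the datalayout string (i64 for p1:64, i8 for p2:8, i16 for p3:16, i96 for p4:96). The index-type choice it exercised, sketched against the DataLayout API used elsewhere in this commit:

#include "llvm/DataLayout.h"      // header path assumed for the LLVM 3.2 era
#include "llvm/DerivedTypes.h"

using namespace llvm;

// The canonical GEP index type for a pointer in address space AS has the
// width the datalayout string assigns to that space (e.g. p2:8:8:8 -> i8).
static IntegerType *gepIndexTypeFor(const DataLayout &TD, PointerType *PTy) {
  unsigned AS = PTy->getAddressSpace();
  return IntegerType::get(PTy->getContext(), TD.getPointerSizeInBits(AS));
}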

@@ -1,235 +0,0 @@
; "PLAIN" - No optimizations. This tests the target-independent
; constant folder.
; RUN: opt -S -o - < %s | FileCheck --check-prefix=PLAIN %s

target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-p5:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"

; PLAIN: ModuleID = '<stdin>'

; The automatic constant folder in opt does not have targetdata access, so
; it can't fold gep arithmetic, in general. However, the constant folder run
; from instcombine and global opt can use targetdata.
; PLAIN: @G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1)
@G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1)
; PLAIN: @G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1)
@G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1)
; PLAIN: @F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2)
@F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2)
; PLAIN: @F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2)
@F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2)
; PLAIN: @H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1)
@H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1)
; PLAIN: @H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i8 -1)
@H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 0 to i1 addrspace(2)*), i8 -1)

; The target-independent folder should be able to do some clever
; simplifications on sizeof, alignof, and offsetof expressions. The
; target-dependent folder should fold these down to constants.
; PLAIN-X: @a = constant i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310)
@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]} addrspace(4)* getelementptr ({[7 x double], [7 x double]} addrspace(4)* null, i64 11) to i64), i64 5))

; PLAIN-X: @b = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
@b = constant i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64)

; PLAIN-X: @c = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2)
@c = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64)

; PLAIN-X: @d = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11)
@d = constant i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64)

; PLAIN-X: @e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
@e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64)

; PLAIN-X: @f = constant i64 1
@f = constant i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64)

; PLAIN-X: @g = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
@g = constant i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64)

; PLAIN-X: @h = constant i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64)
@h = constant i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i64 1) to i64)

; PLAIN-X: @i = constant i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64)
@i = constant i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double} addrspace(4)* null, i64 0, i32 1) to i64)
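
The @a through @i globals above all use the classic IR idiom of expressing sizeof, alignof, and offsetof as a ptrtoint of a getelementptr off null; e.g. sizeof(double) is the "address" of element 1 of a double* based at null. The C++ analogue, purely illustrative (this pointer arithmetic is formally undefined in C++, while the IR form is exactly what the constant folder is expected to reduce):

#include <cstdint>

// gepSizeof<double>() plays the role of
//   ptrtoint (double* getelementptr (double* null, i32 1) to i64)
template <typename T> uintptr_t gepSizeof() {
  return reinterpret_cast<uintptr_t>(static_cast<T *>(nullptr) + 1);
}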

; The target-dependent folder should cast GEP indices to integer-sized pointers.

; PLAIN: @M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1)
; PLAIN: @N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1)
; PLAIN: @O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1)

@M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1)
@N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1)
@O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1)

; Fold GEP of a GEP. Very simple cases are folded.

; PLAIN-X: @Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 2)
@ext = external addrspace(3) global [3 x { i32, i32 }]
@Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 1), i64 1)

; PLAIN-X: @Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1)
@Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1)

; Duplicate all of the above as function return values rather than
; global initializers.

; PLAIN: define i8 addrspace(1)* @goo8() nounwind {
; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)*
; PLAIN: ret i8 addrspace(1)* %t
; PLAIN: }
; PLAIN: define i1 addrspace(2)* @goo1() nounwind {
; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)*
; PLAIN: ret i1 addrspace(2)* %t
; PLAIN: }
; PLAIN: define i8 addrspace(1)* @foo8() nounwind {
; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)*
; PLAIN: ret i8 addrspace(1)* %t
; PLAIN: }
; PLAIN: define i1 addrspace(2)* @foo1() nounwind {
; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)*
; PLAIN: ret i1 addrspace(2)* %t
; PLAIN: }
; PLAIN: define i8 addrspace(1)* @hoo8() nounwind {
; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1) to i8 addrspace(1)*
; PLAIN: ret i8 addrspace(1)* %t
; PLAIN: }
; PLAIN: define i1 addrspace(2)* @hoo1() nounwind {
; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 -1) to i1 addrspace(2)*
; PLAIN: ret i1 addrspace(2)* %t
; PLAIN: }
define i8 addrspace(1)* @goo8() nounwind {
  %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)*
  ret i8 addrspace(1)* %t
}
define i1 addrspace(2)* @goo1() nounwind {
  %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)*
  ret i1 addrspace(2)* %t
}
define i8 addrspace(1)* @foo8() nounwind {
  %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)*
  ret i8 addrspace(1)* %t
}
define i1 addrspace(2)* @foo1() nounwind {
  %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)*
  ret i1 addrspace(2)* %t
}
define i8 addrspace(1)* @hoo8() nounwind {
  %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)*
  ret i8 addrspace(1)* %t
}
define i1 addrspace(2)* @hoo1() nounwind {
  %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 0 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)*
  ret i1 addrspace(2)* %t
}

; PLAIN-X: define i64 @fa() nounwind {
; PLAIN-X: %t = bitcast i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) to i64
; PLAIN-X: ret i64 %t
; PLAIN-X: }
; PLAIN-X: define i64 @fb() nounwind {
; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
; PLAIN-X: ret i64 %t
; PLAIN-X: }
; PLAIN-X: define i64 @fc() nounwind {
; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) to i64
; PLAIN-X: ret i64 %t
; PLAIN-X: }
; PLAIN-X: define i64 @fd() nounwind {
; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11) to i64
; PLAIN-X: ret i64 %t
; PLAIN-X: }
; PLAIN-X: define i64 @fe() nounwind {
; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
; PLAIN-X: ret i64 %t
; PLAIN-X: }
; PLAIN-X: define i64 @ff() nounwind {
; PLAIN-X: %t = bitcast i64 1 to i64
; PLAIN-X: ret i64 %t
; PLAIN-X: }
; PLAIN-X: define i64 @fg() nounwind {
; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
; PLAIN-X: ret i64 %t
; PLAIN-X: }
; PLAIN-X: define i64 @fh() nounwind {
; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) to i64
; PLAIN-X: ret i64 %t
; PLAIN-X: }
; PLAIN-X: define i64 @fi() nounwind {
; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) to i64
; PLAIN-X: ret i64 %t
; PLAIN-X: }
define i64 @fa() nounwind {
  %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64
  ret i64 %t
}
define i64 @fb() nounwind {
  %t = bitcast i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}
define i64 @fc() nounwind {
  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64
  ret i64 %t
}
define i64 @fd() nounwind {
  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) to i64
  ret i64 %t
}
define i64 @fe() nounwind {
  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64
  ret i64 %t
}
define i64 @ff() nounwind {
  %t = bitcast i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}
define i64 @fg() nounwind {
  %t = bitcast i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}
define i64 @fh() nounwind {
  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64) to i64
  ret i64 %t
}
define i64 @fi() nounwind {
  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double}addrspace(4)* null, i64 0, i32 1) to i64) to i64
  ret i64 %t
}

; PLAIN: define i64* @fM() nounwind {
; PLAIN: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
; PLAIN: ret i64* %t
; PLAIN: }
; PLAIN: define i64* @fN() nounwind {
; PLAIN: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
; PLAIN: ret i64* %t
; PLAIN: }
; PLAIN: define i64* @fO() nounwind {
; PLAIN: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
; PLAIN: ret i64* %t
; PLAIN: }

define i64* @fM() nounwind {
  %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
  ret i64* %t
}
define i64* @fN() nounwind {
  %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
  ret i64* %t
}
define i64* @fO() nounwind {
  %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
  ret i64* %t
}

; PLAIN: define i32 addrspace(1)* @fZ() nounwind {
; PLAIN: %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)*
; PLAIN: ret i32 addrspace(1)* %t
; PLAIN: }
@ext2 = external addrspace(1) global [3 x { i32, i32 }]
define i32 addrspace(1)* @fZ() nounwind {
  %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)*
  ret i32 addrspace(1)* %t
}