Revert the series of commits starting with r166578 which introduced the
getIntPtrType support for multiple address spaces via a pointer type,
and also introduced a crasher bug in the constant folder reported in
PR14233.

These commits also contained several problems that should really be
addressed before they are re-committed. I have avoided reverting various
cleanups to the DataLayout APIs that are reasonable to have moving
forward in order to reduce the amount of churn, and minimize the number
of commits that were reverted. I've also manually updated merge
conflicts and manually arranged for the getIntPtrType function to stay
in DataLayout and to be defined in a plausible way after this revert.

Thanks to Duncan for working through this exact strategy with me, and
Nick Lewycky for tracking down the really annoying crasher this
triggered. (Test case to follow in its own commit.)

After discussing with Duncan extensively, and based on a note from
Micah, I'm going to continue to back out some more of the more
problematic patches in this series in order to ensure we go into the
LLVM 3.2 branch with a reasonable story here. I'll send a note to
llvmdev explaining what's going on and why.

Summary of reverted revisions:

r166634: Fix a compiler warning with an unused variable.
r166607: Add some cleanup to the DataLayout changes requested by Chandler.
r166596: Revert "Back out r166591, not sure why this made it through since I cancelled the command. Bleh, sorry about this!
r166591: Delete a directory that wasn't supposed to be checked in yet.
r166578: Add in support for getIntPtrType to get the pointer type based on the address space.

llvm-svn: 167221
This commit is contained in: parent 2d8b294b3c, commit 7ec5085e01
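The central API change being reverted is easiest to see in the DataLayout hunk below (@ -343,7 +335,7): getIntPtrType regains a defaulted address-space argument. As a minimal illustrative sketch (a hypothetical call site, not code from this commit; header paths are the pre-3.3 ones used in this tree):

    #include "llvm/DataLayout.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"

    // Sketch only: with AddressSpace defaulting to 0 again, pre-existing
    // one-argument callers keep compiling, and an explicit address space
    // is still accepted.
    static llvm::IntegerType *intPtrTypeFor(const llvm::DataLayout &DL,
                                            llvm::LLVMContext &Ctx) {
      return DL.getIntPtrType(Ctx); // equivalent to DL.getIntPtrType(Ctx, 0)
    }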
@@ -168,8 +168,7 @@ class ObjectSizeOffsetVisitor
 
 public:
   ObjectSizeOffsetVisitor(const DataLayout *TD, const TargetLibraryInfo *TLI,
-                          LLVMContext &Context, bool RoundToAlign = false,
-                          unsigned AS = 0);
+                          LLVMContext &Context, bool RoundToAlign = false);
 
   SizeOffsetType compute(Value *V);
 
@@ -230,7 +229,7 @@ class ObjectSizeOffsetEvaluator
 
 public:
   ObjectSizeOffsetEvaluator(const DataLayout *TD, const TargetLibraryInfo *TLI,
-                            LLVMContext &Context, unsigned AS = 0);
+                            LLVMContext &Context);
   SizeOffsetEvalType compute(Value *V);
 
   bool knownSize(SizeOffsetEvalType SizeOffset) {
@@ -628,7 +628,7 @@ namespace llvm {
 
     /// getSizeOfExpr - Return an expression for sizeof on the given type.
     ///
-    const SCEV *getSizeOfExpr(Type *AllocTy, Type *IntPtrTy);
+    const SCEV *getSizeOfExpr(Type *AllocTy);
 
     /// getAlignOfExpr - Return an expression for alignof on the given type.
     ///
@@ -636,8 +636,7 @@ namespace llvm {
 
     /// getOffsetOfExpr - Return an expression for offsetof on the given field.
     ///
-    const SCEV *getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
-                                unsigned FieldNo);
+    const SCEV *getOffsetOfExpr(StructType *STy, unsigned FieldNo);
 
     /// getOffsetOfExpr - Return an expression for offsetof on the given field.
     ///
@@ -258,14 +258,6 @@ public:
   unsigned getPointerSizeInBits(unsigned AS) const {
     return getPointerSize(AS) * 8;
   }
-  /// Layout pointer size, in bits, based on the type.
-  /// If this function is called with a pointer type, then
-  /// the type size of the pointer is returned.
-  /// If this function is called with a vector of pointers,
-  /// then the type size of the pointer is returned.
-  /// Otherwise the type sizeo f a default pointer is returned.
-  unsigned getPointerTypeSizeInBits(Type* Ty) const;
-
   /// Size examples:
   ///
   /// Type        SizeInBits  StoreSizeInBits  AllocSizeInBits[*]
@@ -343,7 +335,7 @@ public:
 
   /// getIntPtrType - Return an integer type with size at least as big as that
   /// of a pointer in the given address space.
-  IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace) const;
+  IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
 
   /// getIntPtrType - Return an integer (vector of integer) type with size at
   /// least as big as that of a pointer of the given pointer (vector of pointer)
@@ -17,7 +17,6 @@
 #define LLVM_INSTRUCTION_TYPES_H
 
 #include "llvm/Instruction.h"
-#include "llvm/DataLayout.h"
 #include "llvm/OperandTraits.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/ADT/Twine.h"
@@ -577,11 +576,6 @@ public:
     Type *IntPtrTy ///< Integer type corresponding to pointer
   ) const;
 
-  /// @brief Determine if this cast is a no-op cast.
-  bool isNoopCast(
-    const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
-  ) const;
-
   /// Determine how a pair of casts can be eliminated, if they can be at all.
   /// This is a helper function for both CastInst and ConstantExpr.
   /// @returns 0 if the CastInst pair can't be eliminated, otherwise
@@ -179,9 +179,8 @@ static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
 template<typename IRBuilderTy>
 Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
                      bool NoAssumptions = false) {
-  unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
   gep_type_iterator GTI = gep_type_begin(GEP);
-  Type *IntPtrTy = TD.getIntPtrType(GEP->getContext(), AS);
+  Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
   Value *Result = Constant::getNullValue(IntPtrTy);
 
   // If the GEP is inbounds, we know that none of the addressing operations will
@@ -189,6 +188,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
   bool isInBounds = cast<GEPOperator>(GEP)->isInBounds() && !NoAssumptions;
 
   // Build a mask for high order bits.
+  unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
   unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
   uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
 
@@ -41,7 +41,7 @@ using namespace llvm;
 // Constant Folding internal helper functions
 //===----------------------------------------------------------------------===//
 
 /// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
 /// DataLayout. This always returns a non-null constant, but it may be a
 /// ConstantExpr if unfoldable.
 static Constant *FoldBitCast(Constant *C, Type *DestTy,
@@ -59,9 +59,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
     return ConstantExpr::getBitCast(C, DestTy);
 
   unsigned NumSrcElts = CDV->getType()->getNumElements();
 
   Type *SrcEltTy = CDV->getType()->getElementType();
 
   // If the vector is a vector of floating point, convert it to vector of int
   // to simplify things.
   if (SrcEltTy->isFloatingPointTy()) {
@@ -72,7 +72,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
     C = ConstantExpr::getBitCast(C, SrcIVTy);
     CDV = cast<ConstantDataVector>(C);
   }
 
   // Now that we know that the input value is a vector of integers, just shift
   // and insert them into our result.
   unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
@@ -84,43 +84,43 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       else
         Result |= CDV->getElementAsInteger(i);
     }
 
     return ConstantInt::get(IT, Result);
   }
 
   // The code below only handles casts to vectors currently.
   VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
   if (DestVTy == 0)
     return ConstantExpr::getBitCast(C, DestTy);
 
   // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
   // vector so the code below can handle it uniformly.
   if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
     Constant *Ops = C; // don't take the address of C!
     return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
   }
 
   // If this is a bitcast from constant vector -> vector, fold it.
   if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
     return ConstantExpr::getBitCast(C, DestTy);
 
   // If the element types match, VMCore can fold it.
   unsigned NumDstElt = DestVTy->getNumElements();
   unsigned NumSrcElt = C->getType()->getVectorNumElements();
   if (NumDstElt == NumSrcElt)
     return ConstantExpr::getBitCast(C, DestTy);
 
   Type *SrcEltTy = C->getType()->getVectorElementType();
   Type *DstEltTy = DestVTy->getElementType();
 
   // Otherwise, we're changing the number of elements in a vector, which
   // requires endianness information to do the right thing. For example,
   //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
   // folds to (little endian):
   //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
   // and to (big endian):
   //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>
 
   // First thing is first. We only want to think about integer here, so if
   // we have something in FP form, recast it as integer.
   if (DstEltTy->isFloatingPointTy()) {
@@ -130,11 +130,11 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
     // Recursively handle this integer conversion, if possible.
     C = FoldBitCast(C, DestIVTy, TD);
 
     // Finally, VMCore can handle this now that #elts line up.
     return ConstantExpr::getBitCast(C, DestTy);
   }
 
   // Okay, we know the destination is integer, if the input is FP, convert
   // it to integer first.
   if (SrcEltTy->isFloatingPointTy()) {
@@ -148,13 +148,13 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
         !isa<ConstantDataVector>(C))
       return C;
   }
 
   // Now we know that the input and output vectors are both integer vectors
   // of the same size, and that their #elements is not the same. Do the
   // conversion here, which depends on whether the input or output has
   // more elements.
   bool isLittleEndian = TD.isLittleEndian();
 
   SmallVector<Constant*, 32> Result;
   if (NumDstElt < NumSrcElt) {
     // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
@@ -170,15 +170,15 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
       if (!Src)  // Reject constantexpr elements.
         return ConstantExpr::getBitCast(C, DestTy);
 
       // Zero extend the element to the right size.
       Src = ConstantExpr::getZExt(Src, Elt->getType());
 
       // Shift it to the right place, depending on endianness.
       Src = ConstantExpr::getShl(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
       ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
 
       // Mix it in.
       Elt = ConstantExpr::getOr(Elt, Src);
     }
@@ -186,30 +186,30 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
     }
     return ConstantVector::get(Result);
   }
 
   // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
   unsigned Ratio = NumDstElt/NumSrcElt;
   unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
 
   // Loop over each source value, expanding into multiple results.
   for (unsigned i = 0; i != NumSrcElt; ++i) {
     Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
     if (!Src)  // Reject constantexpr elements.
       return ConstantExpr::getBitCast(C, DestTy);
 
     unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
     for (unsigned j = 0; j != Ratio; ++j) {
       // Shift the piece of the value into the right place, depending on
       // endianness.
       Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
       ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
 
       // Truncate and remember this piece.
       Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
     }
   }
 
   return ConstantVector::get(Result);
 }
 
@@ -224,28 +224,28 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
     Offset = 0;
     return true;
   }
 
   // Otherwise, if this isn't a constant expr, bail out.
   ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
   if (!CE) return false;
 
   // Look through ptr->int and ptr->ptr casts.
   if (CE->getOpcode() == Instruction::PtrToInt ||
       CE->getOpcode() == Instruction::BitCast)
     return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
 
   // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
   if (CE->getOpcode() == Instruction::GetElementPtr) {
     // Cannot compute this if the element type of the pointer is missing size
     // info.
     if (!cast<PointerType>(CE->getOperand(0)->getType())
            ->getElementType()->isSized())
       return false;
 
     // If the base isn't a global+constant, we aren't either.
     if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
       return false;
 
     // Otherwise, add any offset that our operands provide.
     gep_type_iterator GTI = gep_type_begin(CE);
     for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
@@ -253,7 +253,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
       ConstantInt *CI = dyn_cast<ConstantInt>(*i);
       if (!CI) return false;  // Index isn't a simple constant?
       if (CI->isZero()) continue;  // Not adding anything.
 
       if (StructType *ST = dyn_cast<StructType>(*GTI)) {
         // N = N + Offset
         Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
@@ -264,7 +264,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
     }
     return true;
   }
 
   return false;
 }
 
@@ -277,27 +277,27 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                                const DataLayout &TD) {
   assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
          "Out of range access");
 
   // If this element is zero or undefined, we can just return since *CurPtr is
   // zero initialized.
   if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
     return true;
 
   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
     if (CI->getBitWidth() > 64 ||
         (CI->getBitWidth() & 7) != 0)
       return false;
 
     uint64_t Val = CI->getZExtValue();
     unsigned IntBytes = unsigned(CI->getBitWidth()/8);
 
     for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
       CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));
       ++ByteOffset;
     }
     return true;
   }
 
   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
     if (CFP->getType()->isDoubleTy()) {
       C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
@@ -309,13 +309,13 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
     }
     return false;
   }
 
   if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
     const StructLayout *SL = TD.getStructLayout(CS->getType());
     unsigned Index = SL->getElementContainingOffset(ByteOffset);
     uint64_t CurEltOffset = SL->getElementOffset(Index);
     ByteOffset -= CurEltOffset;
 
     while (1) {
       // If the element access is to the element itself and not to tail padding,
       // read the bytes from the element.
@@ -325,9 +325,9 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
           !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                               BytesLeft, TD))
         return false;
 
       ++Index;
 
       // Check to see if we read from the last struct element, if so we're done.
       if (Index == CS->getType()->getNumElements())
         return true;
@@ -375,11 +375,11 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
     }
     return true;
   }
 
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
     if (CE->getOpcode() == Instruction::IntToPtr &&
-        CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType()))
+        CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
       return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                 BytesLeft, TD);
   }
 
@@ -391,7 +391,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
                                                  const DataLayout &TD) {
   Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
   IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
 
   // If this isn't an integer load we can't fold it directly.
   if (!IntType) {
     // If this is a float/double load, we can try folding it as an int32/64 load
@@ -415,15 +415,15 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
       return FoldBitCast(Res, LoadTy, TD);
     return 0;
   }
 
   unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
   if (BytesLoaded > 32 || BytesLoaded == 0) return 0;
 
   GlobalValue *GVal;
   int64_t Offset;
   if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
     return 0;
 
   GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
       !GV->getInitializer()->getType()->isSized())
@@ -432,11 +432,11 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
   // If we're loading off the beginning of the global, some bytes may be valid,
   // but we don't try to handle this.
   if (Offset < 0) return 0;
 
   // If we're not accessing anything in this constant, the result is undefined.
   if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType()))
     return UndefValue::get(IntType);
 
   unsigned char RawBytes[32] = {0};
   if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes,
                           BytesLoaded, TD))
@@ -464,15 +464,15 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
   // If the loaded value isn't a constant expr, we can't handle it.
   ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
   if (!CE) return 0;
 
   if (CE->getOpcode() == Instruction::GetElementPtr) {
     if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
       if (GV->isConstant() && GV->hasDefinitiveInitializer())
         if (Constant *V =
             ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
           return V;
   }
 
   // Instead of loading constant c string, use corresponding integer value
   // directly if string length is small enough.
   StringRef Str;
@@ -500,14 +500,14 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
         SingleChar = 0;
         StrVal = (StrVal << 8) | SingleChar;
       }
 
       Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
       if (Ty->isFloatingPointTy())
         Res = ConstantExpr::getBitCast(Res, Ty);
       return Res;
     }
   }
 
   // If this load comes from anywhere in a constant global, and if the global
   // is all undef or zero, we know what it loads.
   if (GlobalVariable *GV =
@@ -520,7 +520,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
       return UndefValue::get(ResTy);
     }
   }
 
   // Try hard to fold loads from bitcasted strange and non-type-safe things. We
   // currently don't do any of this for big endian systems. It can be
   // generalized in the future if someone is interested.
@@ -531,7 +531,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
 
 static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
   if (LI->isVolatile()) return 0;
 
   if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
     return ConstantFoldLoadFromConstPtr(C, TD);
 
@@ -540,23 +540,23 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
 
 /// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
 /// Attempt to symbolically evaluate the result of a binary operator merging
 /// these together. If target data info is available, it is provided as TD,
 /// otherwise TD is null.
 static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                            Constant *Op1, const DataLayout *TD){
   // SROA
 
   // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
   // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
   // bits.
 
 
   // If the constant expr is something like &A[123] - &A[4].f, fold this into a
   // constant. This happens frequently when iterating over a global array.
   if (Opc == Instruction::Sub && TD) {
     GlobalValue *GV1, *GV2;
     int64_t Offs1, Offs2;
 
     if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
       if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
           GV1 == GV2) {
@@ -564,7 +564,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
         return ConstantInt::get(Op0->getType(), Offs1-Offs2);
       }
   }
 
   return 0;
 }
 
|
||||||
Type *ResultTy, const DataLayout *TD,
|
Type *ResultTy, const DataLayout *TD,
|
||||||
const TargetLibraryInfo *TLI) {
|
const TargetLibraryInfo *TLI) {
|
||||||
if (!TD) return 0;
|
if (!TD) return 0;
|
||||||
Type *IntPtrTy = TD->getIntPtrType(ResultTy);
|
Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
|
||||||
|
|
||||||
bool Any = false;
|
bool Any = false;
|
||||||
SmallVector<Constant*, 32> NewIdxs;
|
SmallVector<Constant*, 32> NewIdxs;
|
||||||
|
@ -628,15 +628,14 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
|
||||||
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
|
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
|
||||||
!Ptr->getType()->isPointerTy())
|
!Ptr->getType()->isPointerTy())
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
|
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
|
||||||
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext(), AS);
|
|
||||||
|
|
||||||
// If this is a constant expr gep that is effectively computing an
|
// If this is a constant expr gep that is effectively computing an
|
||||||
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
|
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
|
||||||
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
|
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
|
||||||
if (!isa<ConstantInt>(Ops[i])) {
|
if (!isa<ConstantInt>(Ops[i])) {
|
||||||
|
|
||||||
// If this is "gep i8* Ptr, (sub 0, V)", fold this as:
|
// If this is "gep i8* Ptr, (sub 0, V)", fold this as:
|
||||||
// "inttoptr (sub (ptrtoint Ptr), V)"
|
// "inttoptr (sub (ptrtoint Ptr), V)"
|
||||||
if (Ops.size() == 2 &&
|
if (Ops.size() == 2 &&
|
||||||
|
@ -703,8 +702,6 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
|
||||||
// Also, this helps GlobalOpt do SROA on GlobalVariables.
|
// Also, this helps GlobalOpt do SROA on GlobalVariables.
|
||||||
Type *Ty = Ptr->getType();
|
Type *Ty = Ptr->getType();
|
||||||
assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
|
assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
|
||||||
assert(Ty->getPointerAddressSpace() == AS
|
|
||||||
&& "Operand and result of GEP should be in the same address space.");
|
|
||||||
SmallVector<Constant*, 32> NewIdxs;
|
SmallVector<Constant*, 32> NewIdxs;
|
||||||
do {
|
do {
|
||||||
if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
|
if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
|
||||||
|
@ -712,15 +709,15 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
|
||||||
// The only pointer indexing we'll do is on the first index of the GEP.
|
// The only pointer indexing we'll do is on the first index of the GEP.
|
||||||
if (!NewIdxs.empty())
|
if (!NewIdxs.empty())
|
||||||
break;
|
break;
|
||||||
|
|
||||||
// Only handle pointers to sized types, not pointers to functions.
|
// Only handle pointers to sized types, not pointers to functions.
|
||||||
if (!ATy->getElementType()->isSized())
|
if (!ATy->getElementType()->isSized())
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine which element of the array the offset points into.
|
// Determine which element of the array the offset points into.
|
||||||
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
|
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
|
||||||
IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext(), AS);
|
IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
|
||||||
if (ElemSize == 0)
|
if (ElemSize == 0)
|
||||||
// The element size is 0. This may be [0 x Ty]*, so just use a zero
|
// The element size is 0. This may be [0 x Ty]*, so just use a zero
|
||||||
// index for this level and proceed to the next level to see if it can
|
// index for this level and proceed to the next level to see if it can
|
||||||
|
@ -840,7 +837,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
|
||||||
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
|
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
|
||||||
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
|
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
|
||||||
TD, TLI);
|
TD, TLI);
|
||||||
|
|
||||||
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
|
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
|
||||||
return ConstantFoldLoadInst(LI, TD);
|
return ConstantFoldLoadInst(LI, TD);
|
||||||
|
|
||||||
|
@ -890,19 +887,19 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
|
||||||
/// information, due to only being passed an opcode and operands. Constant
|
/// information, due to only being passed an opcode and operands. Constant
|
||||||
/// folding using this function strips this information.
|
/// folding using this function strips this information.
|
||||||
///
|
///
|
||||||
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
|
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
|
||||||
ArrayRef<Constant *> Ops,
|
ArrayRef<Constant *> Ops,
|
||||||
const DataLayout *TD,
|
const DataLayout *TD,
|
||||||
const TargetLibraryInfo *TLI) {
|
const TargetLibraryInfo *TLI) {
|
||||||
// Handle easy binops first.
|
// Handle easy binops first.
|
||||||
if (Instruction::isBinaryOp(Opcode)) {
|
if (Instruction::isBinaryOp(Opcode)) {
|
||||||
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
|
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
|
||||||
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
|
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
|
||||||
return C;
|
return C;
|
||||||
|
|
||||||
return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
|
return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (Opcode) {
|
switch (Opcode) {
|
||||||
default: return 0;
|
default: return 0;
|
||||||
case Instruction::ICmp:
|
case Instruction::ICmp:
|
||||||
|
@ -921,7 +918,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
|
||||||
unsigned InWidth = Input->getType()->getScalarSizeInBits();
|
unsigned InWidth = Input->getType()->getScalarSizeInBits();
|
||||||
unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
|
unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
|
||||||
if (TD->getPointerSizeInBits(AS) < InWidth) {
|
if (TD->getPointerSizeInBits(AS) < InWidth) {
|
||||||
Constant *Mask =
|
Constant *Mask =
|
||||||
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
|
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
|
||||||
TD->getPointerSizeInBits(AS)));
|
TD->getPointerSizeInBits(AS)));
|
||||||
Input = ConstantExpr::getAnd(Input, Mask);
|
Input = ConstantExpr::getAnd(Input, Mask);
|
||||||
|
@ -937,7 +934,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
|
||||||
// pointer, so it can't be done in ConstantExpr::getCast.
|
// pointer, so it can't be done in ConstantExpr::getCast.
|
||||||
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
|
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
|
||||||
if (TD && CE->getOpcode() == Instruction::PtrToInt &&
|
if (TD && CE->getOpcode() == Instruction::PtrToInt &&
|
||||||
TD->getTypeSizeInBits(CE->getOperand(0)->getType())
|
TD->getPointerSizeInBits(
|
||||||
|
cast<PointerType>(CE->getOperand(0)->getType())->getAddressSpace())
|
||||||
<= CE->getType()->getScalarSizeInBits())
|
<= CE->getType()->getScalarSizeInBits())
|
||||||
return FoldBitCast(CE->getOperand(0), DestTy, *TD);
|
return FoldBitCast(CE->getOperand(0), DestTy, *TD);
|
||||||
|
|
||||||
|
@ -969,7 +967,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
|
||||||
return C;
|
return C;
|
||||||
if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
|
if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
|
||||||
return C;
|
return C;
|
||||||
|
|
||||||
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
|
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -979,7 +977,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
|
||||||
/// returns a constant expression of the specified operands.
|
/// returns a constant expression of the specified operands.
|
||||||
///
|
///
|
||||||
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
|
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
|
||||||
Constant *Ops0, Constant *Ops1,
|
Constant *Ops0, Constant *Ops1,
|
||||||
const DataLayout *TD,
|
const DataLayout *TD,
|
||||||
const TargetLibraryInfo *TLI) {
|
const TargetLibraryInfo *TLI) {
|
||||||
// fold: icmp (inttoptr x), null -> icmp x, 0
|
// fold: icmp (inttoptr x), null -> icmp x, 0
|
||||||
|
@ -990,10 +988,9 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
|
||||||
// ConstantExpr::getCompare cannot do this, because it doesn't have TD
|
// ConstantExpr::getCompare cannot do this, because it doesn't have TD
|
||||||
// around to know if bit truncation is happening.
|
// around to know if bit truncation is happening.
|
||||||
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
|
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
|
||||||
Type *IntPtrTy = NULL;
|
|
||||||
if (TD && Ops1->isNullValue()) {
|
if (TD && Ops1->isNullValue()) {
|
||||||
|
Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
|
||||||
if (CE0->getOpcode() == Instruction::IntToPtr) {
|
if (CE0->getOpcode() == Instruction::IntToPtr) {
|
||||||
IntPtrTy = TD->getIntPtrType(CE0->getType());
|
|
||||||
// Convert the integer value to the right size to ensure we get the
|
// Convert the integer value to the right size to ensure we get the
|
||||||
// proper extension or truncation.
|
// proper extension or truncation.
|
||||||
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
|
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
|
||||||
|
@ -1001,24 +998,22 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
|
||||||
Constant *Null = Constant::getNullValue(C->getType());
|
Constant *Null = Constant::getNullValue(C->getType());
|
||||||
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
|
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Only do this transformation if the int is intptrty in size, otherwise
|
// Only do this transformation if the int is intptrty in size, otherwise
|
||||||
// there is a truncation or extension that we aren't modeling.
|
// there is a truncation or extension that we aren't modeling.
|
||||||
if (CE0->getOpcode() == Instruction::PtrToInt) {
|
if (CE0->getOpcode() == Instruction::PtrToInt &&
|
||||||
IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
|
CE0->getType() == IntPtrTy) {
|
||||||
if (CE0->getType() == IntPtrTy) {
|
Constant *C = CE0->getOperand(0);
|
||||||
Constant *C = CE0->getOperand(0);
|
Constant *Null = Constant::getNullValue(C->getType());
|
||||||
Constant *Null = Constant::getNullValue(C->getType());
|
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
|
||||||
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
|
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
|
||||||
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
|
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
|
||||||
|
Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
|
||||||
|
|
||||||
if (CE0->getOpcode() == Instruction::IntToPtr) {
|
if (CE0->getOpcode() == Instruction::IntToPtr) {
|
||||||
Type *IntPtrTy = TD->getIntPtrType(CE0->getType());
|
|
||||||
// Convert the integer value to the right size to ensure we get the
|
// Convert the integer value to the right size to ensure we get the
|
||||||
// proper extension or truncation.
|
// proper extension or truncation.
|
||||||
Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
|
Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
|
||||||
|
@ -1027,36 +1022,34 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
|
||||||
IntPtrTy, false);
|
IntPtrTy, false);
|
||||||
return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
|
return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Only do this transformation if the int is intptrty in size, otherwise
|
// Only do this transformation if the int is intptrty in size, otherwise
|
||||||
// there is a truncation or extension that we aren't modeling.
|
// there is a truncation or extension that we aren't modeling.
|
||||||
if (CE0->getOpcode() == Instruction::PtrToInt) {
|
if ((CE0->getOpcode() == Instruction::PtrToInt &&
|
||||||
IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
|
CE0->getType() == IntPtrTy &&
|
||||||
if (CE0->getType() == IntPtrTy &&
|
CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
|
||||||
CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())
|
|
||||||
return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
|
return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
|
||||||
CE1->getOperand(0), TD, TLI);
|
CE1->getOperand(0), TD, TLI);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
|
// icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
|
||||||
// icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
|
// icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
|
||||||
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
|
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
|
||||||
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
|
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
|
||||||
Constant *LHS =
|
Constant *LHS =
|
||||||
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
|
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
|
||||||
TD, TLI);
|
TD, TLI);
|
||||||
Constant *RHS =
|
Constant *RHS =
|
||||||
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
|
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
|
||||||
TD, TLI);
|
TD, TLI);
|
||||||
unsigned OpC =
|
unsigned OpC =
|
||||||
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
|
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
|
||||||
Constant *Ops[] = { LHS, RHS };
|
Constant *Ops[] = { LHS, RHS };
|
||||||
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
|
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
|
return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1064,7 +1057,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
|
||||||
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
|
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
|
||||||
/// getelementptr constantexpr, return the constant value being addressed by the
|
/// getelementptr constantexpr, return the constant value being addressed by the
|
||||||
/// constant expression, or null if something is funny and we can't decide.
|
/// constant expression, or null if something is funny and we can't decide.
|
||||||
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
|
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
|
||||||
ConstantExpr *CE) {
|
ConstantExpr *CE) {
|
||||||
if (!CE->getOperand(1)->isNullValue())
|
if (!CE->getOperand(1)->isNullValue())
|
||||||
return 0; // Do not allow stepping over the value!
|
return 0; // Do not allow stepping over the value!
|
||||||
|
@ -1134,14 +1127,14 @@ llvm::canConstantFoldCallTo(const Function *F) {
|
||||||
|
|
||||||
if (!F->hasName()) return false;
|
if (!F->hasName()) return false;
|
||||||
StringRef Name = F->getName();
|
StringRef Name = F->getName();
|
||||||
|
|
||||||
// In these cases, the check of the length is required. We don't want to
|
// In these cases, the check of the length is required. We don't want to
|
||||||
// return true for a name like "cos\0blah" which strcmp would return equal to
|
// return true for a name like "cos\0blah" which strcmp would return equal to
|
||||||
// "cos", but has length 8.
|
// "cos", but has length 8.
|
||||||
switch (Name[0]) {
|
switch (Name[0]) {
|
||||||
default: return false;
|
default: return false;
|
||||||
case 'a':
|
case 'a':
|
||||||
return Name == "acos" || Name == "asin" ||
|
return Name == "acos" || Name == "asin" ||
|
||||||
Name == "atan" || Name == "atan2";
|
Name == "atan" || Name == "atan2";
|
||||||
case 'c':
|
case 'c':
|
||||||
return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
|
return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
|
||||||
|
@ -1161,7 +1154,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
|
static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
|
||||||
Type *Ty) {
|
Type *Ty) {
|
||||||
sys::llvm_fenv_clearexcept();
|
sys::llvm_fenv_clearexcept();
|
||||||
V = NativeFP(V);
|
V = NativeFP(V);
|
||||||
|
@ -1169,7 +1162,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
|
||||||
sys::llvm_fenv_clearexcept();
|
sys::llvm_fenv_clearexcept();
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Ty->isFloatTy())
|
if (Ty->isFloatTy())
|
||||||
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
|
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
|
||||||
if (Ty->isDoubleTy())
|
if (Ty->isDoubleTy())
|
||||||
|
@ -1185,7 +1178,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
|
||||||
sys::llvm_fenv_clearexcept();
|
sys::llvm_fenv_clearexcept();
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Ty->isFloatTy())
|
if (Ty->isFloatTy())
|
||||||
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
|
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
|
||||||
if (Ty->isDoubleTy())
|
if (Ty->isDoubleTy())
|
||||||
|
@ -1279,7 +1272,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
|
||||||
case 'e':
|
case 'e':
|
||||||
if (Name == "exp" && TLI->has(LibFunc::exp))
|
if (Name == "exp" && TLI->has(LibFunc::exp))
|
||||||
return ConstantFoldFP(exp, V, Ty);
|
return ConstantFoldFP(exp, V, Ty);
|
||||||
|
|
||||||
if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
|
if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
|
||||||
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
|
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
|
||||||
// C99 library.
|
// C99 library.
|
||||||
|
@ -1355,7 +1348,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Support ConstantVector in case we have an Undef in the top.
|
// Support ConstantVector in case we have an Undef in the top.
|
||||||
if (isa<ConstantVector>(Operands[0]) ||
|
if (isa<ConstantVector>(Operands[0]) ||
|
||||||
isa<ConstantDataVector>(Operands[0])) {
|
isa<ConstantDataVector>(Operands[0])) {
|
||||||
Constant *Op = cast<Constant>(Operands[0]);
|
Constant *Op = cast<Constant>(Operands[0]);
|
||||||
switch (F->getIntrinsicID()) {
|
switch (F->getIntrinsicID()) {
|
||||||
|
@ -1374,11 +1367,11 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
|
||||||
case Intrinsic::x86_sse2_cvttsd2si64:
|
case Intrinsic::x86_sse2_cvttsd2si64:
|
||||||
if (ConstantFP *FPOp =
|
if (ConstantFP *FPOp =
|
||||||
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
|
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
|
||||||
return ConstantFoldConvertToInt(FPOp->getValueAPF(),
|
return ConstantFoldConvertToInt(FPOp->getValueAPF(),
|
||||||
/*roundTowardZero=*/true, Ty);
|
/*roundTowardZero=*/true, Ty);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isa<UndefValue>(Operands[0])) {
|
if (isa<UndefValue>(Operands[0])) {
|
||||||
if (F->getIntrinsicID() == Intrinsic::bswap)
|
if (F->getIntrinsicID() == Intrinsic::bswap)
|
||||||
return Operands[0];
|
return Operands[0];
|
||||||
|
@ -1392,14 +1385,14 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
|
||||||
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
|
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
|
||||||
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
|
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
|
||||||
return 0;
|
return 0;
|
||||||
double Op1V = Ty->isFloatTy() ?
|
double Op1V = Ty->isFloatTy() ?
|
||||||
(double)Op1->getValueAPF().convertToFloat() :
|
(double)Op1->getValueAPF().convertToFloat() :
|
||||||
Op1->getValueAPF().convertToDouble();
|
Op1->getValueAPF().convertToDouble();
|
||||||
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
|
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
|
||||||
if (Op2->getType() != Op1->getType())
|
if (Op2->getType() != Op1->getType())
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
double Op2V = Ty->isFloatTy() ?
|
double Op2V = Ty->isFloatTy() ?
|
||||||
(double)Op2->getValueAPF().convertToFloat():
|
(double)Op2->getValueAPF().convertToFloat():
|
||||||
Op2->getValueAPF().convertToDouble();
|
Op2->getValueAPF().convertToDouble();
|
||||||
|
|
||||||
|
@ -1426,7 +1419,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
|
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
|
||||||
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
|
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
|
||||||
switch (F->getIntrinsicID()) {
|
switch (F->getIntrinsicID()) {
|
||||||
|
@ -1476,7 +1469,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
|
||||||
return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
|
return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
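Note: `ConstantFoldFP` above only folds a call when the host libm computation raises no floating-point exception. A minimal stand-alone C++ sketch of that guard pattern (the names are ours, not LLVM's helper; `sys::llvm_fenv_*` is modeled with `<cfenv>`, and strictly portable fenv inspection would also want `#pragma STDC FENV_ACCESS ON`):

#include <cfenv>
#include <cmath>
#include <optional>

// Illustrative stand-in for ConstantFoldFP's exception guard: call a native
// libm function and refuse to fold if it raised any FP exception.
std::optional<double> foldIfExact(double (*nativeFP)(double), double v) {
  std::feclearexcept(FE_ALL_EXCEPT);      // mirrors sys::llvm_fenv_clearexcept()
  double r = nativeFP(v);
  if (std::fetestexcept(FE_ALL_EXCEPT)) { // mirrors sys::llvm_fenv_testexcept()
    std::feclearexcept(FE_ALL_EXCEPT);
    return std::nullopt;                  // the folder returns 0: no constant
  }
  return r;                               // safe to materialize as a ConstantFP
}

// Usage: foldIfExact(std::exp, 1.0) folds; foldIfExact(std::log, -1.0) does not.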
@@ -788,7 +788,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
   } while (Visited.insert(V));

-  Type *IntPtrTy = TD->getIntPtrType(V->getType());
+  Type *IntPtrTy = TD->getIntPtrType(V->getContext());
   return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
 }

@@ -828,7 +828,8 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
       // size of the byval type by the target's pointer size.
       PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
       unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
-      unsigned PointerSize = TD->getTypeSizeInBits(PTy);
+      unsigned AS = PTy->getAddressSpace();
+      unsigned PointerSize = TD->getPointerSizeInBits(AS);
       // Ceiling division.
       unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
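Note: the byval store estimate above is a plain ceiling division, the bit-size of the copied type over the pointer width, computed without floating point. A tiny self-checking sketch of the identity (names are ours):

#include <cassert>

// stores needed = ceil(typeBits / ptrBits), as in the NumStores line above.
unsigned ceilDiv(unsigned typeBits, unsigned ptrBits) {
  return (typeBits + ptrBits - 1) / ptrBits;
}

int main() {
  assert(ceilDiv(96, 64) == 2); // a 12-byte struct needs two 8-byte stores
  assert(ceilDiv(64, 64) == 1);
}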
@@ -728,7 +728,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
   } while (Visited.insert(V));

-  Type *IntPtrTy = TD.getIntPtrType(V->getContext(), AS);
+  Type *IntPtrTy = TD.getIntPtrType(V->getContext());
   return ConstantInt::get(IntPtrTy, Offset);
 }

@@ -1880,7 +1880,9 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
   // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
   // if the integer type is the same size as the pointer type.
   if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
-      Q.TD->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
+      Q.TD->getPointerSizeInBits(
+        cast<PtrToIntInst>(LI)->getPointerAddressSpace()) ==
+      DstTy->getPrimitiveSizeInBits()) {
     if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
       // Transfer the cast to the constant.
       if (Value *V = SimplifyICmpInst(Pred, SrcOp,

@@ -626,7 +626,8 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
       if (W != V)
         return findValueImpl(W, OffsetOk, Visited);
   } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
-    if (CI->isNoopCast(*TD))
+    if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
+                            Type::getInt64Ty(V->getContext())))
       return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
   } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
     if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),

@@ -639,7 +640,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
       if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
                                CE->getOperand(0)->getType(),
                                CE->getType(),
-                               TD ? TD->getIntPtrType(CE->getType()) :
+                               TD ? TD->getIntPtrType(V->getContext()) :
                                     Type::getInt64Ty(V->getContext())))
         return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
     } else if (CE->getOpcode() == Instruction::ExtractValue) {
@@ -376,10 +376,9 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
 ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
                                                  const TargetLibraryInfo *TLI,
                                                  LLVMContext &Context,
-                                                 bool RoundToAlign,
-                                                 unsigned AS)
+                                                 bool RoundToAlign)
 : TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) {
-  IntegerType *IntTy = TD->getIntPtrType(Context, AS);
+  IntegerType *IntTy = TD->getIntPtrType(Context);
   IntTyBits = IntTy->getBitWidth();
   Zero = APInt::getNullValue(IntTyBits);
 }

@@ -562,10 +561,9 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {

 ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
                                                      const TargetLibraryInfo *TLI,
-                                                     LLVMContext &Context,
-                                                     unsigned AS)
+                                                     LLVMContext &Context)
 : TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
-  IntTy = TD->getIntPtrType(Context, AS);
+  IntTy = TD->getIntPtrType(Context);
   Zero = ConstantInt::get(IntTy, 0);
 }

@@ -2586,12 +2586,13 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
   return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
 }

-const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy, Type *IntPtrTy) {
+const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
   // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
   if (TD)
-    return getConstant(IntPtrTy, TD->getTypeAllocSize(AllocTy));
+    return getConstant(TD->getIntPtrType(getContext()),
+                       TD->getTypeAllocSize(AllocTy));

   Constant *C = ConstantExpr::getSizeOf(AllocTy);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))

@@ -2610,13 +2611,13 @@ const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
   return getTruncateOrZeroExtend(getSCEV(C), Ty);
 }

-const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
+const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
                                              unsigned FieldNo) {
   // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
   if (TD)
-    return getConstant(IntPtrTy,
+    return getConstant(TD->getIntPtrType(getContext()),
                        TD->getStructLayout(STy)->getElementOffset(FieldNo));

   Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);

@@ -2703,7 +2704,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {

   // The only other support type is pointer.
   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
-  if (TD) return TD->getIntPtrType(Ty);
+  if (TD) return TD->getIntPtrType(getContext());

   // Without DataLayout, conservatively assume pointers are 64-bit.
   return Type::getInt64Ty(getContext());

@@ -3156,13 +3157,13 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
     if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
       // For a struct, add the member offset.
       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
-      const SCEV *FieldOffset = getOffsetOfExpr(STy, IntPtrTy, FieldNo);
+      const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);

       // Add the field offset to the running total offset.
       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
     } else {
       // For an array, add the element offset, explicitly scaled.
-      const SCEV *ElementSize = getSizeOfExpr(*GTI, IntPtrTy);
+      const SCEV *ElementSize = getSizeOfExpr(*GTI);
       const SCEV *IndexS = getSCEV(Index);
       // Getelementptr indices are signed.
       IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
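Note: `createNodeForGEP` above accumulates, per GEP step, either a struct member's fixed byte offset or `index * sizeof(element)` with a signed index. A hedged plain-C++ model of that accumulation (the types and names are illustrative, not SCEV's):

#include <cstdint>
#include <vector>

// Toy model of the GEP offset sum: struct steps add a fixed member offset;
// array steps add index * element-size (GEP indices are signed).
struct Step {
  bool isStruct;
  uint64_t fieldOffset; // byte offset of the struct member (struct case)
  int64_t index;        // GEP index (array case)
  uint64_t elemSize;    // allocation size of the element type (array case)
};

int64_t totalGEPOffset(const std::vector<Step> &steps) {
  int64_t total = 0;
  for (const Step &s : steps)
    total += s.isStruct ? (int64_t)s.fieldOffset
                        : s.index * (int64_t)s.elemSize;
  return total;
}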
@@ -417,9 +417,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   // array indexing.
   SmallVector<const SCEV *, 8> ScaledOps;
   if (ElTy->isSized()) {
-    Type *IntPtrTy = SE.TD ? SE.TD->getIntPtrType(PTy) :
-                             IntegerType::getInt64Ty(PTy->getContext());
-    const SCEV *ElSize = SE.getSizeOfExpr(ElTy, IntPtrTy);
+    const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
     if (!ElSize->isZero()) {
       SmallVector<const SCEV *, 8> NewOps;
       for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
@@ -385,8 +385,8 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
     // - __tlv_bootstrap - used to make sure support exists
     // - spare pointer, used when mapped by the runtime
     // - pointer to mangled symbol above with initializer
-    assert(GV->getType()->isPointerTy() && "GV must be a pointer type!");
-    unsigned PtrSize = TD->getTypeSizeInBits(GV->getType())/8;
+    unsigned AS = GV->getType()->getAddressSpace();
+    unsigned PtrSize = TD->getPointerSizeInBits(AS)/8;
     OutStreamer.EmitSymbolValue(GetExternalSymbolSymbol("_tlv_bootstrap"),
                                 PtrSize, 0);
     OutStreamer.EmitIntValue(0, PtrSize, 0);

@@ -1481,9 +1481,9 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
     if (Offset == 0)
       return Base;

-    assert(CE->getType()->isPointerTy() && "We must have a pointer type!");
+    unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
     // Truncate/sext the offset to the pointer size.
-    unsigned Width = TD.getTypeSizeInBits(CE->getType());
+    unsigned Width = TD.getPointerSizeInBits(AS);
     if (Width < 64)
       Offset = SignExtend64(Offset, Width);

@@ -1505,7 +1505,7 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
     // Handle casts to pointers by changing them into casts to the appropriate
     // integer type.  This promotes constant folding and simplifies this code.
     Constant *Op = CE->getOperand(0);
-    Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CE->getType()),
+    Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
                                       false/*ZExt*/);
     return lowerConstant(Op, AP);
   }
@@ -115,21 +115,21 @@ void IntrinsicLowering::AddPrototypes(Module &M) {
                             Type::getInt8PtrTy(Context),
                             Type::getInt8PtrTy(Context),
                             Type::getInt8PtrTy(Context),
-                            TD.getIntPtrType(Context, 0), (Type *)0);
+                            TD.getIntPtrType(Context), (Type *)0);
       break;
     case Intrinsic::memmove:
       M.getOrInsertFunction("memmove",
                             Type::getInt8PtrTy(Context),
                             Type::getInt8PtrTy(Context),
                             Type::getInt8PtrTy(Context),
-                            TD.getIntPtrType(Context, 0), (Type *)0);
+                            TD.getIntPtrType(Context), (Type *)0);
       break;
     case Intrinsic::memset:
       M.getOrInsertFunction("memset",
                             Type::getInt8PtrTy(Context),
                             Type::getInt8PtrTy(Context),
                             Type::getInt32Ty(M.getContext()),
-                            TD.getIntPtrType(Context, 0), (Type *)0);
+                            TD.getIntPtrType(Context), (Type *)0);
       break;
     case Intrinsic::sqrt:
       EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");

@@ -457,7 +457,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
     break; // Strip out annotate intrinsic

   case Intrinsic::memcpy: {
-    Type *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
+    Type *IntPtr = TD.getIntPtrType(Context);
     Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];

@@ -468,7 +468,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
     break;
   }
   case Intrinsic::memmove: {
-    Type *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
+    Type *IntPtr = TD.getIntPtrType(Context);
     Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];

@@ -479,7 +479,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
     break;
   }
   case Intrinsic::memset: {
-    Type *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
+    Type *IntPtr = TD.getIntPtrType(Context);
     Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];
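Note: `LowerIntrinsicCall` rewrites `llvm.memcpy`/`memmove`/`memset` as libcalls whose length argument must be the target's pointer-sized integer (C's `size_t`); the `CreateIntCast(..., /* isSigned */ false)` above performs exactly that unsigned resize. In C++ terms the lowering amounts to roughly this (a sketch; the wrapper name is ours):

#include <cstddef>
#include <cstring>

// The intrinsic's i32/i64 length operand is resized, unsigned, to size_t,
// the pointer-sized integer that DataLayout::getIntPtrType models.
void loweredMemcpy(void *dst, const void *src, unsigned long long len) {
  std::memcpy(dst, src, static_cast<std::size_t>(len));
}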
@@ -101,7 +101,8 @@ bool FastISel::hasTrivialKill(const Value *V) const {

   // No-op casts are trivially coalesced by fast-isel.
   if (const CastInst *Cast = dyn_cast<CastInst>(I))
-    if (Cast->isNoopCast(TD) && !hasTrivialKill(Cast->getOperand(0)))
+    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
+        !hasTrivialKill(Cast->getOperand(0)))
       return false;

   // GEPs with all zero indices are trivially coalesced by fast-isel.

@@ -174,7 +175,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
     // Translate this as an integer zero so that it can be
     // local-CSE'd with actual integer zeros.
     Reg =
-      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getType())));
+      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
   } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
     if (CF->isNullValue()) {
       Reg = TargetMaterializeFloatZero(CF);

@@ -3791,8 +3791,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
   // Emit a library call.
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
-  unsigned AS = SrcPtrInfo.getAddrSpace();
-  Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
+  Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
   Entry.Node = Dst; Args.push_back(Entry);
   Entry.Node = Src; Args.push_back(Entry);
   Entry.Node = Size; Args.push_back(Entry);

@@ -3847,8 +3846,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
   // Emit a library call.
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
-  unsigned AS = SrcPtrInfo.getAddrSpace();
-  Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
+  Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
   Entry.Node = Dst; Args.push_back(Entry);
   Entry.Node = Src; Args.push_back(Entry);
   Entry.Node = Size; Args.push_back(Entry);

@@ -3897,8 +3895,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
     return Result;

   // Emit a library call.
-  unsigned AS = DstPtrInfo.getAddrSpace();
-  Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
+  Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
   Entry.Node = Dst; Entry.Ty = IntPtrTy;

@@ -155,8 +155,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
   TargetLowering::ArgListEntry Entry;

   // First argument: data pointer
-  unsigned AS = DstPtrInfo.getAddrSpace();
-  Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
+  Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext());
   Entry.Node = Dst;
   Entry.Ty = IntPtrTy;
   Args.push_back(Entry);

@@ -126,9 +126,10 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
       return Base;

     // Truncate/sext the offset to the pointer size.
-    unsigned PtrSize = TD.getPointerTypeSizeInBits(PtrVal->getType());
-    if (PtrSize != 64) {
-      int SExtAmount = 64-PtrSize;
+    unsigned AS = PtrVal->getType()->isPointerTy() ?
+      cast<PointerType>(PtrVal->getType())->getAddressSpace() : 0;
+    if (TD.getPointerSizeInBits(AS) != 64) {
+      int SExtAmount = 64-TD.getPointerSizeInBits(AS);
       Offset = (Offset << SExtAmount) >> SExtAmount;
     }

@@ -150,7 +151,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
     // Handle casts to pointers by changing them into casts to the appropriate
     // integer type.  This promotes constant folding and simplifies this code.
     Constant *Op = CE->getOperand(0);
-    Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CE->getType()),
+    Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
                                       false/*ZExt*/);
     return LowerConstant(Op, AP);
   }
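Note: the NVPTX hunk above sign-extends a sub-64-bit offset with a shift pair, pushing the value to the top of a 64-bit integer and arithmetic-shifting it back replicates the sign bit, the same job `SignExtend64` does in the AsmPrinter hunk earlier. A small self-checking sketch of the identity (our names; like the source, it assumes the usual two's-complement arithmetic right shift):

#include <cassert>
#include <cstdint>

// Sign-extend the low `width` bits of `offset`: (x << k) >> k with an
// arithmetic right shift, mirroring the LowerConstant shift pair.
int64_t signExtendLowBits(uint64_t offset, unsigned width) {
  unsigned k = 64 - width;
  return (int64_t)(offset << k) >> k; // arithmetic shift on typical targets
}

int main() {
  // A 32-bit pointer-width offset of 0xFFFFFFFF is really -1.
  assert(signExtendLowBits(0xFFFFFFFFULL, 32) == -1);
  assert(signExtendLowBits(0x7FFFFFFFULL, 32) == 0x7FFFFFFF);
}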
@@ -1512,10 +1512,9 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,

   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   bool isPPC64 = (PtrVT == MVT::i64);
-  unsigned AS = 0;
   Type *IntPtrTy =
     DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
-                                                 *DAG.getContext(), AS);
+                                                 *DAG.getContext());

   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;

@@ -64,7 +64,7 @@ unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS) {
 }

 LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) {
-  return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), 0));
+  return wrap(unwrap(TD)->getIntPtrType(getGlobalContext()));
 }

 LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) {

@@ -282,9 +282,8 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
 bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
                                    const X86AddressMode &AM) {
   // Handle 'null' like i32/i64 0.
-  if (isa<ConstantPointerNull>(Val)) {
-    Val = Constant::getNullValue(TD.getIntPtrType(Val->getType()));
-  }
+  if (isa<ConstantPointerNull>(Val))
+    Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));

   // If this is a store of a simple constant, fold the constant into the store.
   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {

@@ -895,9 +894,8 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
   if (Op0Reg == 0) return false;

   // Handle 'null' like i32/i64 0.
-  if (isa<ConstantPointerNull>(Op1)) {
-    Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getType()));
-  }
+  if (isa<ConstantPointerNull>(Op1))
+    Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));

   // We have two options: compare with register or immediate.  If the RHS of
   // the compare is an immediate that we can fold into this compare, use

@@ -54,8 +54,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
   if (const char *bzeroEntry = V &&
       V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
     EVT IntPtr = TLI.getPointerTy();
-    unsigned AS = DstPtrInfo.getAddrSpace();
-    Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
+    Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
     TargetLowering::ArgListTy Args;
     TargetLowering::ArgListEntry Entry;
     Entry.Node = Dst;

@@ -477,8 +477,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   }

   // Lower to a call to __misaligned_load(BasePtr).
-  unsigned AS = LD->getAddressSpace();
-  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
+  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;

@@ -537,8 +536,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
   }

   // Lower to a call to __misaligned_store(BasePtr, Value).
-  unsigned AS = ST->getAddressSpace();
-  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
+  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;

@@ -1500,7 +1500,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
     unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
     if (StructType *ST = dyn_cast<StructType>(FieldTy))
       TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
-    Type *IntPtrTy = TD->getIntPtrType(GV->getType());
+    Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
     Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                         ConstantInt::get(IntPtrTy, TypeSize),
                                         NElems, 0,

@@ -1730,7 +1730,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
   // If this is a fixed size array, transform the Malloc to be an alloc of
   // structs.  malloc [100 x struct],1 -> malloc struct, 100
   if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
-    Type *IntPtrTy = TD->getIntPtrType(GV->getType());
+    Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
     unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
     Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
     Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());

@@ -206,8 +206,9 @@ bool FunctionComparator::isEquivalentType(Type *Ty1,
     return true;
   if (Ty1->getTypeID() != Ty2->getTypeID()) {
     if (TD) {
-      if (isa<PointerType>(Ty1) && Ty2 == TD->getIntPtrType(Ty1)) return true;
-      if (isa<PointerType>(Ty2) && Ty1 == TD->getIntPtrType(Ty2)) return true;
+      LLVMContext &Ctx = Ty1->getContext();
+      if (isa<PointerType>(Ty1) && Ty2 == TD->getIntPtrType(Ctx)) return true;
+      if (isa<PointerType>(Ty2) && Ty1 == TD->getIntPtrType(Ctx)) return true;
     }
     return false;
   }
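Note: `isEquivalentType` above treats a pointer and the target's pointer-sized integer as interchangeable when deciding whether two functions can be merged. A toy model of that check (the enum and names are ours, not MergeFunctions'):

#include <cstdint>

// Two "types" are equivalent if identical, or if one is a pointer and the
// other is exactly the target's pointer-sized integer.
enum class Kind { Pointer, Integer };
struct Ty { Kind kind; unsigned bits; };

bool equivalentType(Ty a, Ty b, unsigned intPtrBits) {
  if (a.kind == b.kind && a.bits == b.bits) return true;
  if (a.kind == Kind::Pointer && b.kind == Kind::Integer)
    return b.bits == intPtrBits;
  if (b.kind == Kind::Pointer && a.kind == Kind::Integer)
    return a.bits == intPtrBits;
  return false;
}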
@@ -208,7 +208,7 @@ private:
   bool ShouldChangeType(Type *From, Type *To) const;
   Value *dyn_castNegVal(Value *V) const;
   Value *dyn_castFNegVal(Value *V) const;
-  Type *FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy,
+  Type *FindElementAtOffset(Type *Ty, int64_t Offset,
                             SmallVectorImpl<Value*> &NewIndices);
   Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);

@@ -996,9 +996,9 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
       // Conversion is ok if changing from one pointer type to another or from
       // a pointer to an integer of the same size.
       !((OldRetTy->isPointerTy() || !TD ||
-         OldRetTy == TD->getIntPtrType(NewRetTy)) &&
+         OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
         (NewRetTy->isPointerTy() || !TD ||
-         NewRetTy == TD->getIntPtrType(OldRetTy))))
+         NewRetTy == TD->getIntPtrType(Caller->getContext()))))
     return false;   // Cannot transform this return value.

   if (!Caller->use_empty() &&

@@ -1057,13 +1057,11 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {

     // Converting from one pointer type to another or between a pointer and an
     // integer of the same size is safe even if we do not have a body.
-    // FIXME: Not sure what to do here, so setting AS to 0.
-    // How can the AS for a function call be outside the default?
     bool isConvertible = ActTy == ParamTy ||
       (TD && ((ParamTy->isPointerTy() ||
-               ParamTy == TD->getIntPtrType(ActTy)) &&
+               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
               (ActTy->isPointerTy() ||
-               ActTy == TD->getIntPtrType(ParamTy))));
+               ActTy == TD->getIntPtrType(Caller->getContext()))));
     if (Callee->isDeclaration() && !isConvertible) return false;
   }

@@ -30,7 +30,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
     Scale = 0;
     return ConstantInt::get(Val->getType(), 0);
   }

   if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
     // Cannot look past anything that might overflow.
     OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);

@@ -47,19 +47,19 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
       Offset = 0;
       return I->getOperand(0);
     }

     if (I->getOpcode() == Instruction::Mul) {
       // This value is scaled by 'RHS'.
       Scale = RHS->getZExtValue();
       Offset = 0;
       return I->getOperand(0);
     }

     if (I->getOpcode() == Instruction::Add) {
       // We have X+C.  Check to see if we really have (X*C2)+C1,
       // where C1 is divisible by C2.
       unsigned SubScale;
       Value *SubVal =
         DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
       Offset += RHS->getZExtValue();
       Scale = SubScale;

@@ -82,7 +82,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
   if (!TD) return 0;

   PointerType *PTy = cast<PointerType>(CI.getType());

   BuilderTy AllocaBuilder(*Builder);
   AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);

@@ -110,7 +110,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
   uint64_t ArrayOffset;
   Value *NumElements = // See if the array size is a decomposable linear expr.
     DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

   // If we can now satisfy the modulus, by using a non-1 scale, we really can
   // do the xform.
   if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||

@@ -125,17 +125,17 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
     // Insert before the alloca, not before the cast.
     Amt = AllocaBuilder.CreateMul(Amt, NumElements);
   }

   if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
     Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                   Offset, true);
     Amt = AllocaBuilder.CreateAdd(Amt, Off);
   }

   AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
   New->setAlignment(AI.getAlignment());
   New->takeName(&AI);

   // If the allocation has multiple real uses, insert a cast and change all
   // things that used it to use the new cast.  This will also hack on CI, but it
   // will die soon.

@@ -148,10 +148,10 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
   return ReplaceInstUsesWith(CI, New);
 }

 /// EvaluateInDifferentType - Given an expression that
 /// CanEvaluateTruncated or CanEvaluateSExtd returns true for, actually
 /// insert the code to evaluate the expression.
 Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
                                              bool isSigned) {
   if (Constant *C = dyn_cast<Constant>(V)) {
     C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);

@@ -181,7 +181,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
     Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
     Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
     break;
   }
   case Instruction::Trunc:
   case Instruction::ZExt:
   case Instruction::SExt:

@@ -190,7 +190,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
     // new.
     if (I->getOperand(0)->getType() == Ty)
       return I->getOperand(0);

     // Otherwise, must be the same type of cast, so just reinsert a new one.
     // This also handles the case of zext(trunc(x)) -> zext(x).
     Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,

@@ -212,11 +212,11 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
     Res = NPN;
     break;
   }
   default:
     // TODO: Can handle more cases here.
     llvm_unreachable("Unreachable!");
   }

   Res->takeName(I);
   return InsertNewInstWith(Res, *I);
 }

@@ -224,7 +224,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,

 /// This function is a wrapper around CastInst::isEliminableCastPair. It
 /// simply extracts arguments and returns what that function returns.
 static Instruction::CastOps
 isEliminableCastPair(
   const CastInst *CI, ///< The first cast instruction
   unsigned opcode,    ///< The opcode of the second cast instruction

@@ -253,7 +253,7 @@ isEliminableCastPair(
   if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
       (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
     Res = 0;

   return Instruction::CastOps(Res);
 }

@@ -265,18 +265,18 @@ bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
                                       Type *Ty) {
   // Noop casts and casts of constants should be eliminated trivially.
   if (V->getType() == Ty || isa<Constant>(V)) return false;

   // If this is another cast that can be eliminated, we prefer to have it
   // eliminated.
   if (const CastInst *CI = dyn_cast<CastInst>(V))
     if (isEliminableCastPair(CI, opc, Ty, TD))
       return false;

   // If this is a vector sext from a compare, then we don't want to break the
   // idiom where each element of the extended vector is either zero or all ones.
   if (opc == Instruction::SExt && isa<CmpInst>(V) && Ty->isVectorTy())
     return false;

   return true;
 }

@@ -288,7 +288,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
   // Many cases of "cast of a cast" are eliminable.  If it's eliminable we just
   // eliminate it now.
   if (CastInst *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
     if (Instruction::CastOps opc =
         isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
       // The first cast (CSrc) is eliminable so we need to fix up or replace
       // the second cast (CI). CSrc will then have a good chance of being dead.

@@ -311,7 +311,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
     if (Instruction *NV = FoldOpIntoPhi(CI))
       return NV;
   }

   return 0;
 }

@@ -330,15 +330,15 @@ static bool CanEvaluateTruncated(Value *V, Type *Ty) {
   // We can always evaluate constants in another type.
   if (isa<Constant>(V))
     return true;

   Instruction *I = dyn_cast<Instruction>(V);
   if (!I) return false;

   Type *OrigTy = V->getType();

   // If this is an extension from the dest type, we can eliminate it, even if it
   // has multiple uses.
   if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
       I->getOperand(0)->getType() == Ty)
     return true;

@@ -423,29 +423,29 @@ static bool CanEvaluateTruncated(Value *V, Type *Ty) {
     // TODO: Can handle more cases here.
     break;
   }

   return false;
 }

 Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
   if (Instruction *Result = commonCastTransforms(CI))
     return Result;

   // See if we can simplify any instructions used by the input whose sole
   // purpose is to compute bits we don't care about.
   if (SimplifyDemandedInstructionBits(CI))
     return &CI;

   Value *Src = CI.getOperand(0);
   Type *DestTy = CI.getType(), *SrcTy = Src->getType();

   // Attempt to truncate the entire input expression tree to the destination
   // type.   Only do this if the dest type is a simple type, don't convert the
   // expression tree to something weird like i93 unless the source is also
   // strange.
   if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
       CanEvaluateTruncated(Src, DestTy)) {

     // If this cast is a truncate, evaluting in a different type always
     // eliminates the cast, so it is always a win.
     DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"

@@ -462,7 +462,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
     Value *Zero = Constant::getNullValue(Src->getType());
     return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
   }

   // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
   Value *A = 0; ConstantInt *Cst = 0;
   if (Src->hasOneUse() &&

@@ -472,7 +472,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
     // ASize < MidSize   and MidSize > ResultSize, but don't know the relation
     // between ASize and ResultSize.
     unsigned ASize = A->getType()->getPrimitiveSizeInBits();

     // If the shift amount is larger than the size of A, then the result is
     // known to be zero because all the input bits got shifted out.
     if (Cst->getZExtValue() >= ASize)

@@ -485,7 +485,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
     Shift->takeName(Src);
     return CastInst::CreateIntegerCast(Shift, CI.getType(), false);
   }

   // Transform "trunc (and X, cst)" -> "and (trunc X), cst" so long as the dest
   // type isn't non-native.
   if (Src->hasOneUse() && isa<IntegerType>(Src->getType()) &&

@@ -508,7 +508,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
   // cast to integer to avoid the comparison.
   if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
     const APInt &Op1CV = Op1C->getValue();

     // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
     // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
     if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
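Note: the two comment lines above encode a branch-free identity: for a 32-bit `x`, the logical shift `x >>u 31` is 1 exactly when the sign bit is set, so `zext(x <s 0)` is just that shift and `zext(x >s -1)` is the shift XORed with 1. A self-checking sketch mirroring the comments (our names):

#include <cassert>
#include <cstdint>

// zext (x <s 0)  to i32 --> x >>u 31       (1 iff the sign bit is set)
// zext (x >s -1) to i32 --> (x >>u 31) ^ 1 (1 iff the sign bit is clear)
uint32_t zextIsNegative(int32_t x)    { return (uint32_t)x >> 31; }
uint32_t zextIsNonNegative(int32_t x) { return ((uint32_t)x >> 31) ^ 1u; }

int main() {
  assert(zextIsNegative(-5) == 1 && zextIsNegative(7) == 0);
  assert(zextIsNonNegative(-5) == 0 && zextIsNonNegative(7) == 1);
}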
@ -538,14 +538,14 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
|
||||||
// zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
|
// zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
|
||||||
// zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
|
// zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
|
||||||
// zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
|
// zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
|
||||||
if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
|
if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
|
||||||
// This only works for EQ and NE
|
// This only works for EQ and NE
|
||||||
ICI->isEquality()) {
|
ICI->isEquality()) {
|
||||||
// If Op1C some other power of two, convert:
|
// If Op1C some other power of two, convert:
|
||||||
uint32_t BitWidth = Op1C->getType()->getBitWidth();
|
uint32_t BitWidth = Op1C->getType()->getBitWidth();
|
||||||
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
|
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
|
||||||
ComputeMaskedBits(ICI->getOperand(0), KnownZero, KnownOne);
|
ComputeMaskedBits(ICI->getOperand(0), KnownZero, KnownOne);
|
||||||
|
|
||||||
APInt KnownZeroMask(~KnownZero);
|
APInt KnownZeroMask(~KnownZero);
|
||||||
if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
|
if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
|
||||||
if (!DoXform) return ICI;
|
if (!DoXform) return ICI;
|
||||||
|
@@ -559,7 +559,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
          Res = ConstantExpr::getZExt(Res, CI.getType());
          return ReplaceInstUsesWith(CI, Res);
        }

        uint32_t ShiftAmt = KnownZeroMask.logBase2();
        Value *In = ICI->getOperand(0);
        if (ShiftAmt) {

@@ -568,12 +568,12 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
          In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
                                   In->getName()+".lobit");
        }

        if ((Op1CV != 0) == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder->CreateXor(In, One);
        }

        if (CI.getType() == In->getType())
          return ReplaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);

@@ -646,19 +646,19 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
  BitsToClear = 0;
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // If the input is a truncate from the destination type, we can trivially
  // eliminate it.
  if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
    return true;

  // We can't extend or shrink something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  unsigned Opc = I->getOpcode(), Tmp;
  switch (Opc) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).

@@ -678,7 +678,7 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 &&

@@ -691,10 +691,10 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
                               APInt::getHighBitsSet(VSize, BitsToClear)))
        return true;
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::LShr:
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.

@@ -716,7 +716,7 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
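
The BitsToClear contract is easiest to see on the lshr case just above:
promoting "zext i32 (lshr (trunc i64 %A), 8) to i64" to one wide lshr leaves
8 garbage bits above the 24 that are still meaningful, and the caller masks
them off. A C++ spot check of that equivalence (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t samples[] = {0, 1, 0xFFFFFFFFULL,
                                  0x123456789ABCDEF0ULL, ~0ULL};
      for (uint64_t a : samples) {
        // Narrow: %B = trunc i64 %A to i32; %C = lshr i32 %B, 8;
        //         %E = zext i32 %C to i64
        uint64_t narrow = uint64_t(uint32_t(a) >> 8);
        // Wide: lshr i64 %A, 8, then clear BitsToClear = 8 bits above the
        // SrcBitsKept = 32 - 8 = 24 low bits that remain valid.
        uint64_t wide = (a >> 8) & 0xFFFFFF;
        assert(narrow == wide);
      }
    }
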
@@ -743,44 +743,44 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
    return 0;

  // If one of the common conversion will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Attempt to extend the entire input expression tree to the destination
  // type.   Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  unsigned BitsToClear;
  if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
      CanEvaluateZExtd(Src, DestTy, BitsToClear)) {
    assert(BitsToClear < SrcTy->getScalarSizeInBits() &&
           "Unreasonable BitsToClear");

    // Okay, we can transform this!  Insert the new expression now.
    DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
          " to avoid zero extend: " << CI);
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res, APInt::getHighBitsSet(DestBitSize,
                                                     DestBitSize-SrcBitsKept)))
      return ReplaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                               APInt::getLowBitsSet(DestBitSize, SrcBitsKept));

@@ -792,7 +792,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);

@@ -809,7 +809,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
      Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),

@@ -818,7 +818,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
    if (SrcSize > DstSize) {
      Value *Trunc = Builder->CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }

@@ -876,7 +876,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
      Value *New = Builder->CreateZExt(X, CI.getType());
      return BinaryOperator::CreateXor(New, ConstantInt::get(CI.getType(), 1));
    }

  return 0;
}

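
The A->B->C case above collapses a zext of a trunc into one mask when the
outer and inner types match. In C++ terms, for the 32 -> 16 -> 32 path
(illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t samples[] = {0, 1, 0xFFFF, 0x10000, 0xDEADBEEF, ~0u};
      for (uint32_t a : samples) {
        // zext i16 (trunc i32 %A to i16) to i32 ...
        uint32_t casts = uint32_t(uint16_t(a));
        // ... becomes a single 'and' with the low-bit mask.
        uint32_t masked = a & 0xFFFF;
        assert(casts == masked);
      }
    }
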
@@ -989,14 +989,14 @@ static bool CanEvaluateSExtd(Value *V, Type *Ty) {
  // If this is a constant, it can be trivially promoted.
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // If this is a truncate from the dest type, we can trivially eliminate it.
  if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
    return true;

  // We can't extend or shrink something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

@@ -1015,14 +1015,14 @@ static bool CanEvaluateSExtd(Value *V, Type *Ty) {
    // These operators can all arbitrarily be extended if their inputs can.
    return CanEvaluateSExtd(I->getOperand(0), Ty) &&
           CanEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return CanEvaluateSExtd(I->getOperand(1), Ty) &&
           CanEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider

@@ -1036,7 +1036,7 @@ static bool CanEvaluateSExtd(Value *V, Type *Ty) {
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

@@ -1045,15 +1045,15 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
    return 0;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

@@ -1076,7 +1076,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
    // cast with the result.
    if (ComputeNumSignBits(Res) > DestBitSize - SrcBitSize)
      return ReplaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
    return BinaryOperator::CreateAShr(Builder->CreateShl(Res, ShAmt, "sext"),

@@ -1089,7 +1089,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
    if (TI->hasOneUse() && TI->getOperand(0)->getType() == DestTy) {
      uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
      uint32_t DestBitSize = DestTy->getScalarSizeInBits();

      // We need to emit a shl + ashr to do the sign extend.
      Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
      Value *Res = Builder->CreateShl(TI->getOperand(0), ShAmt, "sext");
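
The shl+ashr pair emitted above is the classical in-register sign extend. A
C++ sketch for sign-extending the low 8 bits of a 32-bit value this way
(assumes two's complement and an arithmetic right shift on signed types,
which the IR guarantees but C++ only pins down from C++20 on):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t samples[] = {0x00, 0x7F, 0x80, 0xFF,
                                  0x1234567F, 0xCAFE0080};
      for (uint32_t a : samples) {
        // shl 24 followed by ashr 24, as the transform emits.
        int32_t shifted = int32_t(a << 24) >> 24;
        // Reference sign extension through int8_t.
        int32_t reference = int32_t(int8_t(uint8_t(a)));
        assert(shifted == reference);
      }
    }
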
@@ -1125,7 +1125,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
      A = Builder->CreateShl(A, ShAmtV, CI.getName());
      return BinaryOperator::CreateAShr(A, ShAmtV);
    }

  return 0;
}

@@ -1147,7 +1147,7 @@ static Value *LookThroughFPExtensions(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (I->getOpcode() == Instruction::FPExt)
      return LookThroughFPExtensions(I->getOperand(0));

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it.  This allows us to turn
  // (float)((double)X+2.0) into x+2.0f.

@@ -1166,14 +1166,14 @@ static Value *LookThroughFPExtensions(Value *V) {
      return V;
    // Don't try to shrink to various long double types.
  }

  return V;
}

Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
  if (Instruction *I = commonCastTransforms(CI))
    return I;

  // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
  // smaller than the destination type, we can eliminate the truncate by doing
  // the add as the smaller type.  This applies to fadd/fsub/fmul/fdiv as well

@@ -1190,7 +1190,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
      Type *SrcTy = OpI->getType();
      Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
      Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
      if (LHSTrunc->getType() != SrcTy &&
          RHSTrunc->getType() != SrcTy) {
        unsigned DstSize = CI.getType()->getScalarSizeInBits();
        // If the source types were both smaller than the destination type of

@@ -1202,10 +1202,10 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
          return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
        }
      }
      break;
    }
  }

  // Fold (fptrunc (sqrt (fpext x))) -> (sqrtf x)
  CallInst *Call = dyn_cast<CallInst>(CI.getOperand(0));
  if (Call && Call->getCalledFunction() && TLI->has(LibFunc::sqrtf) &&
|
||||||
Arg->getOperand(0)->getType()->isFloatTy()) {
|
Arg->getOperand(0)->getType()->isFloatTy()) {
|
||||||
Function *Callee = Call->getCalledFunction();
|
Function *Callee = Call->getCalledFunction();
|
||||||
Module *M = CI.getParent()->getParent()->getParent();
|
Module *M = CI.getParent()->getParent()->getParent();
|
||||||
Constant *SqrtfFunc = M->getOrInsertFunction("sqrtf",
|
Constant *SqrtfFunc = M->getOrInsertFunction("sqrtf",
|
||||||
Callee->getAttributes(),
|
Callee->getAttributes(),
|
||||||
Builder->getFloatTy(),
|
Builder->getFloatTy(),
|
||||||
Builder->getFloatTy(),
|
Builder->getFloatTy(),
|
||||||
|
@ -1228,15 +1228,15 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
|
||||||
CallInst *ret = CallInst::Create(SqrtfFunc, Arg->getOperand(0),
|
CallInst *ret = CallInst::Create(SqrtfFunc, Arg->getOperand(0),
|
||||||
"sqrtfcall");
|
"sqrtfcall");
|
||||||
ret->setAttributes(Callee->getAttributes());
|
ret->setAttributes(Callee->getAttributes());
|
||||||
|
|
||||||
|
|
||||||
// Remove the old Call. With -fmath-errno, it won't get marked readnone.
|
// Remove the old Call. With -fmath-errno, it won't get marked readnone.
|
||||||
ReplaceInstUsesWith(*Call, UndefValue::get(Call->getType()));
|
ReplaceInstUsesWith(*Call, UndefValue::get(Call->getType()));
|
||||||
EraseInstFromFunction(*Call);
|
EraseInstFromFunction(*Call);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1254,7 +1254,7 @@ Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
|
||||||
// This is safe if the intermediate type has enough bits in its mantissa to
|
// This is safe if the intermediate type has enough bits in its mantissa to
|
||||||
// accurately represent all values of X. For example, do not do this with
|
// accurately represent all values of X. For example, do not do this with
|
||||||
// i64->float->i64. This is also safe for sitofp case, because any negative
|
// i64->float->i64. This is also safe for sitofp case, because any negative
|
||||||
// 'X' value would cause an undefined result for the fptoui.
|
// 'X' value would cause an undefined result for the fptoui.
|
||||||
if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
|
if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
|
||||||
OpI->getOperand(0)->getType() == FI.getType() &&
|
OpI->getOperand(0)->getType() == FI.getType() &&
|
||||||
(int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
|
(int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
|
||||||
|
@ -1268,19 +1268,19 @@ Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
|
||||||
Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
|
Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
|
||||||
if (OpI == 0)
|
if (OpI == 0)
|
||||||
return commonCastTransforms(FI);
|
return commonCastTransforms(FI);
|
||||||
|
|
||||||
// fptosi(sitofp(X)) --> X
|
// fptosi(sitofp(X)) --> X
|
||||||
// fptosi(uitofp(X)) --> X
|
// fptosi(uitofp(X)) --> X
|
||||||
// This is safe if the intermediate type has enough bits in its mantissa to
|
// This is safe if the intermediate type has enough bits in its mantissa to
|
||||||
// accurately represent all values of X. For example, do not do this with
|
// accurately represent all values of X. For example, do not do this with
|
||||||
// i64->float->i64. This is also safe for sitofp case, because any negative
|
// i64->float->i64. This is also safe for sitofp case, because any negative
|
||||||
// 'X' value would cause an undefined result for the fptoui.
|
// 'X' value would cause an undefined result for the fptoui.
|
||||||
if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
|
if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
|
||||||
OpI->getOperand(0)->getType() == FI.getType() &&
|
OpI->getOperand(0)->getType() == FI.getType() &&
|
||||||
(int)FI.getType()->getScalarSizeInBits() <=
|
(int)FI.getType()->getScalarSizeInBits() <=
|
||||||
OpI->getType()->getFPMantissaWidth())
|
OpI->getType()->getFPMantissaWidth())
|
||||||
return ReplaceInstUsesWith(FI, OpI->getOperand(0));
|
return ReplaceInstUsesWith(FI, OpI->getOperand(0));
|
||||||
|
|
||||||
return commonCastTransforms(FI);
|
return commonCastTransforms(FI);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1301,17 +1301,17 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
|
||||||
if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
|
if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
|
||||||
TD->getPointerSizeInBits(AS)) {
|
TD->getPointerSizeInBits(AS)) {
|
||||||
Value *P = Builder->CreateTrunc(CI.getOperand(0),
|
Value *P = Builder->CreateTrunc(CI.getOperand(0),
|
||||||
TD->getIntPtrType(CI.getType()));
|
TD->getIntPtrType(CI.getContext()));
|
||||||
return new IntToPtrInst(P, CI.getType());
|
return new IntToPtrInst(P, CI.getType());
|
||||||
}
|
}
|
||||||
if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
|
if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
|
||||||
TD->getPointerSizeInBits(AS)) {
|
TD->getPointerSizeInBits(AS)) {
|
||||||
Value *P = Builder->CreateZExt(CI.getOperand(0),
|
Value *P = Builder->CreateZExt(CI.getOperand(0),
|
||||||
TD->getIntPtrType(CI.getType()));
|
TD->getIntPtrType(CI.getContext()));
|
||||||
return new IntToPtrInst(P, CI.getType());
|
return new IntToPtrInst(P, CI.getType());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Instruction *I = commonCastTransforms(CI))
|
if (Instruction *I = commonCastTransforms(CI))
|
||||||
return I;
|
return I;
|
||||||
|
|
||||||
|
@ -1321,19 +1321,19 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
|
||||||
/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
|
/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
|
||||||
Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
|
Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
|
||||||
Value *Src = CI.getOperand(0);
|
Value *Src = CI.getOperand(0);
|
||||||
|
|
||||||
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
|
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
|
||||||
// If casting the result of a getelementptr instruction with no offset, turn
|
// If casting the result of a getelementptr instruction with no offset, turn
|
||||||
// this into a cast of the original pointer!
|
// this into a cast of the original pointer!
|
||||||
if (GEP->hasAllZeroIndices()) {
|
if (GEP->hasAllZeroIndices()) {
|
||||||
// Changing the cast operand is usually not a good idea but it is safe
|
// Changing the cast operand is usually not a good idea but it is safe
|
||||||
// here because the pointer operand is being replaced with another
|
// here because the pointer operand is being replaced with another
|
||||||
// pointer operand so the opcode doesn't need to change.
|
// pointer operand so the opcode doesn't need to change.
|
||||||
Worklist.Add(GEP);
|
Worklist.Add(GEP);
|
||||||
CI.setOperand(0, GEP->getOperand(0));
|
CI.setOperand(0, GEP->getOperand(0));
|
||||||
return &CI;
|
return &CI;
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the GEP has a single use, and the base pointer is a bitcast, and the
|
// If the GEP has a single use, and the base pointer is a bitcast, and the
|
||||||
// GEP computes a constant offset, see if we can convert these three
|
// GEP computes a constant offset, see if we can convert these three
|
||||||
// instructions into fewer. This typically happens with unions and other
|
// instructions into fewer. This typically happens with unions and other
|
||||||
|
@ -1348,8 +1348,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
|
||||||
Type *GEPIdxTy =
|
Type *GEPIdxTy =
|
||||||
cast<PointerType>(OrigBase->getType())->getElementType();
|
cast<PointerType>(OrigBase->getType())->getElementType();
|
||||||
SmallVector<Value*, 8> NewIndices;
|
SmallVector<Value*, 8> NewIndices;
|
||||||
Type *IntPtrTy = TD->getIntPtrType(OrigBase->getType());
|
if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices)) {
|
||||||
if (FindElementAtOffset(GEPIdxTy, Offset, IntPtrTy, NewIndices)) {
|
|
||||||
// If we were able to index down into an element, create the GEP
|
// If we were able to index down into an element, create the GEP
|
||||||
// and bitcast the result. This eliminates one bitcast, potentially
|
// and bitcast the result. This eliminates one bitcast, potentially
|
||||||
// two.
|
// two.
|
||||||
|
@ -1357,15 +1356,15 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
|
||||||
Builder->CreateInBoundsGEP(OrigBase, NewIndices) :
|
Builder->CreateInBoundsGEP(OrigBase, NewIndices) :
|
||||||
Builder->CreateGEP(OrigBase, NewIndices);
|
Builder->CreateGEP(OrigBase, NewIndices);
|
||||||
NGEP->takeName(GEP);
|
NGEP->takeName(GEP);
|
||||||
|
|
||||||
if (isa<BitCastInst>(CI))
|
if (isa<BitCastInst>(CI))
|
||||||
return new BitCastInst(NGEP, CI.getType());
|
return new BitCastInst(NGEP, CI.getType());
|
||||||
assert(isa<PtrToIntInst>(CI));
|
assert(isa<PtrToIntInst>(CI));
|
||||||
return new PtrToIntInst(NGEP, CI.getType());
|
return new PtrToIntInst(NGEP, CI.getType());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return commonCastTransforms(CI);
|
return commonCastTransforms(CI);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1377,16 +1376,16 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
|
||||||
if (TD) {
|
if (TD) {
|
||||||
if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits(AS)) {
|
if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits(AS)) {
|
||||||
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
|
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
|
||||||
TD->getIntPtrType(CI.getContext(), AS));
|
TD->getIntPtrType(CI.getContext()));
|
||||||
return new TruncInst(P, CI.getType());
|
return new TruncInst(P, CI.getType());
|
||||||
}
|
}
|
||||||
if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits(AS)) {
|
if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits(AS)) {
|
||||||
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
|
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
|
||||||
TD->getIntPtrType(CI.getContext(), AS));
|
TD->getIntPtrType(CI.getContext()));
|
||||||
return new ZExtInst(P, CI.getType());
|
return new ZExtInst(P, CI.getType());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return commonPointerCastTransforms(CI);
|
return commonPointerCastTransforms(CI);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1401,33 +1400,33 @@ static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
|
||||||
// element size, or the input is a multiple of the output element size.
|
// element size, or the input is a multiple of the output element size.
|
||||||
// Convert the input type to have the same element type as the output.
|
// Convert the input type to have the same element type as the output.
|
||||||
VectorType *SrcTy = cast<VectorType>(InVal->getType());
|
VectorType *SrcTy = cast<VectorType>(InVal->getType());
|
||||||
|
|
||||||
if (SrcTy->getElementType() != DestTy->getElementType()) {
|
if (SrcTy->getElementType() != DestTy->getElementType()) {
|
||||||
// The input types don't need to be identical, but for now they must be the
|
// The input types don't need to be identical, but for now they must be the
|
||||||
// same size. There is no specific reason we couldn't handle things like
|
// same size. There is no specific reason we couldn't handle things like
|
||||||
// <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
|
// <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
|
||||||
// there yet.
|
// there yet.
|
||||||
if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
|
if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
|
||||||
DestTy->getElementType()->getPrimitiveSizeInBits())
|
DestTy->getElementType()->getPrimitiveSizeInBits())
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
|
SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
|
||||||
InVal = IC.Builder->CreateBitCast(InVal, SrcTy);
|
InVal = IC.Builder->CreateBitCast(InVal, SrcTy);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Now that the element types match, get the shuffle mask and RHS of the
|
// Now that the element types match, get the shuffle mask and RHS of the
|
||||||
// shuffle to use, which depends on whether we're increasing or decreasing the
|
// shuffle to use, which depends on whether we're increasing or decreasing the
|
||||||
// size of the input.
|
// size of the input.
|
||||||
SmallVector<uint32_t, 16> ShuffleMask;
|
SmallVector<uint32_t, 16> ShuffleMask;
|
||||||
Value *V2;
|
Value *V2;
|
||||||
|
|
||||||
if (SrcTy->getNumElements() > DestTy->getNumElements()) {
|
if (SrcTy->getNumElements() > DestTy->getNumElements()) {
|
||||||
// If we're shrinking the number of elements, just shuffle in the low
|
// If we're shrinking the number of elements, just shuffle in the low
|
||||||
// elements from the input and use undef as the second shuffle input.
|
// elements from the input and use undef as the second shuffle input.
|
||||||
V2 = UndefValue::get(SrcTy);
|
V2 = UndefValue::get(SrcTy);
|
||||||
for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
|
for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
|
||||||
ShuffleMask.push_back(i);
|
ShuffleMask.push_back(i);
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
// If we're increasing the number of elements, shuffle in all of the
|
// If we're increasing the number of elements, shuffle in all of the
|
||||||
// elements from InVal and fill the rest of the result elements with zeros
|
// elements from InVal and fill the rest of the result elements with zeros
|
||||||
|
@ -1441,7 +1440,7 @@ static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
|
||||||
for (unsigned i = 0, e = DestTy->getNumElements()-SrcElts; i != e; ++i)
|
for (unsigned i = 0, e = DestTy->getNumElements()-SrcElts; i != e; ++i)
|
||||||
ShuffleMask.push_back(SrcElts);
|
ShuffleMask.push_back(SrcElts);
|
||||||
}
|
}
|
||||||
|
|
||||||
return new ShuffleVectorInst(InVal, V2,
|
return new ShuffleVectorInst(InVal, V2,
|
||||||
ConstantDataVector::get(V2->getContext(),
|
ConstantDataVector::get(V2->getContext(),
|
||||||
ShuffleMask));
|
ShuffleMask));
|
||||||
|
@ -1468,7 +1467,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
|
||||||
Type *VecEltTy) {
|
Type *VecEltTy) {
|
||||||
// Undef values never contribute useful bits to the result.
|
// Undef values never contribute useful bits to the result.
|
||||||
if (isa<UndefValue>(V)) return true;
|
if (isa<UndefValue>(V)) return true;
|
||||||
|
|
||||||
// If we got down to a value of the right type, we win, try inserting into the
|
// If we got down to a value of the right type, we win, try inserting into the
|
||||||
// right element.
|
// right element.
|
||||||
if (V->getType() == VecEltTy) {
|
if (V->getType() == VecEltTy) {
|
||||||
|
@ -1476,15 +1475,15 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
|
||||||
if (Constant *C = dyn_cast<Constant>(V))
|
if (Constant *C = dyn_cast<Constant>(V))
|
||||||
if (C->isNullValue())
|
if (C->isNullValue())
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
// Fail if multiple elements are inserted into this slot.
|
// Fail if multiple elements are inserted into this slot.
|
||||||
if (ElementIndex >= Elements.size() || Elements[ElementIndex] != 0)
|
if (ElementIndex >= Elements.size() || Elements[ElementIndex] != 0)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
Elements[ElementIndex] = V;
|
Elements[ElementIndex] = V;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Constant *C = dyn_cast<Constant>(V)) {
|
if (Constant *C = dyn_cast<Constant>(V)) {
|
||||||
// Figure out the # elements this provides, and bitcast it or slice it up
|
// Figure out the # elements this provides, and bitcast it or slice it up
|
||||||
// as required.
|
// as required.
|
||||||
|
@ -1495,7 +1494,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
|
||||||
if (NumElts == 1)
|
if (NumElts == 1)
|
||||||
return CollectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
|
return CollectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
|
||||||
ElementIndex, Elements, VecEltTy);
|
ElementIndex, Elements, VecEltTy);
|
||||||
|
|
||||||
// Okay, this is a constant that covers multiple elements. Slice it up into
|
// Okay, this is a constant that covers multiple elements. Slice it up into
|
||||||
// pieces and insert each element-sized piece into the vector.
|
// pieces and insert each element-sized piece into the vector.
|
||||||
if (!isa<IntegerType>(C->getType()))
|
if (!isa<IntegerType>(C->getType()))
|
||||||
|
@ -1503,7 +1502,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
|
||||||
C->getType()->getPrimitiveSizeInBits()));
|
C->getType()->getPrimitiveSizeInBits()));
|
||||||
unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
|
unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
|
||||||
Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
|
Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
|
||||||
|
|
||||||
for (unsigned i = 0; i != NumElts; ++i) {
|
for (unsigned i = 0; i != NumElts; ++i) {
|
||||||
Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
|
Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
|
||||||
i*ElementSize));
|
i*ElementSize));
|
||||||
|
@ -1513,23 +1512,23 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!V->hasOneUse()) return false;
|
if (!V->hasOneUse()) return false;
|
||||||
|
|
||||||
Instruction *I = dyn_cast<Instruction>(V);
|
Instruction *I = dyn_cast<Instruction>(V);
|
||||||
if (I == 0) return false;
|
if (I == 0) return false;
|
||||||
switch (I->getOpcode()) {
|
switch (I->getOpcode()) {
|
||||||
default: return false; // Unhandled case.
|
default: return false; // Unhandled case.
|
||||||
case Instruction::BitCast:
|
case Instruction::BitCast:
|
||||||
return CollectInsertionElements(I->getOperand(0), ElementIndex,
|
return CollectInsertionElements(I->getOperand(0), ElementIndex,
|
||||||
Elements, VecEltTy);
|
Elements, VecEltTy);
|
||||||
case Instruction::ZExt:
|
case Instruction::ZExt:
|
||||||
if (!isMultipleOfTypeSize(
|
if (!isMultipleOfTypeSize(
|
||||||
I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
|
I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
|
||||||
VecEltTy))
|
VecEltTy))
|
||||||
return false;
|
return false;
|
||||||
return CollectInsertionElements(I->getOperand(0), ElementIndex,
|
return CollectInsertionElements(I->getOperand(0), ElementIndex,
|
||||||
Elements, VecEltTy);
|
Elements, VecEltTy);
|
||||||
case Instruction::Or:
|
case Instruction::Or:
|
||||||
return CollectInsertionElements(I->getOperand(0), ElementIndex,
|
return CollectInsertionElements(I->getOperand(0), ElementIndex,
|
||||||
Elements, VecEltTy) &&
|
Elements, VecEltTy) &&
|
||||||
|
@ -1541,11 +1540,11 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
|
||||||
if (CI == 0) return false;
|
if (CI == 0) return false;
|
||||||
if (!isMultipleOfTypeSize(CI->getZExtValue(), VecEltTy)) return false;
|
if (!isMultipleOfTypeSize(CI->getZExtValue(), VecEltTy)) return false;
|
||||||
unsigned IndexShift = getTypeSizeIndex(CI->getZExtValue(), VecEltTy);
|
unsigned IndexShift = getTypeSizeIndex(CI->getZExtValue(), VecEltTy);
|
||||||
|
|
||||||
return CollectInsertionElements(I->getOperand(0), ElementIndex+IndexShift,
|
return CollectInsertionElements(I->getOperand(0), ElementIndex+IndexShift,
|
||||||
Elements, VecEltTy);
|
Elements, VecEltTy);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1580,11 +1579,11 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
|
||||||
Value *Result = Constant::getNullValue(CI.getType());
|
Value *Result = Constant::getNullValue(CI.getType());
|
||||||
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
|
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
|
||||||
if (Elements[i] == 0) continue; // Unset element.
|
if (Elements[i] == 0) continue; // Unset element.
|
||||||
|
|
||||||
Result = IC.Builder->CreateInsertElement(Result, Elements[i],
|
Result = IC.Builder->CreateInsertElement(Result, Elements[i],
|
||||||
IC.Builder->getInt32(i));
|
IC.Builder->getInt32(i));
|
||||||
}
|
}
|
||||||
|
|
||||||
return Result;
|
return Result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1612,11 +1611,11 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
|
||||||
VecTy->getPrimitiveSizeInBits() / DestWidth);
|
VecTy->getPrimitiveSizeInBits() / DestWidth);
|
||||||
VecInput = IC.Builder->CreateBitCast(VecInput, VecTy);
|
VecInput = IC.Builder->CreateBitCast(VecInput, VecTy);
|
||||||
}
|
}
|
||||||
|
|
||||||
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(0));
|
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(0));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// bitcast(trunc(lshr(bitcast(somevector), cst))
|
// bitcast(trunc(lshr(bitcast(somevector), cst))
|
||||||
ConstantInt *ShAmt = 0;
|
ConstantInt *ShAmt = 0;
|
||||||
if (match(Src, m_Trunc(m_LShr(m_BitCast(m_Value(VecInput)),
|
if (match(Src, m_Trunc(m_LShr(m_BitCast(m_Value(VecInput)),
|
||||||
|
@ -1633,7 +1632,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
|
||||||
VecTy->getPrimitiveSizeInBits() / DestWidth);
|
VecTy->getPrimitiveSizeInBits() / DestWidth);
|
||||||
VecInput = IC.Builder->CreateBitCast(VecInput, VecTy);
|
VecInput = IC.Builder->CreateBitCast(VecInput, VecTy);
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned Elt = ShAmt->getZExtValue() / DestWidth;
|
unsigned Elt = ShAmt->getZExtValue() / DestWidth;
|
||||||
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
|
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
|
||||||
}
|
}
|
||||||
|
@ -1657,12 +1656,12 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
|
||||||
PointerType *SrcPTy = cast<PointerType>(SrcTy);
|
PointerType *SrcPTy = cast<PointerType>(SrcTy);
|
||||||
Type *DstElTy = DstPTy->getElementType();
|
Type *DstElTy = DstPTy->getElementType();
|
||||||
Type *SrcElTy = SrcPTy->getElementType();
|
Type *SrcElTy = SrcPTy->getElementType();
|
||||||
|
|
||||||
// If the address spaces don't match, don't eliminate the bitcast, which is
|
// If the address spaces don't match, don't eliminate the bitcast, which is
|
||||||
// required for changing types.
|
// required for changing types.
|
||||||
if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
|
if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
// If we are casting a alloca to a pointer to a type of the same
|
// If we are casting a alloca to a pointer to a type of the same
|
||||||
// size, rewrite the allocation instruction to allocate the "right" type.
|
// size, rewrite the allocation instruction to allocate the "right" type.
|
||||||
// There is no need to modify malloc calls because it is their bitcast that
|
// There is no need to modify malloc calls because it is their bitcast that
|
||||||
|
@ -1670,14 +1669,14 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
|
||||||
if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
|
if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
|
||||||
if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
|
if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
|
||||||
return V;
|
return V;
|
||||||
|
|
||||||
// If the source and destination are pointers, and this cast is equivalent
|
// If the source and destination are pointers, and this cast is equivalent
|
||||||
// to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
|
// to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
|
||||||
// This can enhance SROA and other transforms that want type-safe pointers.
|
// This can enhance SROA and other transforms that want type-safe pointers.
|
||||||
Constant *ZeroUInt =
|
Constant *ZeroUInt =
|
||||||
Constant::getNullValue(Type::getInt32Ty(CI.getContext()));
|
Constant::getNullValue(Type::getInt32Ty(CI.getContext()));
|
||||||
unsigned NumZeros = 0;
|
unsigned NumZeros = 0;
|
||||||
while (SrcElTy != DstElTy &&
|
while (SrcElTy != DstElTy &&
|
||||||
isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
|
isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
|
||||||
SrcElTy->getNumContainedTypes() /* not "{}" */) {
|
SrcElTy->getNumContainedTypes() /* not "{}" */) {
|
||||||
SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
|
SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
|
||||||
|
@ -1690,7 +1689,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
|
||||||
return GetElementPtrInst::CreateInBounds(Src, Idxs);
|
return GetElementPtrInst::CreateInBounds(Src, Idxs);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try to optimize int -> float bitcasts.
|
// Try to optimize int -> float bitcasts.
|
||||||
if ((DestTy->isFloatTy() || DestTy->isDoubleTy()) && isa<IntegerType>(SrcTy))
|
if ((DestTy->isFloatTy() || DestTy->isDoubleTy()) && isa<IntegerType>(SrcTy))
|
||||||
if (Instruction *I = OptimizeIntToFloatBitCast(CI, *this))
|
if (Instruction *I = OptimizeIntToFloatBitCast(CI, *this))
|
||||||
|
@ -1703,7 +1702,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
|
||||||
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
|
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
|
||||||
// FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
|
// FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isa<IntegerType>(SrcTy)) {
|
if (isa<IntegerType>(SrcTy)) {
|
||||||
// If this is a cast from an integer to vector, check to see if the input
|
// If this is a cast from an integer to vector, check to see if the input
|
||||||
// is a trunc or zext of a bitcast from vector. If so, we can replace all
|
// is a trunc or zext of a bitcast from vector. If so, we can replace all
|
||||||
|
@ -1716,7 +1715,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
|
||||||
cast<VectorType>(DestTy), *this))
|
cast<VectorType>(DestTy), *this))
|
||||||
return I;
|
return I;
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the input is an 'or' instruction, we may be doing shifts and ors to
|
// If the input is an 'or' instruction, we may be doing shifts and ors to
|
||||||
// assemble the elements of the vector manually. Try to rip the code out
|
// assemble the elements of the vector manually. Try to rip the code out
|
||||||
// and replace it with insertelements.
|
// and replace it with insertelements.
|
||||||
|
@ -1727,7 +1726,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
|
||||||
|
|
||||||
if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
|
if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
|
||||||
if (SrcVTy->getNumElements() == 1 && !DestTy->isVectorTy()) {
|
if (SrcVTy->getNumElements() == 1 && !DestTy->isVectorTy()) {
|
||||||
Value *Elem =
|
Value *Elem =
|
||||||
Builder->CreateExtractElement(Src,
|
Builder->CreateExtractElement(Src,
|
||||||
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
|
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
|
||||||
return CastInst::Create(Instruction::BitCast, Elem, DestTy);
|
return CastInst::Create(Instruction::BitCast, Elem, DestTy);
|
||||||
|
@ -1737,7 +1736,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
|
||||||
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
|
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
|
||||||
// Okay, we have (bitcast (shuffle ..)). Check to see if this is
|
// Okay, we have (bitcast (shuffle ..)). Check to see if this is
|
||||||
// a bitcast to a vector with the same # elts.
|
// a bitcast to a vector with the same # elts.
|
||||||
if (SVI->hasOneUse() && DestTy->isVectorTy() &&
|
if (SVI->hasOneUse() && DestTy->isVectorTy() &&
|
||||||
cast<VectorType>(DestTy)->getNumElements() ==
|
cast<VectorType>(DestTy)->getNumElements() ==
|
||||||
SVI->getType()->getNumElements() &&
|
SVI->getType()->getNumElements() &&
|
||||||
SVI->getType()->getNumElements() ==
|
SVI->getType()->getNumElements() ==
|
||||||
|
@ -1746,9 +1745,9 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
|
||||||
// If either of the operands is a cast from CI.getType(), then
|
// If either of the operands is a cast from CI.getType(), then
|
||||||
// evaluating the shuffle in the casted destination's type will allow
|
// evaluating the shuffle in the casted destination's type will allow
|
||||||
// us to eliminate at least one cast.
|
// us to eliminate at least one cast.
|
||||||
if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
|
if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
|
||||||
Tmp->getOperand(0)->getType() == DestTy) ||
|
Tmp->getOperand(0)->getType() == DestTy) ||
|
||||||
((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
|
((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
|
||||||
Tmp->getOperand(0)->getType() == DestTy)) {
|
Tmp->getOperand(0)->getType() == DestTy)) {
|
||||||
Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
|
Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
|
||||||
Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
|
Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
|
||||||
|
@ -1758,7 +1757,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (SrcTy->isPointerTy())
|
if (SrcTy->isPointerTy())
|
||||||
return commonPointerCastTransforms(CI);
|
return commonPointerCastTransforms(CI);
|
||||||
return commonCastTransforms(CI);
|
return commonCastTransforms(CI);
|
||||||
|
|
|
@@ -371,7 +371,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds() &&
      Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits(AS))
-   Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext(), AS));
+   Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext()));

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.

@@ -539,7 +539,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
  // we don't need to bother extending: the extension won't affect where the
  // computation crosses zero.
  if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
-   Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext(), AS);
+   Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
    VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
  }
  return VariableIdx;

@@ -561,7 +561,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
    return 0;

  // Okay, we can do this evaluation.  Start by converting the index to intptr.
- Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext(), AS);
+ Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
                                            true /*Signed*/);
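
Truncating the variable index up front is safe because truncation commutes
with the add and mul that make up the offset computation. A C++ check at a
pretend 32-bit pointer width (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t idx[] = {0, 1, 0x100000001ULL, ~0ULL};
      const uint64_t base = 0x12345678ULL, scale = 12;
      for (uint64_t i : idx) {
        uint32_t wideThenTrunc = uint32_t(base + i * scale);
        uint32_t truncThenNarrow =
            uint32_t(base) + uint32_t(i) * uint32_t(scale);
        assert(wideThenTrunc == truncThenNarrow);
      }
    }
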
@@ -1554,7 +1554,8 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
  // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
  // integer type is the same size as the pointer type.
  if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
-     TD->getTypeSizeInBits(DestTy) ==
+     TD->getPointerSizeInBits(
+       cast<PtrToIntInst>(LHSCI)->getPointerAddressSpace()) ==
      cast<IntegerType>(DestTy)->getBitWidth()) {
    Value *RHSOp = 0;
    if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {

@@ -2250,7 +2251,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
    case Instruction::IntToPtr:
      // icmp pred inttoptr(X), null -> icmp pred X, 0
      if (RHSC->isNullValue() && TD &&
-         TD->getIntPtrType(LHSI->getType()) ==
+         TD->getIntPtrType(RHSC->getContext()) ==
          LHSI->getOperand(0)->getType())
        return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
                    Constant::getNullValue(LHSI->getOperand(0)->getType()));
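
The ptrtoint compare fold is just the observation that a same-width
ptrtoint is injective, so comparing the integers is comparing the pointers.
C++ analogy:

    #include <cassert>
    #include <cstdint>

    int main() {
      int a[2] = {0, 0};
      int *p = &a[0], *q = &a[1];
      // icmp eq (ptrtoint p), (ptrtoint q)  ==  icmp eq p, q
      assert((reinterpret_cast<uintptr_t>(p) ==
              reinterpret_cast<uintptr_t>(q)) == (p == q));
      assert((reinterpret_cast<uintptr_t>(p) ==
              reinterpret_cast<uintptr_t>(p)) == (p == p));
    }
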
@@ -173,7 +173,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (TD) {
-   Type *IntPtrTy = TD->getIntPtrType(AI.getType());
+   Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);

@@ -185,7 +185,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());
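
The array-size canonicalization above is straightforward to reproduce with
IRBuilder. A sketch against a recent LLVM API -- the 3.2-era tree spells
some of these calls differently, so treat this as illustrative:

    // Emits "alloca i8, <intptr> %n", int-casting %n to the DataLayout's
    // intptr type first, mirroring the visitAllocaInst rewrite.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    AllocaInst *emitSizedAlloca(Module &M, IRBuilder<> &B, Value *N) {
      Type *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext());
      if (N->getType() != IntPtrTy)
        N = B.CreateIntCast(N, IntPtrTy, /*isSigned=*/false);
      return B.CreateAlloca(B.getInt8Ty(), N, "buf");
    }
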
@@ -311,7 +311,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,

  Type *SrcPTy = SrcTy->getElementType();

  if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
      DestPTy->isVectorTy()) {
    // If the source is an array, the code below will not succeed.  Check to
    // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for

@@ -328,7 +328,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
  }

  if (IC.getDataLayout() &&
      (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
       SrcPTy->isVectorTy()) &&
      // Do not allow turning this into a load of an integer, which is then
      // casted to a pointer, this pessimizes pointer analysis a lot.

@@ -339,7 +339,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
    // Okay, we are casting from one integer or pointer type to another of
    // the same size.  Instead of casting the pointer before the load, cast
    // the result of the loaded value.
    LoadInst *NewLoad =
      IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
    NewLoad->setAlignment(LI.getAlignment());
    NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());

@@ -376,7 +376,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.

@@ -397,7 +397,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
                       Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.

@@ -416,7 +416,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
|
if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
|
||||||
return Res;
|
return Res;
|
||||||
|
|
||||||
if (Op->hasOneUse()) {
|
if (Op->hasOneUse()) {
|
||||||
// Change select and PHI nodes to select values instead of addresses: this
|
// Change select and PHI nodes to select values instead of addresses: this
|
||||||
// helps alias analysis out a lot, allows many others simplifications, and
|
// helps alias analysis out a lot, allows many others simplifications, and
|
||||||
|
@ -470,18 +470,18 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
|
||||||
Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
|
Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
|
||||||
PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
|
PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
|
||||||
if (SrcTy == 0) return 0;
|
if (SrcTy == 0) return 0;
|
||||||
|
|
||||||
Type *SrcPTy = SrcTy->getElementType();
|
Type *SrcPTy = SrcTy->getElementType();
|
||||||
|
|
||||||
if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
|
if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
|
/// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
|
||||||
/// to its first element. This allows us to handle things like:
|
/// to its first element. This allows us to handle things like:
|
||||||
/// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
|
/// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
|
||||||
/// on 32-bit hosts.
|
/// on 32-bit hosts.
|
||||||
SmallVector<Value*, 4> NewGEPIndices;
|
SmallVector<Value*, 4> NewGEPIndices;
|
||||||
|
|
||||||
// If the source is an array, the code below will not succeed. Check to
|
// If the source is an array, the code below will not succeed. Check to
|
||||||
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
|
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
|
||||||
// constants.
|
// constants.
|
||||||
|
@ -489,7 +489,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
|
||||||
// Index through pointer.
|
// Index through pointer.
|
||||||
Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
|
Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
|
||||||
NewGEPIndices.push_back(Zero);
|
NewGEPIndices.push_back(Zero);
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
|
if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
|
||||||
if (!STy->getNumElements()) /* Struct can be empty {} */
|
if (!STy->getNumElements()) /* Struct can be empty {} */
|
||||||
|
@ -503,23 +503,24 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
|
SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
|
if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
// If the pointers point into different address spaces or if they point to
|
// If the pointers point into different address spaces or if they point to
|
||||||
// values with different sizes, we can't do the transformation.
|
// values with different sizes, we can't do the transformation.
|
||||||
if (!IC.getDataLayout() ||
|
if (!IC.getDataLayout() ||
|
||||||
SrcTy->getAddressSpace() != CI->getType()->getPointerAddressSpace() ||
|
SrcTy->getAddressSpace() !=
|
||||||
|
cast<PointerType>(CI->getType())->getAddressSpace() ||
|
||||||
IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
|
IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
|
||||||
IC.getDataLayout()->getTypeSizeInBits(DestPTy))
|
IC.getDataLayout()->getTypeSizeInBits(DestPTy))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
// Okay, we are casting from one integer or pointer type to another of
|
// Okay, we are casting from one integer or pointer type to another of
|
||||||
// the same size. Instead of casting the pointer before
|
// the same size. Instead of casting the pointer before
|
||||||
// the store, cast the value to be stored.
|
// the store, cast the value to be stored.
|
||||||
Value *NewCast;
|
Value *NewCast;
|
||||||
Value *SIOp0 = SI.getOperand(0);
|
Value *SIOp0 = SI.getOperand(0);
|
||||||
|
@ -533,12 +534,12 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
|
||||||
if (SIOp0->getType()->isPointerTy())
|
if (SIOp0->getType()->isPointerTy())
|
||||||
opcode = Instruction::PtrToInt;
|
opcode = Instruction::PtrToInt;
|
||||||
}
|
}
|
||||||
|
|
||||||
// SIOp0 is a pointer to aggregate and this is a store to the first field,
|
// SIOp0 is a pointer to aggregate and this is a store to the first field,
|
||||||
// emit a GEP to index into its first field.
|
// emit a GEP to index into its first field.
|
||||||
if (!NewGEPIndices.empty())
|
if (!NewGEPIndices.empty())
|
||||||
CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);
|
CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);
|
||||||
|
|
||||||
NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
|
NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
|
||||||
SIOp0->getName()+".c");
|
SIOp0->getName()+".c");
|
||||||
SI.setOperand(0, NewCast);
|
SI.setOperand(0, NewCast);
|
||||||
|
@ -557,7 +558,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
|
||||||
static bool equivalentAddressValues(Value *A, Value *B) {
|
static bool equivalentAddressValues(Value *A, Value *B) {
|
||||||
// Test if the values are trivially equivalent.
|
// Test if the values are trivially equivalent.
|
||||||
if (A == B) return true;
|
if (A == B) return true;
|
||||||
|
|
||||||
// Test if the values come form identical arithmetic instructions.
|
// Test if the values come form identical arithmetic instructions.
|
||||||
// This uses isIdenticalToWhenDefined instead of isIdenticalTo because
|
// This uses isIdenticalToWhenDefined instead of isIdenticalTo because
|
||||||
// its only used to compare two uses within the same basic block, which
|
// its only used to compare two uses within the same basic block, which
|
||||||
|
@ -570,7 +571,7 @@ static bool equivalentAddressValues(Value *A, Value *B) {
|
||||||
if (Instruction *BI = dyn_cast<Instruction>(B))
|
if (Instruction *BI = dyn_cast<Instruction>(B))
|
||||||
if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
|
if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
// Otherwise they may not be equivalent.
|
// Otherwise they may not be equivalent.
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -601,7 +602,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
|
||||||
// If the RHS is an alloca with a single use, zapify the store, making the
|
// If the RHS is an alloca with a single use, zapify the store, making the
|
||||||
// alloca dead.
|
// alloca dead.
|
||||||
if (Ptr->hasOneUse()) {
|
if (Ptr->hasOneUse()) {
|
||||||
if (isa<AllocaInst>(Ptr))
|
if (isa<AllocaInst>(Ptr))
|
||||||
return EraseInstFromFunction(SI);
|
return EraseInstFromFunction(SI);
|
||||||
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
|
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
|
||||||
if (isa<AllocaInst>(GEP->getOperand(0))) {
|
if (isa<AllocaInst>(GEP->getOperand(0))) {
|
||||||
|
@ -624,8 +625,8 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
|
||||||
(isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
|
(isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
|
||||||
ScanInsts++;
|
ScanInsts++;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
|
if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
|
||||||
// Prev store isn't volatile, and stores to the same location?
|
// Prev store isn't volatile, and stores to the same location?
|
||||||
if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
|
if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
|
||||||
|
@ -637,7 +638,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
// If this is a load, we have to stop. However, if the loaded value is from
|
// If this is a load, we have to stop. However, if the loaded value is from
|
||||||
// the pointer we're loading and is producing the pointer we're storing,
|
// the pointer we're loading and is producing the pointer we're storing,
|
||||||
// then *this* store is dead (X = load P; store X -> P).
|
// then *this* store is dead (X = load P; store X -> P).
|
||||||
|
@ -645,12 +646,12 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
|
||||||
if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
|
if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
|
||||||
LI->isSimple())
|
LI->isSimple())
|
||||||
return EraseInstFromFunction(SI);
|
return EraseInstFromFunction(SI);
|
||||||
|
|
||||||
// Otherwise, this is a load from some other location. Stores before it
|
// Otherwise, this is a load from some other location. Stores before it
|
||||||
// may not be dead.
|
// may not be dead.
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Don't skip over loads or things that can modify memory.
|
// Don't skip over loads or things that can modify memory.
|
||||||
if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
|
if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
|
||||||
break;
|
break;
|
||||||
|
@ -680,11 +681,11 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
|
||||||
if (Instruction *Res = InstCombineStoreToCast(*this, SI))
|
if (Instruction *Res = InstCombineStoreToCast(*this, SI))
|
||||||
return Res;
|
return Res;
|
||||||
|
|
||||||
|
|
||||||
// If this store is the last instruction in the basic block (possibly
|
// If this store is the last instruction in the basic block (possibly
|
||||||
// excepting debug info instructions), and if the block ends with an
|
// excepting debug info instructions), and if the block ends with an
|
||||||
// unconditional branch, try to move it to the successor block.
|
// unconditional branch, try to move it to the successor block.
|
||||||
BBI = &SI;
|
BBI = &SI;
|
||||||
do {
|
do {
|
||||||
++BBI;
|
++BBI;
|
||||||
} while (isa<DbgInfoIntrinsic>(BBI) ||
|
} while (isa<DbgInfoIntrinsic>(BBI) ||
|
||||||
|
@ -693,7 +694,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
|
||||||
if (BI->isUnconditional())
|
if (BI->isUnconditional())
|
||||||
if (SimplifyStoreAtEndOfBlock(SI))
|
if (SimplifyStoreAtEndOfBlock(SI))
|
||||||
return 0; // xform done!
|
return 0; // xform done!
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -707,12 +708,12 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
|
||||||
///
|
///
|
||||||
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
|
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
|
||||||
BasicBlock *StoreBB = SI.getParent();
|
BasicBlock *StoreBB = SI.getParent();
|
||||||
|
|
||||||
// Check to see if the successor block has exactly two incoming edges. If
|
// Check to see if the successor block has exactly two incoming edges. If
|
||||||
// so, see if the other predecessor contains a store to the same location.
|
// so, see if the other predecessor contains a store to the same location.
|
||||||
// if so, insert a PHI node (if needed) and move the stores down.
|
// if so, insert a PHI node (if needed) and move the stores down.
|
||||||
BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
|
BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
|
||||||
|
|
||||||
// Determine whether Dest has exactly two predecessors and, if so, compute
|
// Determine whether Dest has exactly two predecessors and, if so, compute
|
||||||
// the other predecessor.
|
// the other predecessor.
|
||||||
pred_iterator PI = pred_begin(DestBB);
|
pred_iterator PI = pred_begin(DestBB);
|
||||||
|
@ -724,7 +725,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
|
||||||
|
|
||||||
if (++PI == pred_end(DestBB))
|
if (++PI == pred_end(DestBB))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
P = *PI;
|
P = *PI;
|
||||||
if (P != StoreBB) {
|
if (P != StoreBB) {
|
||||||
if (OtherBB)
|
if (OtherBB)
|
||||||
|
@ -744,7 +745,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
|
||||||
BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
|
BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
|
||||||
if (!OtherBr || BBI == OtherBB->begin())
|
if (!OtherBr || BBI == OtherBB->begin())
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
// If the other block ends in an unconditional branch, check for the 'if then
|
// If the other block ends in an unconditional branch, check for the 'if then
|
||||||
// else' case. there is an instruction before the branch.
|
// else' case. there is an instruction before the branch.
|
||||||
StoreInst *OtherStore = 0;
|
StoreInst *OtherStore = 0;
|
||||||
|
@ -766,10 +767,10 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
|
||||||
} else {
|
} else {
|
||||||
// Otherwise, the other block ended with a conditional branch. If one of the
|
// Otherwise, the other block ended with a conditional branch. If one of the
|
||||||
// destinations is StoreBB, then we have the if/then case.
|
// destinations is StoreBB, then we have the if/then case.
|
||||||
if (OtherBr->getSuccessor(0) != StoreBB &&
|
if (OtherBr->getSuccessor(0) != StoreBB &&
|
||||||
OtherBr->getSuccessor(1) != StoreBB)
|
OtherBr->getSuccessor(1) != StoreBB)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
// Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
|
// Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
|
||||||
// if/then triangle. See if there is a store to the same ptr as SI that
|
// if/then triangle. See if there is a store to the same ptr as SI that
|
||||||
// lives in OtherBB.
|
// lives in OtherBB.
|
||||||
|
@ -787,7 +788,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
|
||||||
BBI == OtherBB->begin())
|
BBI == OtherBB->begin())
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
// In order to eliminate the store in OtherBr, we have to
|
// In order to eliminate the store in OtherBr, we have to
|
||||||
// make sure nothing reads or overwrites the stored value in
|
// make sure nothing reads or overwrites the stored value in
|
||||||
// StoreBB.
|
// StoreBB.
|
||||||
|
@ -797,7 +798,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Insert a PHI node now if we need it.
|
// Insert a PHI node now if we need it.
|
||||||
Value *MergedVal = OtherStore->getOperand(0);
|
Value *MergedVal = OtherStore->getOperand(0);
|
||||||
if (MergedVal != SI.getOperand(0)) {
|
if (MergedVal != SI.getOperand(0)) {
|
||||||
|
@ -806,7 +807,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
|
||||||
PN->addIncoming(OtherStore->getOperand(0), OtherBB);
|
PN->addIncoming(OtherStore->getOperand(0), OtherBB);
|
||||||
MergedVal = InsertNewInstBefore(PN, DestBB->front());
|
MergedVal = InsertNewInstBefore(PN, DestBB->front());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Advance to a place where it is safe to insert the new store and
|
// Advance to a place where it is safe to insert the new store and
|
||||||
// insert it.
|
// insert it.
|
||||||
BBI = DestBB->getFirstInsertionPt();
|
BBI = DestBB->getFirstInsertionPt();
|
||||||
|
@ -816,7 +817,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
|
||||||
SI.getOrdering(),
|
SI.getOrdering(),
|
||||||
SI.getSynchScope());
|
SI.getSynchScope());
|
||||||
InsertNewInstBefore(NewSI, *BBI);
|
InsertNewInstBefore(NewSI, *BBI);
|
||||||
NewSI->setDebugLoc(OtherStore->getDebugLoc());
|
NewSI->setDebugLoc(OtherStore->getDebugLoc());
|
||||||
|
|
||||||
// Nuke the old stores.
|
// Nuke the old stores.
|
||||||
EraseInstFromFunction(SI);
|
EraseInstFromFunction(SI);
|
||||||
|
|
|
@@ -738,7 +738,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
 /// or not there is a sequence of GEP indices into the type that will land us at
 /// the specified offset. If so, fill them into NewIndices and return the
 /// resultant element type, otherwise return null.
-Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy,
+Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
                                         SmallVectorImpl<Value*> &NewIndices) {
   if (!TD) return 0;
   if (!Ty->isSized()) return 0;

@@ -746,6 +746,7 @@ Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy
   // Start with the index over the outer type. Note that the type size
   // might be zero (even if the offset isn't zero) if the indexed type
   // is something like [0 x {int, int}]
+  Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
   int64_t FirstIdx = 0;
   if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
     FirstIdx = Offset/TySize;

@@ -1054,7 +1055,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   // by multiples of a zero size type with zero.
   if (TD) {
     bool MadeChange = false;
-    Type *IntPtrTy = TD->getIntPtrType(PtrOp->getType());
+    Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());

     gep_type_iterator GTI = gep_type_begin(GEP);
     for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();

@@ -1239,7 +1240,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {

       // Earlier transforms ensure that the index has type IntPtrType, which
       // considerably simplifies the logic by eliminating implicit casts.
-      assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
+      assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
             "Index not cast to pointer width?");

      bool NSW;

@@ -1274,7 +1275,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {

       // Earlier transforms ensure that the index has type IntPtrType, which
       // considerably simplifies the logic by eliminating implicit casts.
-      assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
+      assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
             "Index not cast to pointer width?");

      bool NSW;

@@ -1336,8 +1337,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       SmallVector<Value*, 8> NewIndices;
       Type *InTy =
         cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
-      Type *IntPtrTy = TD->getIntPtrType(BCI->getOperand(0)->getType());
-      if (FindElementAtOffset(InTy, Offset, IntPtrTy, NewIndices)) {
+      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
         Value *NGEP = GEP.isInBounds() ?
           Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
           Builder->CreateGEP(BCI->getOperand(0), NewIndices);

@@ -933,7 +933,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
     DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                  << *MemoryInst);
     Type *IntPtrTy =
-          TLI->getDataLayout()->getIntPtrType(Addr->getType());
+          TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());

     Value *Result = 0;

@@ -1428,8 +1428,7 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
 /// genLoopLimit - Help LinearFunctionTestReplace by generating a value that
 /// holds the RHS of the new loop test.
 static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
-                           SCEVExpander &Rewriter, ScalarEvolution *SE,
-                           Type *IntPtrTy) {
+                           SCEVExpander &Rewriter, ScalarEvolution *SE) {
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
   assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
   const SCEV *IVInit = AR->getStart();

@@ -1455,8 +1454,7 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
     // We could handle pointer IVs other than i8*, but we need to compensate for
     // gep index scaling. See canExpandBackedgeTakenCount comments.
     assert(SE->getSizeOfExpr(
-             cast<PointerType>(GEPBase->getType())->getElementType(),
-             IntPtrTy)->isOne()
+             cast<PointerType>(GEPBase->getType())->getElementType())->isOne()
           && "unit stride pointer IV must be i8*");

    IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

@@ -1555,9 +1553,7 @@ LinearFunctionTestReplace(Loop *L,
     CmpIndVar = IndVar;
   }

-  Type *IntPtrTy = TD ? TD->getIntPtrType(IndVar->getType()) :
-                        IntegerType::getInt64Ty(IndVar->getContext());
-  Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE, IntPtrTy);
+  Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
   assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy()
          && "genLoopLimit missed a cast");

@@ -458,10 +458,9 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
   // Okay, we have a strided store "p[i]" of a splattable value. We can turn
   // this into a memset in the loop preheader now if we want. However, this
   // would be unsafe to do if there is anything else in the loop that may read
-  // or write to the aliased location.
-  assert(DestPtr->getType()->isPointerTy()
-         && "Must be a pointer type.");
-  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+  // or write to the aliased location. Check for any overlap by generating the
+  // base pointer and checking the region.
+  unsigned AddrSpace = cast<PointerType>(DestPtr->getType())->getAddressSpace();
   Value *BasePtr =
     Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
                            Preheader->getTerminator());

@@ -471,7 +470,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,

   // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
   // pointer size if it isn't already.
-  Type *IntPtr = TD->getIntPtrType(DestPtr->getType());
+  Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
   BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

   const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),

@@ -587,7 +586,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,

   // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
   // pointer size if it isn't already.
-  Type *IntPtr = TD->getIntPtrType(SI->getType());
+  Type *IntPtr = TD->getIntPtrType(SI->getContext());
   BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

   const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),

@@ -2395,9 +2395,8 @@ private:

   Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
     assert(BeginOffset >= NewAllocaBeginOffset);
-    assert(PointerTy->isPointerTy() &&
-           "Type must be pointer type!");
-    APInt Offset(TD.getTypeSizeInBits(PointerTy), BeginOffset - NewAllocaBeginOffset);
+    unsigned AS = cast<PointerType>(PointerTy)->getAddressSpace();
+    APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset);
     return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
   }

@@ -2795,8 +2794,9 @@ private:
       = P.getMemTransferOffsets(II);

     assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!");
+    unsigned AS = cast<PointerType>(OldPtr->getType())->getAddressSpace();
     // Compute the relative offset within the transfer.
-    unsigned IntPtrWidth = TD.getTypeSizeInBits(OldPtr->getType());
+    unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
     APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
                                                 : MTO.SourceBegin));

@@ -963,7 +963,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
   if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
     SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth));
   else if (SV->getType()->isPointerTy())
-    SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getType()));
+    SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getContext()));

   // Zero extend or truncate the value if needed.
   if (SV->getType() != AllocaType) {

@@ -311,11 +311,10 @@ struct MemCpyOpt : public LibCallOptimization {
     if (!TD) return 0;

     FunctionType *FT = Callee->getFunctionType();
-    Type *PT = FT->getParamType(0);
     if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
         !FT->getParamType(0)->isPointerTy() ||
         !FT->getParamType(1)->isPointerTy() ||
-        FT->getParamType(2) != TD->getIntPtrType(PT))
+        FT->getParamType(2) != TD->getIntPtrType(*Context))
       return 0;

     // memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)

@@ -334,11 +333,10 @@ struct MemMoveOpt : public LibCallOptimization {
     if (!TD) return 0;

     FunctionType *FT = Callee->getFunctionType();
-    Type *PT = FT->getParamType(0);
     if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
         !FT->getParamType(0)->isPointerTy() ||
         !FT->getParamType(1)->isPointerTy() ||
-        FT->getParamType(2) != TD->getIntPtrType(PT))
+        FT->getParamType(2) != TD->getIntPtrType(*Context))
       return 0;

     // memmove(x, y, n) -> llvm.memmove(x, y, n, 1)

@@ -357,11 +355,10 @@ struct MemSetOpt : public LibCallOptimization {
     if (!TD) return 0;

     FunctionType *FT = Callee->getFunctionType();
-    Type *PT = FT->getParamType(0);
     if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
         !FT->getParamType(0)->isPointerTy() ||
         !FT->getParamType(1)->isIntegerTy() ||
-        FT->getParamType(2) != TD->getIntPtrType(PT))
+        FT->getParamType(2) != TD->getIntPtrType(*Context))
       return 0;

     // memset(p, v, n) -> llvm.memset(p, v, n, 1)

@@ -786,9 +783,8 @@ struct SPrintFOpt : public LibCallOptimization {
     if (!TD) return 0;

     // sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
-    Type *AT = CI->getArgOperand(0)->getType();
     B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
-                   ConstantInt::get(TD->getIntPtrType(AT), // Copy the
+                   ConstantInt::get(TD->getIntPtrType(*Context), // Copy the
                                     FormatStr.size() + 1), 1); // nul byte.
     return ConstantInt::get(CI->getType(), FormatStr.size());
   }

@@ -915,9 +911,8 @@ struct FPutsOpt : public LibCallOptimization {
     uint64_t Len = GetStringLength(CI->getArgOperand(0));
     if (!Len) return 0;
     // Known to have no uses (see above).
-    Type *PT = FT->getParamType(0);
     return EmitFWrite(CI->getArgOperand(0),
-                      ConstantInt::get(TD->getIntPtrType(PT), Len-1),
+                      ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
                       CI->getArgOperand(1), B, TD, TLI);
   }
 };

@@ -942,9 +937,8 @@ struct FPrintFOpt : public LibCallOptimization {
     // These optimizations require DataLayout.
     if (!TD) return 0;

-    Type *AT = CI->getArgOperand(1)->getType();
     Value *NewCI = EmitFWrite(CI->getArgOperand(1),
-                              ConstantInt::get(TD->getIntPtrType(AT),
+                              ConstantInt::get(TD->getIntPtrType(*Context),
                                                FormatStr.size()),
                               CI->getArgOperand(0), B, TD, TLI);
     return NewCI ? ConstantInt::get(CI->getType(), FormatStr.size()) : 0;

@@ -46,8 +46,9 @@ Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
   AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                    ArrayRef<Attributes::AttrVal>(AVs, 2));

+  LLVMContext &Context = B.GetInsertBlock()->getContext();
   Constant *StrLen = M->getOrInsertFunction("strlen", AttrListPtr::get(AWI),
-                                            TD->getIntPtrType(Ptr->getType()),
+                                            TD->getIntPtrType(Context),
                                             B.getInt8PtrTy(),
                                             NULL);
   CallInst *CI = B.CreateCall(StrLen, CastToCStr(Ptr, B), "strlen");

@@ -72,10 +73,11 @@ Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
   AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                    ArrayRef<Attributes::AttrVal>(AVs, 2));

+  LLVMContext &Context = B.GetInsertBlock()->getContext();
   Constant *StrNLen = M->getOrInsertFunction("strnlen", AttrListPtr::get(AWI),
-                                             TD->getIntPtrType(Ptr->getType()),
+                                             TD->getIntPtrType(Context),
                                              B.getInt8PtrTy(),
-                                             TD->getIntPtrType(Ptr->getType()),
+                                             TD->getIntPtrType(Context),
                                              NULL);
   CallInst *CI = B.CreateCall2(StrNLen, CastToCStr(Ptr, B), MaxLen, "strnlen");
   if (const Function *F = dyn_cast<Function>(StrNLen->stripPointerCasts()))

@@ -124,12 +126,12 @@ Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
   AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                    ArrayRef<Attributes::AttrVal>(AVs, 2));

+  LLVMContext &Context = B.GetInsertBlock()->getContext();
   Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI),
                                           B.getInt32Ty(),
                                           B.getInt8PtrTy(),
                                           B.getInt8PtrTy(),
-                                          TD->getIntPtrType(Ptr1->getType()),
-                                          NULL);
+                                          TD->getIntPtrType(Context), NULL);
   CallInst *CI = B.CreateCall3(StrNCmp, CastToCStr(Ptr1, B),
                                CastToCStr(Ptr2, B), Len, "strncmp");

@@ -199,14 +201,14 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
   AttributeWithIndex AWI;
   AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                 Attributes::NoUnwind);
+  LLVMContext &Context = B.GetInsertBlock()->getContext();
   Value *MemCpy = M->getOrInsertFunction("__memcpy_chk",
                                          AttrListPtr::get(AWI),
                                          B.getInt8PtrTy(),
                                          B.getInt8PtrTy(),
                                          B.getInt8PtrTy(),
-                                         TD->getIntPtrType(Dst->getType()),
-                                         TD->getIntPtrType(Src->getType()),
-                                         NULL);
+                                         TD->getIntPtrType(Context),
+                                         TD->getIntPtrType(Context), NULL);
   Dst = CastToCStr(Dst, B);
   Src = CastToCStr(Src, B);
   CallInst *CI = B.CreateCall4(MemCpy, Dst, Src, Len, ObjSize);

@@ -228,11 +230,12 @@ Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
   Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
   AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                 ArrayRef<Attributes::AttrVal>(AVs, 2));
+  LLVMContext &Context = B.GetInsertBlock()->getContext();
   Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(AWI),
                                          B.getInt8PtrTy(),
                                          B.getInt8PtrTy(),
                                          B.getInt32Ty(),
-                                         TD->getIntPtrType(Ptr->getType()),
+                                         TD->getIntPtrType(Context),
                                          NULL);
   CallInst *CI = B.CreateCall3(MemChr, CastToCStr(Ptr, B), Val, Len, "memchr");

@@ -257,12 +260,12 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
   AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                    ArrayRef<Attributes::AttrVal>(AVs, 2));

+  LLVMContext &Context = B.GetInsertBlock()->getContext();
   Value *MemCmp = M->getOrInsertFunction("memcmp", AttrListPtr::get(AWI),
                                          B.getInt32Ty(),
                                          B.getInt8PtrTy(),
                                          B.getInt8PtrTy(),
-                                         TD->getIntPtrType(Ptr1->getType()),
-                                         NULL);
+                                         TD->getIntPtrType(Context), NULL);
   CallInst *CI = B.CreateCall3(MemCmp, CastToCStr(Ptr1, B), CastToCStr(Ptr2, B),
                                Len, "memcmp");

@@ -422,24 +425,24 @@ Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
   AWI[1] = AttributeWithIndex::get(M->getContext(), 4, Attributes::NoCapture);
   AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
                                    Attributes::NoUnwind);
+  LLVMContext &Context = B.GetInsertBlock()->getContext();
   StringRef FWriteName = TLI->getName(LibFunc::fwrite);
   Constant *F;
-  Type *PtrTy = Ptr->getType();
   if (File->getType()->isPointerTy())
     F = M->getOrInsertFunction(FWriteName, AttrListPtr::get(AWI),
-                               TD->getIntPtrType(PtrTy),
+                               TD->getIntPtrType(Context),
                                B.getInt8PtrTy(),
-                               TD->getIntPtrType(PtrTy),
-                               TD->getIntPtrType(PtrTy),
+                               TD->getIntPtrType(Context),
+                               TD->getIntPtrType(Context),
                                File->getType(), NULL);
   else
-    F = M->getOrInsertFunction(FWriteName, TD->getIntPtrType(PtrTy),
+    F = M->getOrInsertFunction(FWriteName, TD->getIntPtrType(Context),
                                B.getInt8PtrTy(),
-                               TD->getIntPtrType(PtrTy),
-                               TD->getIntPtrType(PtrTy),
+                               TD->getIntPtrType(Context),
+                               TD->getIntPtrType(Context),
                                File->getType(), NULL);
   CallInst *CI = B.CreateCall4(F, CastToCStr(Ptr, B), Size,
-                               ConstantInt::get(TD->getIntPtrType(PtrTy), 1), File);
+                               ConstantInt::get(TD->getIntPtrType(Context), 1), File);

   if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
     CI->setCallingConv(Fn->getCallingConv());

@@ -461,13 +464,12 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,
   IRBuilder<> B(CI);

   if (Name == "__memcpy_chk") {
-    Type *PT = FT->getParamType(0);
     // Check if this has the right signature.
     if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
         !FT->getParamType(0)->isPointerTy() ||
         !FT->getParamType(1)->isPointerTy() ||
-        FT->getParamType(2) != TD->getIntPtrType(PT) ||
-        FT->getParamType(3) != TD->getIntPtrType(PT))
+        FT->getParamType(2) != TD->getIntPtrType(Context) ||
+        FT->getParamType(3) != TD->getIntPtrType(Context))
       return false;

     if (isFoldable(3, 2, false)) {

@@ -486,12 +488,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,

   if (Name == "__memmove_chk") {
     // Check if this has the right signature.
-    Type *PT = FT->getParamType(0);
     if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
         !FT->getParamType(0)->isPointerTy() ||
         !FT->getParamType(1)->isPointerTy() ||
-        FT->getParamType(2) != TD->getIntPtrType(PT) ||
-        FT->getParamType(3) != TD->getIntPtrType(PT))
+        FT->getParamType(2) != TD->getIntPtrType(Context) ||
+        FT->getParamType(3) != TD->getIntPtrType(Context))
       return false;

     if (isFoldable(3, 2, false)) {

@@ -505,12 +506,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,

   if (Name == "__memset_chk") {
     // Check if this has the right signature.
-    Type *PT = FT->getParamType(0);
     if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
         !FT->getParamType(0)->isPointerTy() ||
         !FT->getParamType(1)->isIntegerTy() ||
-        FT->getParamType(2) != TD->getIntPtrType(PT) ||
-        FT->getParamType(3) != TD->getIntPtrType(PT))
+        FT->getParamType(2) != TD->getIntPtrType(Context) ||
+        FT->getParamType(3) != TD->getIntPtrType(Context))
       return false;

     if (isFoldable(3, 2, false)) {

@@ -525,12 +525,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,

   if (Name == "__strcpy_chk" || Name == "__stpcpy_chk") {
     // Check if this has the right signature.
-    Type *PT = FT->getParamType(0);
     if (FT->getNumParams() != 3 ||
         FT->getReturnType() != FT->getParamType(0) ||
         FT->getParamType(0) != FT->getParamType(1) ||
         FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
-        FT->getParamType(2) != TD->getIntPtrType(PT))
+        FT->getParamType(2) != TD->getIntPtrType(Context))
       return 0;


@@ -552,12 +551,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,

   if (Name == "__strncpy_chk" || Name == "__stpncpy_chk") {
     // Check if this has the right signature.
-    Type *PT = FT->getParamType(0);
     if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
         FT->getParamType(0) != FT->getParamType(1) ||
         FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
         !FT->getParamType(2)->isIntegerTy() ||
-        FT->getParamType(3) != TD->getIntPtrType(PT))
+        FT->getParamType(3) != TD->getIntPtrType(Context))
       return false;

     if (isFoldable(3, 2, false)) {

@@ -806,7 +806,8 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                           const DataLayout *TD) {
   assert(V->getType()->isPointerTy() &&
          "getOrEnforceKnownAlignment expects a pointer!");
-  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) : 64;
+  unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
+  unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 64;
   APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
   ComputeMaskedBits(V, KnownZero, KnownOne, TD);
   unsigned TrailZ = KnownZero.countTrailingOnes();

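The getOrEnforceKnownAlignment hunk above changes only the bit width used for the known-bits query, from the size of the pointer type back to getPointerSizeInBits for the value's address space. The width matters because alignment is read off the count of low bits proven zero in the address. A minimal standalone sketch of that trailing-zeros trick (illustrative only; the LLVM code works on APInt masks produced by ComputeMaskedBits):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // If the low TrailZ bits of an address are known zero, the pointer is
    // aligned to (1 << TrailZ) bytes; BitWidth bounds the scan, which is why
    // it must match the pointer width in the value's address space.
    static uint64_t alignmentFromKnownZeros(uint64_t KnownZero, unsigned BitWidth) {
      unsigned TrailZ = 0;
      while (TrailZ < BitWidth && ((KnownZero >> TrailZ) & 1))
        ++TrailZ;                      // count low bits proven zero
      TrailZ = std::min(TrailZ, 63u);  // keep the shift defined
      return UINT64_C(1) << TrailZ;    // alignment in bytes
    }

    int main() {
      assert(alignmentFromKnownZeros(0xF, 32) == 16); // 4 low zero bits
      assert(alignmentFromKnownZeros(0x7, 64) == 8);  // 3 low zero bits
    }
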
@@ -535,13 +535,9 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
     CV = ICI->getOperand(0);

   // Unwrap any lossless ptrtoint cast.
-  if (TD && CV) {
-    PtrToIntInst *PTII = NULL;
-    if ((PTII = dyn_cast<PtrToIntInst>(CV)) &&
-        CV->getType() == TD->getIntPtrType(CV->getContext(),
-                                           PTII->getPointerAddressSpace()))
+  if (TD && CV && CV->getType() == TD->getIntPtrType(CV->getContext()))
+    if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV))
       CV = PTII->getOperand(0);
-  }
   return CV;
 }

@@ -988,7 +984,7 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
   // Convert pointer to int before we switch.
   if (CV->getType()->isPointerTy()) {
     assert(TD && "Cannot switch on pointer without DataLayout");
-    CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()),
+    CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getContext()),
                                 "magicptr");
   }

@@ -2716,7 +2712,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
   if (CompVal->getType()->isPointerTy()) {
     assert(TD && "Cannot switch on pointer without DataLayout");
     CompVal = Builder.CreatePtrToInt(CompVal,
-                                     TD->getIntPtrType(CompVal->getType()),
+                                     TD->getIntPtrType(CompVal->getContext()),
                                      "magicptr");
   }

@ -122,13 +122,14 @@ struct MemCpyChkOpt : public InstFortifiedLibCallOptimization {
|
||||||
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
|
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
|
||||||
this->CI = CI;
|
this->CI = CI;
|
||||||
FunctionType *FT = Callee->getFunctionType();
|
FunctionType *FT = Callee->getFunctionType();
|
||||||
|
LLVMContext &Context = CI->getParent()->getContext();
|
||||||
|
|
||||||
// Check if this has the right signature.
|
// Check if this has the right signature.
|
||||||
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
|
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
|
||||||
!FT->getParamType(0)->isPointerTy() ||
|
!FT->getParamType(0)->isPointerTy() ||
|
||||||
!FT->getParamType(1)->isPointerTy() ||
|
!FT->getParamType(1)->isPointerTy() ||
|
||||||
FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)) ||
|
FT->getParamType(2) != TD->getIntPtrType(Context) ||
|
||||||
FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(1)))
|
FT->getParamType(3) != TD->getIntPtrType(Context))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (isFoldable(3, 2, false)) {
|
if (isFoldable(3, 2, false)) {
|
||||||
|
@@ -144,13 +145,14 @@ struct MemMoveChkOpt : public InstFortifiedLibCallOptimization {
   virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
     this->CI = CI;
     FunctionType *FT = Callee->getFunctionType();
+    LLVMContext &Context = CI->getParent()->getContext();

     // Check if this has the right signature.
     if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
         !FT->getParamType(0)->isPointerTy() ||
         !FT->getParamType(1)->isPointerTy() ||
-        FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)) ||
-        FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(1)))
+        FT->getParamType(2) != TD->getIntPtrType(Context) ||
+        FT->getParamType(3) != TD->getIntPtrType(Context))
       return 0;

     if (isFoldable(3, 2, false)) {

@@ -166,13 +168,14 @@ struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
   virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
     this->CI = CI;
     FunctionType *FT = Callee->getFunctionType();
+    LLVMContext &Context = CI->getParent()->getContext();

     // Check if this has the right signature.
     if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
         !FT->getParamType(0)->isPointerTy() ||
         !FT->getParamType(1)->isIntegerTy() ||
-        FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)) ||
-        FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(0)))
+        FT->getParamType(2) != TD->getIntPtrType(Context) ||
+        FT->getParamType(3) != TD->getIntPtrType(Context))
       return 0;

     if (isFoldable(3, 2, false)) {

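The three fortified memcpy/memmove/memset hunks above all replace per-parameter intptr checks with a comparison against the one context-wide intptr type. A sketch of the FunctionType that MemCpyChkOpt accepts after this change; the setup and the function name are assumptions, not code from the commit:

#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

// Hypothetical: builds the signature the post-revert check matches,
// i.e. i8* __memcpy_chk(i8*, i8*, intptr, intptr), with both size
// parameters at the address-space-0 pointer width.
FunctionType *expectedMemCpyChkType(LLVMContext &Ctx, const DataLayout &TD) {
  Type *I8Ptr = Type::getInt8PtrTy(Ctx);
  Type *IntPtr = TD.getIntPtrType(Ctx);
  Type *Params[] = { I8Ptr, I8Ptr, IntPtr, IntPtr };
  return FunctionType::get(I8Ptr, Params, /*isVarArg=*/false);
}
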
@@ -197,7 +200,7 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
       FT->getReturnType() != FT->getParamType(0) ||
       FT->getParamType(0) != FT->getParamType(1) ||
       FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
-      FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
+      FT->getParamType(2) != TD->getIntPtrType(Context))
     return 0;

   Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);

@@ -222,8 +225,8 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {

       Value *Ret =
         EmitMemCpyChk(Dst, Src,
-                      ConstantInt::get(TD->getIntPtrType(Dst->getType()),
-                                       Len), CI->getArgOperand(2), B, TD, TLI);
+                      ConstantInt::get(TD->getIntPtrType(Context), Len),
+                      CI->getArgOperand(2), B, TD, TLI);
       return Ret;
     }
     return 0;

@@ -292,7 +295,7 @@ struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
       FT->getParamType(0) != FT->getParamType(1) ||
       FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
       !FT->getParamType(2)->isIntegerTy() ||
-      FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(0)))
+      FT->getParamType(3) != TD->getIntPtrType(Context))
     return 0;

   if (isFoldable(3, 2, false)) {

@@ -354,8 +357,7 @@ struct StrCatOpt : public LibCallOptimization {
     // We have enough information to now generate the memcpy call to do the
     // concatenation for us. Make a memcpy to copy the nul byte with align = 1.
     B.CreateMemCpy(CpyDst, Src,
-                   ConstantInt::get(TD->getIntPtrType(Src->getType()),
-                                    Len + 1), 1);
+                   ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
     return Dst;
   }
 };

@@ -427,9 +429,8 @@ struct StrChrOpt : public LibCallOptimization {
     if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
       return 0;

-    Type *PT = FT->getParamType(0);
     return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
-                      ConstantInt::get(TD->getIntPtrType(PT), Len),
+                      ConstantInt::get(TD->getIntPtrType(*Context), Len),
                       B, TD, TLI);
   }

@@ -523,9 +524,8 @@ struct StrCmpOpt : public LibCallOptimization {
     // These optimizations require DataLayout.
     if (!TD) return 0;

-    Type *PT = FT->getParamType(0);
     return EmitMemCmp(Str1P, Str2P,
-                      ConstantInt::get(TD->getIntPtrType(PT),
+                      ConstantInt::get(TD->getIntPtrType(*Context),
                                        std::min(Len1, Len2)), B, TD, TLI);
   }

@@ -607,7 +607,7 @@ struct StrCpyOpt : public LibCallOptimization {
     // We have enough information to now generate the memcpy call to do the
     // copy for us. Make a memcpy to copy the nul byte with align = 1.
     B.CreateMemCpy(Dst, Src,
-                   ConstantInt::get(TD->getIntPtrType(Dst->getType()), Len), 1);
+                   ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
     return Dst;
   }
 };

@@ -524,14 +524,6 @@ std::string DataLayout::getStringRepresentation() const {
   return OS.str();
 }

-unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const
-{
-  if (Ty->isPointerTy()) return getTypeSizeInBits(Ty);
-  if (Ty->isVectorTy()
-      && cast<VectorType>(Ty)->getElementType()->isPointerTy())
-    return getTypeSizeInBits(cast<VectorType>(Ty)->getElementType());
-  return getPointerSizeInBits(0);
-}

 uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
   assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");

@@ -679,14 +671,20 @@ IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
 /// least as big as that of a pointer of the given pointer (vector of pointer)
 /// type.
 Type *DataLayout::getIntPtrType(Type *Ty) const {
-  unsigned NumBits = getPointerTypeSizeInBits(Ty);
+#if 0
+  // FIXME: This assert should always have been here, but the review comments
+  // weren't addressed in time, and now there is lots of code "depending" on
+  // this. Uncomment once this is cleaned up.
+  assert(Ty->isPtrOrPtrVectorTy() &&
+         "Expected a pointer or pointer vector type.");
+#endif
+  unsigned NumBits = getTypeSizeInBits(Ty->getScalarType());
   IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
   if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
     return VectorType::get(IntTy, VecTy->getNumElements());
   return IntTy;
 }


 uint64_t DataLayout::getIndexedOffset(Type *ptrTy,
                                       ArrayRef<Value *> Indices) const {
   Type *Ty = ptrTy;

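A short usage sketch of the post-revert getIntPtrType(Type *): a pointer maps to the matching integer type, and a vector of pointers to a vector of integers. The datalayout string, the names, and the DataLayout(StringRef) construction are illustrative assumptions:

#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

void intPtrTypeDemo() {
  LLVMContext Ctx;
  DataLayout TD("e-p:64:64:64");        // 64-bit pointers in address space 0
  Type *Ptr = Type::getInt8PtrTy(Ctx);  // i8*
  Type *Vec = VectorType::get(Type::getInt8PtrTy(Ctx), 4); // <4 x i8*>
  Type *IntA = TD.getIntPtrType(Ptr);   // i64
  Type *IntB = TD.getIntPtrType(Vec);   // <4 x i64>
  // With the assert #if 0'd out, a non-pointer argument silently maps to
  // its own scalar width -- the code "depending" on this that the FIXME notes.
  (void)IntA; (void)IntB;
}
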
@@ -2120,17 +2120,6 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const {
   return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
 }

-/// @brief Determine if a cast is a no-op
-bool CastInst::isNoopCast(const DataLayout &DL) const {
-  unsigned AS = 0;
-  if (getOpcode() == Instruction::PtrToInt)
-    AS = getOperand(0)->getType()->getPointerAddressSpace();
-  else if (getOpcode() == Instruction::IntToPtr)
-    AS = getType()->getPointerAddressSpace();
-  Type *IntPtrTy = DL.getIntPtrType(getContext(), AS);
-  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
-}
-
 /// This function determines if a pair of casts can be eliminated and what
 /// opcode should be used in the elimination. This assumes that there are two
 /// instructions like this:

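With the DataLayout-based overload deleted above, the address-space-0 behavior is still reachable through the surviving overload; a sketch with an assumed helper name:

#include "llvm/DataLayout.h"
#include "llvm/InstrTypes.h"
using namespace llvm;

// Assumed helper, not LLVM API: no-op check against the AS-0 intptr type.
bool isNoopCastWithLayout(const CastInst *CI, const DataLayout &TD) {
  Type *IntPtrTy = TD.getIntPtrType(CI->getContext());
  return CI->isNoopCast(IntPtrTy);
}
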
@@ -215,12 +215,7 @@ unsigned Type::getVectorNumElements() const {
 }

 unsigned Type::getPointerAddressSpace() const {
-  if (isPointerTy())
-    return cast<PointerType>(this)->getAddressSpace();
-  if (isVectorTy())
-    return getSequentialElementType()->getPointerAddressSpace();
-  llvm_unreachable("Should never reach here!");
-  return 0;
+  return cast<PointerType>(this)->getAddressSpace();
 }

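After this hunk, Type::getPointerAddressSpace() no longer looks through vectors of pointers, so callers that handled both shapes must unwrap the element type themselves; a sketch, with the helper name assumed:

#include "llvm/DerivedTypes.h"
using namespace llvm;

// Assumed helper: reconstructs the pre-revert vector handling at the call site.
unsigned pointerAddressSpaceOf(Type *Ty) {
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    Ty = VT->getElementType();
  return Ty->getPointerAddressSpace(); // Ty must be a pointer type here
}
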
@@ -1,43 +0,0 @@
-; RUN: opt -instcombine %s | llvm-dis | FileCheck %s
-target datalayout = "e-p:32:32:32-p1:64:64:64-p2:8:8:8-p3:16:16:16--p4:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"
-
-define i32 @test_as0(i32 addrspace(0)* %A) {
-entry:
-; CHECK: %arrayidx = getelementptr i32* %A, i32 1
-  %arrayidx = getelementptr i32 addrspace(0)* %A, i64 1
-  %y = load i32 addrspace(0)* %arrayidx, align 4
-  ret i32 %y
-}
-
-define i32 @test_as1(i32 addrspace(1)* %A) {
-entry:
-; CHECK: %arrayidx = getelementptr i32 addrspace(1)* %A, i64 1
-  %arrayidx = getelementptr i32 addrspace(1)* %A, i32 1
-  %y = load i32 addrspace(1)* %arrayidx, align 4
-  ret i32 %y
-}
-
-define i32 @test_as2(i32 addrspace(2)* %A) {
-entry:
-; CHECK: %arrayidx = getelementptr i32 addrspace(2)* %A, i8 1
-  %arrayidx = getelementptr i32 addrspace(2)* %A, i32 1
-  %y = load i32 addrspace(2)* %arrayidx, align 4
-  ret i32 %y
-}
-
-define i32 @test_as3(i32 addrspace(3)* %A) {
-entry:
-; CHECK: %arrayidx = getelementptr i32 addrspace(3)* %A, i16 1
-  %arrayidx = getelementptr i32 addrspace(3)* %A, i32 1
-  %y = load i32 addrspace(3)* %arrayidx, align 4
-  ret i32 %y
-}
-
-define i32 @test_as4(i32 addrspace(4)* %A) {
-entry:
-; CHECK: %arrayidx = getelementptr i32 addrspace(4)* %A, i96 1
-  %arrayidx = getelementptr i32 addrspace(4)* %A, i32 1
-  %y = load i32 addrspace(4)* %arrayidx, align 4
-  ret i32 %y
-}

@@ -1,235 +0,0 @@
-; "PLAIN" - No optimizations. This tests the target-independent
-; constant folder.
-; RUN: opt -S -o - < %s | FileCheck --check-prefix=PLAIN %s
-
-target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-p5:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"
-
-; PLAIN: ModuleID = '<stdin>'
-
-; The automatic constant folder in opt does not have targetdata access, so
-; it can't fold gep arithmetic, in general. However, the constant folder run
-; from instcombine and global opt can use targetdata.
-; PLAIN: @G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1)
-@G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1)
-; PLAIN: @G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1)
-@G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1)
-; PLAIN: @F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2)
-@F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2)
-; PLAIN: @F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2)
-@F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2)
-; PLAIN: @H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1)
-@H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1)
-; PLAIN: @H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i8 -1)
-@H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 0 to i1 addrspace(2)*), i8 -1)
-
-
-; The target-independent folder should be able to do some clever
-; simplifications on sizeof, alignof, and offsetof expressions. The
-; target-dependent folder should fold these down to constants.
-; PLAIN-X: @a = constant i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310)
-@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]} addrspace(4)* getelementptr ({[7 x double], [7 x double]} addrspace(4)* null, i64 11) to i64), i64 5))
-
-; PLAIN-X: @b = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
-@b = constant i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64)
-
-; PLAIN-X: @c = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2)
-@c = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64)
-
-; PLAIN-X: @d = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11)
-@d = constant i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64)
-
-; PLAIN-X: @e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
-@e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64)
-
-; PLAIN-X: @f = constant i64 1
-@f = constant i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64)
-
-; PLAIN-X: @g = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
-@g = constant i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64)
-
-; PLAIN-X: @h = constant i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64)
-@h = constant i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i64 1) to i64)
-
-; PLAIN-X: @i = constant i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64)
-@i = constant i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double} addrspace(4)* null, i64 0, i32 1) to i64)
-
-; The target-dependent folder should cast GEP indices to integer-sized pointers.
-
-; PLAIN: @M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1)
-; PLAIN: @N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1)
-; PLAIN: @O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1)
-
-@M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1)
-@N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1)
-@O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1)
-
-; Fold GEP of a GEP. Very simple cases are folded.
-
-; PLAIN-X: @Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 2)
-@ext = external addrspace(3) global [3 x { i32, i32 }]
-@Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 1), i64 1)
-
-; PLAIN-X: @Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1)
-@Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1)
-
-
-; Duplicate all of the above as function return values rather than
-; global initializers.
-
-; PLAIN: define i8 addrspace(1)* @goo8() nounwind {
-; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)*
-; PLAIN: ret i8 addrspace(1)* %t
-; PLAIN: }
-; PLAIN: define i1 addrspace(2)* @goo1() nounwind {
-; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)*
-; PLAIN: ret i1 addrspace(2)* %t
-; PLAIN: }
-; PLAIN: define i8 addrspace(1)* @foo8() nounwind {
-; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)*
-; PLAIN: ret i8 addrspace(1)* %t
-; PLAIN: }
-; PLAIN: define i1 addrspace(2)* @foo1() nounwind {
-; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)*
-; PLAIN: ret i1 addrspace(2)* %t
-; PLAIN: }
-; PLAIN: define i8 addrspace(1)* @hoo8() nounwind {
-; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1) to i8 addrspace(1)*
-; PLAIN: ret i8 addrspace(1)* %t
-; PLAIN: }
-; PLAIN: define i1 addrspace(2)* @hoo1() nounwind {
-; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 -1) to i1 addrspace(2)*
-; PLAIN: ret i1 addrspace(2)* %t
-; PLAIN: }
-define i8 addrspace(1)* @goo8() nounwind {
-  %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)*
-  ret i8 addrspace(1)* %t
-}
-define i1 addrspace(2)* @goo1() nounwind {
-  %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)*
-  ret i1 addrspace(2)* %t
-}
-define i8 addrspace(1)* @foo8() nounwind {
-  %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)*
-  ret i8 addrspace(1)* %t
-}
-define i1 addrspace(2)* @foo1() nounwind {
-  %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)*
-  ret i1 addrspace(2)* %t
-}
-define i8 addrspace(1)* @hoo8() nounwind {
-  %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)*
-  ret i8 addrspace(1)* %t
-}
-define i1 addrspace(2)* @hoo1() nounwind {
-  %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 0 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)*
-  ret i1 addrspace(2)* %t
-}
-
-; PLAIN-X: define i64 @fa() nounwind {
-; PLAIN-X: %t = bitcast i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) to i64
-; PLAIN-X: ret i64 %t
-; PLAIN-X: }
-; PLAIN-X: define i64 @fb() nounwind {
-; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
-; PLAIN-X: ret i64 %t
-; PLAIN-X: }
-; PLAIN-X: define i64 @fc() nounwind {
-; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) to i64
-; PLAIN-X: ret i64 %t
-; PLAIN-X: }
-; PLAIN-X: define i64 @fd() nounwind {
-; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11) to i64
-; PLAIN-X: ret i64 %t
-; PLAIN-X: }
-; PLAIN-X: define i64 @fe() nounwind {
-; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
-; PLAIN-X: ret i64 %t
-; PLAIN-X: }
-; PLAIN-X: define i64 @ff() nounwind {
-; PLAIN-X: %t = bitcast i64 1 to i64
-; PLAIN-X: ret i64 %t
-; PLAIN-X: }
-; PLAIN-X: define i64 @fg() nounwind {
-; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
-; PLAIN-X: ret i64 %t
-; PLAIN-X: }
-; PLAIN-X: define i64 @fh() nounwind {
-; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) to i64
-; PLAIN-X: ret i64 %t
-; PLAIN-X: }
-; PLAIN-X: define i64 @fi() nounwind {
-; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) to i64
-; PLAIN-X: ret i64 %t
-; PLAIN-X: }
-define i64 @fa() nounwind {
-  %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64
-  ret i64 %t
-}
-define i64 @fb() nounwind {
-  %t = bitcast i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) to i64
-  ret i64 %t
-}
-define i64 @fc() nounwind {
-  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64
-  ret i64 %t
-}
-define i64 @fd() nounwind {
-  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) to i64
-  ret i64 %t
-}
-define i64 @fe() nounwind {
-  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64
-  ret i64 %t
-}
-define i64 @ff() nounwind {
-  %t = bitcast i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) to i64
-  ret i64 %t
-}
-define i64 @fg() nounwind {
-  %t = bitcast i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) to i64
-  ret i64 %t
-}
-define i64 @fh() nounwind {
-  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64) to i64
-  ret i64 %t
-}
-define i64 @fi() nounwind {
-  %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double}addrspace(4)* null, i64 0, i32 1) to i64) to i64
-  ret i64 %t
-}
-
-; PLAIN: define i64* @fM() nounwind {
-; PLAIN: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
-; PLAIN: ret i64* %t
-; PLAIN: }
-; PLAIN: define i64* @fN() nounwind {
-; PLAIN: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
-; PLAIN: ret i64* %t
-; PLAIN: }
-; PLAIN: define i64* @fO() nounwind {
-; PLAIN: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
-; PLAIN: ret i64* %t
-; PLAIN: }
-
-define i64* @fM() nounwind {
-  %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
-  ret i64* %t
-}
-define i64* @fN() nounwind {
-  %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
-  ret i64* %t
-}
-define i64* @fO() nounwind {
-  %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
-  ret i64* %t
-}
-
-; PLAIN: define i32 addrspace(1)* @fZ() nounwind {
-; PLAIN: %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)*
-; PLAIN: ret i32 addrspace(1)* %t
-; PLAIN: }
-@ext2 = external addrspace(1) global [3 x { i32, i32 }]
-define i32 addrspace(1)* @fZ() nounwind {
-  %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)*
-  ret i32 addrspace(1)* %t
-}