Convert a bunch of callers from DataLayout::getIndexedOffset() to GEPOperator::accumulateConstantOffset().
The latter API is nicer than the former, and is correct regarding wrap-around offsets (if anyone cares). There are a few more places left with duplicated code, which I'll remove soon. llvm-svn: 171259
This commit is contained in:
parent 12be928dfb
commit b6ad98224a
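The pattern applied at each call site below can be sketched as follows. The wrapper function and its name (computeGEPByteOffset) are illustrative only and not part of this commit; GEPOperator::accumulateConstantOffset(), APInt, and DataLayout::getPointerSizeInBits() are the APIs actually exercised in the diff:

#include "llvm/ADT/APInt.h"
#include "llvm/DataLayout.h"
#include "llvm/Operator.h"
using namespace llvm;

// Illustrative helper: compute the constant byte offset of a GEP, if any.
static bool computeGEPByteOffset(const DataLayout &TD, GEPOperator &GEP,
                                 int64_t &ByteOffset) {
  // Old pattern (removed by this commit):
  //   SmallVector<Value*, 8> Ops(GEP.idx_begin(), GEP.idx_end());
  //   ByteOffset = TD.getIndexedOffset(GEP.getPointerOperandType(), Ops);

  // New pattern: accumulate into an APInt sized to the pointer width.
  // accumulateConstantOffset() returns false if any index is non-constant,
  // which subsumes the hasAllConstantIndices() checks removed below.
  APInt Offset(TD.getPointerSizeInBits(), 0);
  if (!GEP.accumulateConstantOffset(TD, Offset))
    return false;
  ByteOffset = Offset.getSExtValue();
  return true;
}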
@@ -510,11 +510,10 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
 SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
   SizeOffsetType PtrData = compute(GEP.getPointerOperand());
-  if (!bothKnown(PtrData) || !GEP.hasAllConstantIndices())
+  APInt Offset(IntTyBits, 0);
+  if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(*TD, Offset))
     return unknown();
 
-  SmallVector<Value*, 8> Ops(GEP.idx_begin(), GEP.idx_end());
-  APInt Offset(IntTyBits,TD->getIndexedOffset(GEP.getPointerOperandType(),Ops));
   return std::make_pair(PtrData.first, PtrData.second + Offset);
 }
 
@@ -36,6 +36,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Module.h"
+#include "llvm/Operator.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/Format.h"
 #include "llvm/Support/MathExtras.h"
@@ -1477,19 +1478,14 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
   case Instruction::GetElementPtr: {
     const DataLayout &TD = *AP.TM.getDataLayout();
     // Generate a symbolic expression for the byte address
-    const Constant *PtrVal = CE->getOperand(0);
-    SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
-    int64_t Offset = TD.getIndexedOffset(PtrVal->getType(), IdxVec);
+    APInt OffsetAI(TD.getPointerSizeInBits(), 0);
+    cast<GEPOperator>(CE)->accumulateConstantOffset(TD, OffsetAI);
 
     const MCExpr *Base = lowerConstant(CE->getOperand(0), AP);
-    if (Offset == 0)
+    if (!OffsetAI)
       return Base;
 
-    // Truncate/sext the offset to the pointer size.
-    unsigned Width = TD.getPointerSizeInBits();
-    if (Width < 64)
-      Offset = SignExtend64(Offset, Width);
-
+    int64_t Offset = OffsetAI.getSExtValue();
     return MCBinaryExpr::CreateAdd(Base, MCConstantExpr::Create(Offset, Ctx),
                                    Ctx);
   }
@@ -21,6 +21,7 @@
 #include "llvm/DerivedTypes.h"
 #include "llvm/ExecutionEngine/GenericValue.h"
 #include "llvm/Module.h"
+#include "llvm/Operator.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/DynamicLibrary.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -555,11 +556,11 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
   case Instruction::GetElementPtr: {
     // Compute the index
     GenericValue Result = getConstantValue(Op0);
-    SmallVector<Value*, 8> Indices(CE->op_begin()+1, CE->op_end());
-    uint64_t Offset = TD->getIndexedOffset(Op0->getType(), Indices);
+    APInt Offset(TD->getPointerSizeInBits(), 0);
+    cast<GEPOperator>(CE)->accumulateConstantOffset(*TD, Offset);
 
     char* tmp = (char*) Result.PointerVal;
-    Result = PTOGV(tmp + Offset);
+    Result = PTOGV(tmp + Offset.getSExtValue());
     return Result;
   }
   case Instruction::Trunc: {
@@ -35,6 +35,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Module.h"
+#include "llvm/Operator.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/FormattedStream.h"
@@ -164,20 +165,14 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
   case Instruction::GetElementPtr: {
     const DataLayout &TD = *AP.TM.getDataLayout();
     // Generate a symbolic expression for the byte address
-    const Constant *PtrVal = CE->getOperand(0);
-    SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
-    int64_t Offset = TD.getIndexedOffset(PtrVal->getType(), IdxVec);
+    APInt OffsetAI(TD.getPointerSizeInBits(), 0);
+    cast<GEPOperator>(CE)->accumulateConstantOffset(TD, OffsetAI);
 
     const MCExpr *Base = LowerConstant(CE->getOperand(0), AP);
-    if (Offset == 0)
+    if (!OffsetAI)
       return Base;
 
-    // Truncate/sext the offset to the pointer size.
-    if (TD.getPointerSizeInBits() != 64) {
-      int SExtAmount = 64-TD.getPointerSizeInBits();
-      Offset = (Offset << SExtAmount) >> SExtAmount;
-    }
-
+    int64_t Offset = OffsetAI.getSExtValue();
     return MCBinaryExpr::CreateAdd(Base, MCConstantExpr::Create(Offset, Ctx),
                                    Ctx);
   }
@@ -346,13 +346,11 @@ bool FunctionComparator::isEquivalentGEP(const GEPOperator *GEP1,
                                          const GEPOperator *GEP2) {
   // When we have target data, we can reduce the GEP down to the value in bytes
   // added to the address.
-  if (TD && GEP1->hasAllConstantIndices() && GEP2->hasAllConstantIndices()) {
-    SmallVector<Value *, 8> Indices1(GEP1->idx_begin(), GEP1->idx_end());
-    SmallVector<Value *, 8> Indices2(GEP2->idx_begin(), GEP2->idx_end());
-    uint64_t Offset1 = TD->getIndexedOffset(GEP1->getPointerOperandType(),
-                                            Indices1);
-    uint64_t Offset2 = TD->getIndexedOffset(GEP2->getPointerOperandType(),
-                                            Indices2);
+  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 1;
+  APInt Offset1(BitWidth, 0), Offset2(BitWidth, 0);
+  if (TD &&
+      GEP1->accumulateConstantOffset(*TD, Offset1) &&
+      GEP2->accumulateConstantOffset(*TD, Offset2)) {
     return Offset1 == Offset2;
   }
 
@@ -1337,17 +1337,15 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
       // GEP computes a constant offset, see if we can convert these three
      // instructions into fewer. This typically happens with unions and other
      // non-type-safe code.
+      APInt Offset(TD ? TD->getPointerSizeInBits() : 1, 0);
       if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0)) &&
-          GEP->hasAllConstantIndices()) {
-        SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
-        int64_t Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);
-
+          GEP->accumulateConstantOffset(*TD, Offset)) {
         // Get the base pointer input of the bitcast, and the type it points to.
         Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
         Type *GEPIdxTy =
           cast<PointerType>(OrigBase->getType())->getElementType();
         SmallVector<Value*, 8> NewIndices;
-        if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices)) {
+        if (FindElementAtOffset(GEPIdxTy, Offset.getSExtValue(), NewIndices)) {
          // If we were able to index down into an element, create the GEP
          // and bitcast the result. This eliminates one bitcast, potentially
          // two.
@@ -1309,17 +1309,15 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   /// into a gep of the original struct. This is important for SROA and alias
   /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
   if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
+    APInt Offset(TD ? TD->getPointerSizeInBits() : 1, 0);
     if (TD &&
-        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices() &&
+        !isa<BitCastInst>(BCI->getOperand(0)) &&
+        GEP.accumulateConstantOffset(*TD, Offset) &&
         StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
 
-      // Determine how much the GEP moves the pointer.
-      SmallVector<Value*, 8> Ops(GEP.idx_begin(), GEP.idx_end());
-      int64_t Offset = TD->getIndexedOffset(GEP.getPointerOperandType(), Ops);
-
       // If this GEP instruction doesn't move the pointer, just replace the GEP
       // with a bitcast of the real input to the dest type.
-      if (Offset == 0) {
+      if (!Offset) {
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocaInst>(BCI->getOperand(0)) ||
@@ -1343,7 +1341,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       SmallVector<Value*, 8> NewIndices;
       Type *InTy =
         cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
-      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
+      if (FindElementAtOffset(InTy, Offset.getSExtValue(), NewIndices)) {
         Value *NGEP = GEP.isInBounds() ?
           Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
           Builder->CreateGEP(BCI->getOperand(0), NewIndices);
@@ -1642,44 +1642,6 @@ private:
 };
 }
 
-/// \brief Accumulate the constant offsets in a GEP into a single APInt offset.
-///
-/// If the provided GEP is all-constant, the total byte offset formed by the
-/// GEP is computed and Offset is set to it. If the GEP has any non-constant
-/// operands, the function returns false and the value of Offset is unmodified.
-static bool accumulateGEPOffsets(const DataLayout &TD, GEPOperator &GEP,
-                                 APInt &Offset) {
-  APInt GEPOffset(Offset.getBitWidth(), 0);
-  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
-       GTI != GTE; ++GTI) {
-    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
-    if (!OpC)
-      return false;
-    if (OpC->isZero()) continue;
-
-    // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
-      unsigned ElementIdx = OpC->getZExtValue();
-      const StructLayout *SL = TD.getStructLayout(STy);
-      GEPOffset += APInt(Offset.getBitWidth(),
-                         SL->getElementOffset(ElementIdx));
-      continue;
-    }
-
-    APInt TypeSize(Offset.getBitWidth(),
-                   TD.getTypeAllocSize(GTI.getIndexedType()));
-    if (VectorType *VTy = dyn_cast<VectorType>(*GTI)) {
-      assert((TD.getTypeSizeInBits(VTy->getScalarType()) % 8) == 0 &&
-             "vector element size is not a multiple of 8, cannot GEP over it");
-      TypeSize = TD.getTypeSizeInBits(VTy->getScalarType()) / 8;
-    }
-
-    GEPOffset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) * TypeSize;
-  }
-  Offset = GEPOffset;
-  return true;
-}
-
 /// \brief Build a GEP out of a base pointer and indices.
 ///
 /// This will return the BasePtr if that is valid, or build a new GEP
@@ -1882,7 +1844,7 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD,
   // First fold any existing GEPs into the offset.
   while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
     APInt GEPOffset(Offset.getBitWidth(), 0);
-    if (!accumulateGEPOffsets(TD, *GEP, GEPOffset))
+    if (!GEP->accumulateConstantOffset(TD, GEPOffset))
       break;
     Offset += GEPOffset;
     Ptr = GEP->getPointerOperand();