[VectorCombine] improve readability; NFC
If we are going to allow adjusting the pointer for GEPs, rearranging the code a bit will make it easier to follow.
parent ed4783fc59
commit 12b684ae02
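For context, vectorizeLoadInsert matches a scalar load feeding an insertelement at index 0 and widens it into a single vector load (the in-source comment summarizes it as "inselt undef, load Scalar, 0 --> load VecPtr"). A minimal before/after sketch in IR, assuming the insert type already matches the target's minimum vector register width (otherwise the pass also emits a shuffle; the names and types here are illustrative only):

; Before: scalar load + insert into lane 0.
define <4 x float> @load_insert(float* %p) {
  %s = load float, float* %p, align 4
  %r = insertelement <4 x float> undef, float %s, i32 0
  ret <4 x float> %r
}

; After (sketch): the pointer is bitcast and a full vector is loaded,
; provided isSafeToLoadUnconditionally proves the wider access is safe.
define <4 x float> @load_insert(float* %p) {
  %vecp = bitcast float* %p to <4 x float>*
  %r = load <4 x float>, <4 x float>* %vecp, align 4
  ret <4 x float> %r
}

The upper lanes of the result now hold whatever follows %p in memory, which is acceptable because the original insertelement left them undef.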
@@ -116,15 +116,16 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
     return false;
 
   // TODO: Extend this to match GEP with constant offsets.
-  Value *PtrOp = Load->getPointerOperand()->stripPointerCasts();
-  assert(isa<PointerType>(PtrOp->getType()) && "Expected a pointer type");
-  unsigned AS = Load->getPointerAddressSpace();
+  const DataLayout &DL = I.getModule()->getDataLayout();
+  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
+  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");
 
   // If original AS != Load's AS, we can't bitcast the original pointer and have
   // to use Load's operand instead. Ideally we would want to strip pointer casts
   // without changing AS, but there's no API to do that ATM.
-  if (AS != PtrOp->getType()->getPointerAddressSpace())
-    PtrOp = Load->getPointerOperand();
+  unsigned AS = Load->getPointerAddressSpace();
+  if (AS != SrcPtr->getType()->getPointerAddressSpace())
+    SrcPtr = Load->getPointerOperand();
 
   Type *ScalarTy = Scalar->getType();
   uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
@@ -136,11 +137,9 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   unsigned MinVecNumElts = MinVectorSize / ScalarSize;
   auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
   Align Alignment = Load->getAlign();
-  const DataLayout &DL = I.getModule()->getDataLayout();
-  if (!isSafeToLoadUnconditionally(PtrOp, MinVecTy, Alignment, DL, Load, &DT))
+  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Alignment, DL, Load, &DT))
     return false;
 
   // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
   Type *LoadTy = Load->getType();
   int OldCost = TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
@@ -159,7 +158,7 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   // It is safe and potentially profitable to load a vector directly:
   // inselt undef, load Scalar, 0 --> load VecPtr
   IRBuilder<> Builder(Load);
-  Value *CastedPtr = Builder.CreateBitCast(PtrOp, MinVecTy->getPointerTo(AS));
+  Value *CastedPtr = Builder.CreateBitCast(SrcPtr, MinVecTy->getPointerTo(AS));
   Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);
 
   // If the insert type does not match the target's minimum vector type,
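Relating this back to the TODO in the first hunk: the case the rearrangement prepares for is a load whose address is a getelementptr with a constant, non-zero offset, which the current stripPointerCasts() handling does not look through. A hypothetical example of such input IR (offset, names, and types chosen only for illustration):

define <4 x float> @load_insert_offset(float* %p) {
  ; The GEP adds a constant offset, so SrcPtr cannot simply be the stripped base.
  %gep = getelementptr inbounds float, float* %p, i64 1
  %s = load float, float* %gep, align 4
  %r = insertelement <4 x float> undef, float %s, i32 0
  ret <4 x float> %r
}

Supporting this would presumably mean carrying that constant offset through the existing checks, in particular when isSafeToLoadUnconditionally verifies that the wider vector load is still dereferenceable.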