//===- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass merges loads/stores to/from sequential memory addresses into vector
// loads/stores. Although there's nothing GPU-specific in here, this pass is
// motivated by the microarchitectural quirks of nVidia and AMD GPUs.
//
// (For simplicity below we talk about loads only, but everything also applies
// to stores.)
//
// This pass is intended to be run late in the pipeline, after other
// vectorization opportunities have been exploited. So the assumption here is
// that immediately following our new vector load we'll need to extract out the
// individual elements of the load, so we can operate on them individually.
//
// On CPUs this transformation is usually not beneficial, because extracting the
// elements of a vector register is expensive on most architectures. It's
// usually better just to load each element individually into its own scalar
// register.
//
// However, nVidia and AMD GPUs don't have proper vector registers. Instead, a
// "vector load" loads directly into a series of scalar registers. In effect,
// extracting the elements of the vector is free. It's therefore always
// beneficial to vectorize a sequence of loads on these architectures.
//
// Vectorizing (perhaps a better name might be "coalescing") loads can have
// large performance impacts on GPU kernels, and opportunities for vectorizing
// are common in GPU code. This pass tries very hard to find such
// opportunities; its runtime is quadratic in the number of loads in a BB.
//
// Some CPU architectures, such as ARM, have instructions that load into
// multiple scalar registers, similar to a GPU vectorized load. In theory ARM
// could use this pass (with some modifications), but currently it implements
// its own pass to do something similar to what we do here.
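//
// As a small sketch of the transformation (illustrative IR only; the value
// names are hypothetical, not from a particular test), two adjacent loads
//
//   %l0 = load i32, i32* %p0
//   %l1 = load i32, i32* %p1       ; %p1 is one element past %p0
//
// become, roughly, one wide load plus extracts that are free on these GPUs:
//
//   %vec = load <2 x i32>, <2 x i32>* %pv
//   %l0 = extractelement <2 x i32> %vec, i32 0
//   %l1 = extractelement <2 x i32> %vec, i32 1
//
//===----------------------------------------------------------------------===//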
#include "llvm/ADT/APInt.h"
|
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/ADT/MapVector.h"
|
|
|
|
#include "llvm/ADT/PostOrderIterator.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/ADT/iterator_range.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/Analysis/MemoryLocation.h"
|
2016-08-13 08:04:08 +08:00
|
|
|
#include "llvm/Analysis/OrderedBasicBlock.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/Analysis/ScalarEvolution.h"
|
|
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
|
|
|
#include "llvm/Analysis/VectorUtils.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/IR/Attributes.h"
|
|
|
|
#include "llvm/IR/BasicBlock.h"
|
|
|
|
#include "llvm/IR/Constants.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/IR/IRBuilder.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/IR/InstrTypes.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/IR/Instructions.h"
|
Add an @llvm.sideeffect intrinsic
This patch implements Chandler's idea [0] for supporting languages that
require support for infinite loops with side effects, such as Rust, providing
part of a solution to bug 965 [1].
Specifically, it adds an `llvm.sideeffect()` intrinsic, which has no actual
effect, but which appears to optimization passes to have obscure side effects,
such that they don't optimize away loops containing it. It also teaches
several optimization passes to ignore this intrinsic, so that it doesn't
significantly impact optimization in most cases.
As discussed on llvm-dev [2], this patch is the first of two major parts.
The second part, to change LLVM's semantics to have defined behavior
on infinite loops by default, with a function attribute for opting into
potential-undefined-behavior, will be implemented and posted for review in
a separate patch.
[0] http://lists.llvm.org/pipermail/llvm-dev/2015-July/088103.html
[1] https://bugs.llvm.org/show_bug.cgi?id=965
[2] http://lists.llvm.org/pipermail/llvm-dev/2017-October/118632.html
Differential Revision: https://reviews.llvm.org/D38336
llvm-svn: 317729
2017-11-09 05:59:51 +08:00
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/IR/Module.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/IR/User.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/IR/Value.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2017-04-27 00:39:58 +08:00
|
|
|
#include "llvm/Support/KnownBits.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include "llvm/Support/MathExtras.h"
|
2016-07-01 07:11:38 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2016-09-10 06:20:14 +08:00
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
2016-07-08 04:10:35 +08:00
|
|
|
#include "llvm/Transforms/Vectorize.h"
|
2017-10-18 05:27:42 +08:00
|
|
|
#include <algorithm>
|
|
|
|
#include <cassert>
|
|
|
|
#include <cstdlib>
|
|
|
|
#include <tuple>
|
|
|
|
#include <utility>
|
2016-07-01 07:11:38 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "load-store-vectorizer"

STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");

// FIXME: Assuming stack alignment of 4 is always good enough
static const unsigned StackAdjustedAlignment = 4;

namespace {

using InstrList = SmallVector<Instruction *, 8>;
using InstrListMap = MapVector<Value *, InstrList>;

class Vectorizer {
  Function &F;
  AliasAnalysis &AA;
  DominatorTree &DT;
  ScalarEvolution &SE;
  TargetTransformInfo &TTI;
  const DataLayout &DL;
  IRBuilder<> Builder;

public:
  Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT,
             ScalarEvolution &SE, TargetTransformInfo &TTI)
      : F(F), AA(AA), DT(DT), SE(SE), TTI(TTI),
        DL(F.getParent()->getDataLayout()), Builder(SE.getContext()) {}

  bool run();

private:
  Value *getPointerOperand(Value *I) const;

  GetElementPtrInst *getSourceGEP(Value *Src) const;

  unsigned getPointerAddressSpace(Value *I);

  unsigned getAlignment(LoadInst *LI) const {
    unsigned Align = LI->getAlignment();
    if (Align != 0)
      return Align;

    return DL.getABITypeAlignment(LI->getType());
  }

  unsigned getAlignment(StoreInst *SI) const {
    unsigned Align = SI->getAlignment();
    if (Align != 0)
      return Align;

    return DL.getABITypeAlignment(SI->getValueOperand()->getType());
  }

  bool isConsecutiveAccess(Value *A, Value *B);

  /// After vectorization, reorder the instructions that I depends on
  /// (the instructions defining its operands), to ensure they dominate I.
  void reorder(Instruction *I);

  /// Returns the first and the last instructions in Chain.
  std::pair<BasicBlock::iterator, BasicBlock::iterator>
  getBoundaryInstrs(ArrayRef<Instruction *> Chain);

  /// Erases the original instructions after vectorizing.
  void eraseInstructions(ArrayRef<Instruction *> Chain);

  /// "Legalize" the vector type that would be produced by combining \p
  /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
  /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
  /// expected to have more than 4 elements.
  std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
  splitOddVectorElts(ArrayRef<Instruction *> Chain, unsigned ElementSizeBits);

  /// Finds the largest prefix of Chain that's vectorizable, checking for
  /// intervening instructions which may affect the memory accessed by the
  /// instructions within Chain.
  ///
  /// The elements of \p Chain must be all loads or all stores and must be in
  /// address order.
  ArrayRef<Instruction *> getVectorizablePrefix(ArrayRef<Instruction *> Chain);

  /// Collects load and store instructions to vectorize.
  std::pair<InstrListMap, InstrListMap> collectInstructions(BasicBlock *BB);

  /// Processes the collected instructions, the \p Map. The values of \p Map
  /// should be all loads or all stores.
  bool vectorizeChains(InstrListMap &Map);

  /// Finds the load/stores to consecutive memory addresses and vectorizes them.
  bool vectorizeInstructions(ArrayRef<Instruction *> Instrs);

  /// Vectorizes the load instructions in Chain.
  bool
  vectorizeLoadChain(ArrayRef<Instruction *> Chain,
                     SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Vectorizes the store instructions in Chain.
  bool
  vectorizeStoreChain(ArrayRef<Instruction *> Chain,
                      SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Check if this load/store access is misaligned.
  bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                          unsigned Alignment);
};

class LoadStoreVectorizer : public FunctionPass {
public:
  static char ID;

  LoadStoreVectorizer() : FunctionPass(ID) {
    initializeLoadStoreVectorizerPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "GPU Load and Store Vectorizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

char LoadStoreVectorizer::ID = 0;

INITIALIZE_PASS_BEGIN(LoadStoreVectorizer, DEBUG_TYPE,
                      "Vectorize load and store instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoadStoreVectorizer, DEBUG_TYPE,
                    "Vectorize load and store instructions", false, false)

Pass *llvm::createLoadStoreVectorizerPass() {
  return new LoadStoreVectorizer();
}

// The real propagateMetadata expects a SmallVector<Value*>, but we deal in
// vectors of Instructions.
static void propagateMetadata(Instruction *I, ArrayRef<Instruction *> IL) {
  SmallVector<Value *, 8> VL(IL.begin(), IL.end());
  propagateMetadata(I, VL);
}

bool LoadStoreVectorizer::runOnFunction(Function &F) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  Vectorizer V(F, AA, DT, SE, TTI);
  return V.run();
}

// Vectorizer Implementation
bool Vectorizer::run() {
  bool Changed = false;

  // Scan the blocks in the function in post order.
  for (BasicBlock *BB : post_order(&F)) {
    InstrListMap LoadRefs, StoreRefs;
    std::tie(LoadRefs, StoreRefs) = collectInstructions(BB);
    Changed |= vectorizeChains(LoadRefs);
    Changed |= vectorizeChains(StoreRefs);
  }

  return Changed;
}

Value *Vectorizer::getPointerOperand(Value *I) const {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

unsigned Vectorizer::getPointerAddressSpace(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

GetElementPtrInst *Vectorizer::getSourceGEP(Value *Src) const {
  // First strip pointer bitcasts. Make sure pointee size is the same with
  // and without casts.
  // TODO: a stride set by the add instruction below can match the difference
  // in pointee type size here. Currently it will not be vectorized.
  Value *SrcPtr = getPointerOperand(Src);
  Value *SrcBase = SrcPtr->stripPointerCasts();
  if (DL.getTypeStoreSize(SrcPtr->getType()->getPointerElementType()) ==
      DL.getTypeStoreSize(SrcBase->getType()->getPointerElementType()))
    SrcPtr = SrcBase;
  return dyn_cast<GetElementPtrInst>(SrcPtr);
}

// FIXME: Merge with llvm::isConsecutiveAccess
bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getPointerAddressSpace(A);
  unsigned ASB = getPointerAddressSpace(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same size type.
  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  Type *PtrATy = PtrA->getType()->getPointerElementType();
  Type *PtrBTy = PtrB->getType()->getPointerElementType();
  if (PtrA == PtrB ||
      DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
      DL.getTypeStoreSize(PtrATy->getScalarType()) !=
          DL.getTypeStoreSize(PtrBTy->getScalarType()))
    return false;

  APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *C = SE.getConstant(BaseDelta);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
  if (X == PtrSCEVB)
    return true;

  // Sometimes even this doesn't work, because SCEV can't always see through
  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
  // things the hard way.

  // Look through GEPs after checking they're the same except for the last
  // index.
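  // For example (a sketch; the value names are hypothetical), the pair
  //   %gepA = getelementptr i32, i32* %base, i64 %extA  ; %extA = zext i32 %x
  //   %gepB = getelementptr i32, i32* %base, i64 %extB  ; %extB = zext i32 %y
  // is accepted below when %y can be proven to equal %x + 1 without
  // overflowing.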
  GetElementPtrInst *GEPA = getSourceGEP(A);
  GetElementPtrInst *GEPB = getSourceGEP(B);
  if (!GEPA || !GEPB || GEPA->getNumOperands() != GEPB->getNumOperands())
    return false;
  unsigned FinalIndex = GEPA->getNumOperands() - 1;
  for (unsigned i = 0; i < FinalIndex; i++)
    if (GEPA->getOperand(i) != GEPB->getOperand(i))
      return false;

  Instruction *OpA = dyn_cast<Instruction>(GEPA->getOperand(FinalIndex));
  Instruction *OpB = dyn_cast<Instruction>(GEPB->getOperand(FinalIndex));
  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
      OpA->getType() != OpB->getType())
    return false;

  // Only look through a ZExt/SExt.
  if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
    return false;

  bool Signed = isa<SExtInst>(OpA);

  OpA = dyn_cast<Instruction>(OpA->getOperand(0));
  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
  if (!OpA || !OpB || OpA->getType() != OpB->getType())
    return false;

  // Now we need to prove that adding 1 to OpA won't overflow.
  bool Safe = false;
  // First attempt: if OpB is an add with NSW/NUW, and OpB is 1 added to OpA,
  // we're okay.
  if (OpB->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(OpB->getOperand(1)) &&
      cast<ConstantInt>(OpB->getOperand(1))->getSExtValue() > 0) {
    if (Signed)
      Safe = cast<BinaryOperator>(OpB)->hasNoSignedWrap();
    else
      Safe = cast<BinaryOperator>(OpB)->hasNoUnsignedWrap();
  }

  unsigned BitWidth = OpA->getType()->getScalarSizeInBits();

  // Second attempt:
  // If any bits are known to be zero other than the sign bit in OpA, we can
  // add 1 to it while guaranteeing no overflow of any sort.
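  // (For instance, if OpA is an i32 whose lowest bit is known to be zero,
  // countMaxTrailingOnes() is 0 below, so adding 1 only sets bit 0 and no
  // carry can reach the sign bit.)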
  if (!Safe) {
    KnownBits Known(BitWidth);
    computeKnownBits(OpA, Known, DL, 0, nullptr, OpA, &DT);
    if (Known.countMaxTrailingOnes() < (BitWidth - 1))
      Safe = true;
  }

  if (!Safe)
    return false;

  const SCEV *OffsetSCEVA = SE.getSCEV(OpA);
  const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
  const SCEV *One = SE.getConstant(APInt(BitWidth, 1));
  const SCEV *X2 = SE.getAddExpr(OffsetSCEVA, One);
  return X2 == OffsetSCEVB;
}

void Vectorizer::reorder(Instruction *I) {
  OrderedBasicBlock OBB(I->getParent());
  SmallPtrSet<Instruction *, 16> InstructionsToMove;
  SmallVector<Instruction *, 16> Worklist;

  Worklist.push_back(I);
  while (!Worklist.empty()) {
    Instruction *IW = Worklist.pop_back_val();
    int NumOperands = IW->getNumOperands();
    for (int i = 0; i < NumOperands; i++) {
      Instruction *IM = dyn_cast<Instruction>(IW->getOperand(i));
      if (!IM || IM->getOpcode() == Instruction::PHI)
        continue;

      // If IM is in another BB, no need to move it, because this pass only
      // vectorizes instructions within one BB.
      if (IM->getParent() != I->getParent())
        continue;

      if (!OBB.dominates(IM, I)) {
        InstructionsToMove.insert(IM);
        Worklist.push_back(IM);
      }
    }
  }

  // All instructions to move should follow I. Start from I, not from begin().
  for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E;
       ++BBI) {
    if (!InstructionsToMove.count(&*BBI))
      continue;
    Instruction *IM = &*BBI;
    --BBI;
    IM->removeFromParent();
    IM->insertBefore(I);
  }
}

std::pair<BasicBlock::iterator, BasicBlock::iterator>
Vectorizer::getBoundaryInstrs(ArrayRef<Instruction *> Chain) {
  Instruction *C0 = Chain[0];
  BasicBlock::iterator FirstInstr = C0->getIterator();
  BasicBlock::iterator LastInstr = C0->getIterator();

  BasicBlock *BB = C0->getParent();
  unsigned NumFound = 0;
  for (Instruction &I : *BB) {
    if (!is_contained(Chain, &I))
      continue;

    ++NumFound;
    if (NumFound == 1) {
      FirstInstr = I.getIterator();
    }
    if (NumFound == Chain.size()) {
      LastInstr = I.getIterator();
      break;
    }
  }

  // Range is [first, last).
  return std::make_pair(FirstInstr, ++LastInstr);
}

void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
  SmallVector<Instruction *, 16> Instrs;
  for (Instruction *I : Chain) {
    Value *PtrOperand = getPointerOperand(I);
    assert(PtrOperand && "Instruction must have a pointer operand.");
    Instrs.push_back(I);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
      Instrs.push_back(GEP);
  }

  // Erase instructions.
  for (Instruction *I : Instrs)
    if (I->use_empty())
      I->eraseFromParent();
}

std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
Vectorizer::splitOddVectorElts(ArrayRef<Instruction *> Chain,
                               unsigned ElementSizeBits) {
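  // Worked example: for a chain of five i16 elements, ElementSizeBytes = 2,
  // SizeBytes = 10 and NumLeft = (10 - 10 % 4) / 2 = 4, so the chain splits
  // into a four-element (8-byte) prefix and a one-element remainder.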
  unsigned ElementSizeBytes = ElementSizeBits / 8;
  unsigned SizeBytes = ElementSizeBytes * Chain.size();
  unsigned NumLeft = (SizeBytes - (SizeBytes % 4)) / ElementSizeBytes;
  if (NumLeft == Chain.size()) {
    if ((NumLeft & 1) == 0)
      NumLeft /= 2; // Split even in half
    else
      --NumLeft;    // Split off last element
  } else if (NumLeft == 0)
    NumLeft = 1;
  return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
}

ArrayRef<Instruction *>
Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
  // These are in BB order, unlike Chain, which is in address order.
  SmallVector<Instruction *, 16> MemoryInstrs;
  SmallVector<Instruction *, 16> ChainInstrs;

  bool IsLoadChain = isa<LoadInst>(Chain[0]);
  DEBUG({
    for (Instruction *I : Chain) {
      if (IsLoadChain)
        assert(isa<LoadInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
      else
        assert(isa<StoreInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
    }
  });

  for (Instruction &I : make_range(getBoundaryInstrs(Chain))) {
    if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
      if (!is_contained(Chain, &I))
        MemoryInstrs.push_back(&I);
      else
        ChainInstrs.push_back(&I);
    } else if (isa<IntrinsicInst>(&I) &&
               cast<IntrinsicInst>(&I)->getIntrinsicID() ==
                   Intrinsic::sideeffect) {
      // Ignore llvm.sideeffect calls.
    } else if (IsLoadChain && (I.mayWriteToMemory() || I.mayThrow())) {
      DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I << '\n');
      break;
    } else if (!IsLoadChain && (I.mayReadOrWriteMemory() || I.mayThrow())) {
      DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I
                   << '\n');
      break;
    }
  }

  OrderedBasicBlock OBB(Chain[0]->getParent());

  // Loop until we find an instruction in ChainInstrs that we can't vectorize.
  unsigned ChainInstrIdx = 0;
  Instruction *BarrierMemoryInstr = nullptr;

  for (unsigned E = ChainInstrs.size(); ChainInstrIdx < E; ++ChainInstrIdx) {
    Instruction *ChainInstr = ChainInstrs[ChainInstrIdx];

    // If a barrier memory instruction was found, chain instructions that follow
    // will not be added to the valid prefix.
    if (BarrierMemoryInstr && OBB.dominates(BarrierMemoryInstr, ChainInstr))
      break;

    // Check (in BB order) if any instruction prevents ChainInstr from being
    // vectorized. Find and store the first such "conflicting" instruction.
    for (Instruction *MemInstr : MemoryInstrs) {
      // If a barrier memory instruction was found, do not check past it.
      if (BarrierMemoryInstr && OBB.dominates(BarrierMemoryInstr, MemInstr))
        break;

      if (isa<LoadInst>(MemInstr) && isa<LoadInst>(ChainInstr))
        continue;

      // We can ignore the alias as long as the load comes before the store,
      // because that means we won't be moving the load past the store to
      // vectorize it (the vectorized load is inserted at the location of the
      // first load in the chain).
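      // (Inserting at the first load matters: with the insert point at the
      // last load, a sequence like "load a[1], store a[1], store a[0],
      // load a[2]" could be miscompiled to "store a[0,1], load a[1,2]".)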
      if (isa<StoreInst>(MemInstr) && isa<LoadInst>(ChainInstr) &&
          OBB.dominates(ChainInstr, MemInstr))
        continue;

      // Same case, but in reverse.
      if (isa<LoadInst>(MemInstr) && isa<StoreInst>(ChainInstr) &&
          OBB.dominates(MemInstr, ChainInstr))
        continue;

      if (!AA.isNoAlias(MemoryLocation::get(MemInstr),
                        MemoryLocation::get(ChainInstr))) {
        DEBUG({
          dbgs() << "LSV: Found alias:\n"
                    " Aliasing instruction and pointer:\n"
                 << " " << *MemInstr << '\n'
                 << " " << *getPointerOperand(MemInstr) << '\n'
                 << " Aliased instruction and pointer:\n"
                 << " " << *ChainInstr << '\n'
                 << " " << *getPointerOperand(ChainInstr) << '\n';
        });
        // Save this aliasing memory instruction as a barrier, but allow other
        // instructions that precede the barrier to be vectorized with this one.
        BarrierMemoryInstr = MemInstr;
        break;
      }
    }
    // Continue the search only for store chains, since vectorizing stores that
    // precede an aliasing load is valid. Conversely, vectorizing loads is valid
    // up to an aliasing store, but should not pull loads from further down in
    // the basic block.
    if (IsLoadChain && BarrierMemoryInstr) {
      // The BarrierMemoryInstr is a store that precedes ChainInstr.
      assert(OBB.dominates(BarrierMemoryInstr, ChainInstr));
      break;
    }
  }

  // Find the largest prefix of Chain whose elements are all in
  // ChainInstrs[0, ChainInstrIdx). This is the largest vectorizable prefix of
  // Chain. (Recall that Chain is in address order, but ChainInstrs is in BB
  // order.)
  SmallPtrSet<Instruction *, 8> VectorizableChainInstrs(
      ChainInstrs.begin(), ChainInstrs.begin() + ChainInstrIdx);
  unsigned ChainIdx = 0;
  for (unsigned ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
    if (!VectorizableChainInstrs.count(Chain[ChainIdx]))
      break;
  }
  return Chain.slice(0, ChainIdx);
}

std::pair<InstrListMap, InstrListMap>
Vectorizer::collectInstructions(BasicBlock *BB) {
  InstrListMap LoadRefs;
  InstrListMap StoreRefs;

  for (Instruction &I : *BB) {
    if (!I.mayReadOrWriteMemory())
      continue;

    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      if (!LI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeLoad(LI))
        continue;

      Type *Ty = LI->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and does not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>)
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      Value *Ptr = LI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2)
        continue;

      // Make sure all the users of a vector are constant-index extracts.
      if (isa<VectorType>(Ty) && !llvm::all_of(LI->users(), [](const User *U) {
            const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
            return EEI && isa<ConstantInt>(EEI->getOperand(1));
          }))
        continue;

      // Save the load locations.
      Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
      LoadRefs[ObjPtr].push_back(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeStore(SI))
        continue;

      Type *Ty = SI->getValueOperand()->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and does not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>)
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      Value *Ptr = SI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2)
        continue;

      if (isa<VectorType>(Ty) && !llvm::all_of(SI->users(), [](const User *U) {
            const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
            return EEI && isa<ConstantInt>(EEI->getOperand(1));
          }))
        continue;

      // Save store location.
      Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
      StoreRefs[ObjPtr].push_back(SI);
    }
  }

  return {LoadRefs, StoreRefs};
}

bool Vectorizer::vectorizeChains(InstrListMap &Map) {
  bool Changed = false;

  for (const std::pair<Value *, InstrList> &Chain : Map) {
    unsigned Size = Chain.second.size();
    if (Size < 2)
      continue;

    DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");

    // Process the stores in chunks of 64.
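    // (64 matches the fixed size of the ConsecutiveChain array in
    // vectorizeInstructions, so every chunk index fits in it.)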
    for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
      unsigned Len = std::min<unsigned>(CE - CI, 64);
      ArrayRef<Instruction *> Chunk(&Chain.second[CI], Len);
      Changed |= vectorizeInstructions(Chunk);
    }
  }

  return Changed;
}

bool Vectorizer::vectorizeInstructions(ArrayRef<Instruction *> Instrs) {
  DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size() << " instructions.\n");
  SmallVector<int, 16> Heads, Tails;
  int ConsecutiveChain[64];

  // Do a quadratic search on all of the given loads/stores and find all of the
  // pairs of loads/stores that follow each other.
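  // Afterwards, ConsecutiveChain[i] holds the index of an access that
  // immediately follows Instrs[i] in address order, or -1 if none was found.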
  for (int i = 0, e = Instrs.size(); i < e; ++i) {
    ConsecutiveChain[i] = -1;
    for (int j = e - 1; j >= 0; --j) {
      if (i == j)
        continue;

      if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
        if (ConsecutiveChain[i] != -1) {
          int CurDistance = std::abs(ConsecutiveChain[i] - i);
          int NewDistance = std::abs(ConsecutiveChain[i] - j);
          if (j < i || NewDistance > CurDistance)
            continue; // Should not insert.
        }

        Tails.push_back(j);
        Heads.push_back(i);
        ConsecutiveChain[i] = j;
      }
    }
  }

  bool Changed = false;
  SmallPtrSet<Instruction *, 16> InstructionsProcessed;

  for (int Head : Heads) {
    if (InstructionsProcessed.count(Instrs[Head]))
      continue;
    bool LongerChainExists = false;
    for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
      if (Head == Tails[TIt] &&
          !InstructionsProcessed.count(Instrs[Heads[TIt]])) {
        LongerChainExists = true;
        break;
      }
    if (LongerChainExists)
      continue;

    // We found an instr that starts a chain. Now follow the chain and try to
    // vectorize it.
    SmallVector<Instruction *, 16> Operands;
    int I = Head;
    while (I != -1 && (is_contained(Tails, I) || is_contained(Heads, I))) {
      if (InstructionsProcessed.count(Instrs[I]))
        break;

      Operands.push_back(Instrs[I]);
      I = ConsecutiveChain[I];
    }

    bool Vectorized = false;
    if (isa<LoadInst>(*Operands.begin()))
      Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed);
    else
      Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed);

    Changed |= Vectorized;
  }

  return Changed;
}
|
|
|
|
|
2016-07-14 05:20:01 +08:00
|
|
|
bool Vectorizer::vectorizeStoreChain(
|
2016-07-28 07:06:00 +08:00
|
|
|
ArrayRef<Instruction *> Chain,
|
|
|
|
SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
|
2016-07-01 07:11:38 +08:00
|
|
|
StoreInst *S0 = cast<StoreInst>(Chain[0]);
|
2016-07-01 08:37:01 +08:00
|
|
|
|
[LSV] Avoid adding vectors of pointers as candidates
Summary:
We no longer add vectors of pointers as candidates for
load/store vectorization. It does not seem to work anyway,
but without this patch we can end up in asserts when trying
to create casts between an integer type and the pointer of
vectors type.
The test case I've added used to assert like this when trying to
cast between i64 and <2 x i16*>:
opt: ../lib/IR/Instructions.cpp:2565: Assertion `castIsValid(op, S, Ty) && "Invalid cast!"' failed.
#0 PrintStackTraceSignalHandler(void*)
#1 SignalHandler(int)
#2 __restore_rt
#3 __GI_raise
#4 __GI_abort
#5 __GI___assert_fail
#6 llvm::CastInst::Create(llvm::Instruction::CastOps, llvm::Value*, llvm::Type*, llvm::Twine const&, llvm::Instruction*)
#7 llvm::IRBuilder<llvm::ConstantFolder, llvm::IRBuilderDefaultInserter>::CreateBitOrPointerCast(llvm::Value*, llvm::Type*, llvm::Twine const&)
#8 Vectorizer::vectorizeStoreChain(llvm::ArrayRef<llvm::Instruction*>, llvm::SmallPtrSet<llvm::Instruction*, 16u>*)
Reviewers: arsenm
Reviewed By: arsenm
Subscribers: nhaehnle, llvm-commits
Differential Revision: https://reviews.llvm.org/D39296
llvm-svn: 316665
2017-10-26 21:59:15 +08:00
|
|
|
// If the vector has an int element, default to int for the whole store.
|
2016-07-01 08:37:01 +08:00
|
|
|
Type *StoreTy;
|
2016-07-28 07:06:00 +08:00
|
|
|
for (Instruction *I : Chain) {
|
|
|
|
StoreTy = cast<StoreInst>(I)->getValueOperand()->getType();
|
2016-07-01 08:37:01 +08:00
|
|
|
if (StoreTy->isIntOrIntVectorTy())
|
|
|
|
break;
|
2016-07-01 09:55:52 +08:00
|
|
|
|
|
|
|
if (StoreTy->isPtrOrPtrVectorTy()) {
|
|
|
|
StoreTy = Type::getIntNTy(F.getParent()->getContext(),
|
|
|
|
DL.getTypeSizeInBits(StoreTy));
|
|
|
|
break;
|
|
|
|
}
|
2016-07-01 08:37:01 +08:00
|
|
|
}

  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
  unsigned AS = S0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  unsigned Alignment = getAlignment(S0);
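
  // Worked example (hypothetical target): with 128-bit vector registers
  // (VecRegSize = 128) and i32 elements (Sz = 32), VF = 4, i.e. up to four
  // consecutive stores can be merged into a single <4 x i32> store.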

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;
  if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment, AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
           vectorizeStoreChain(Chains.second, InstructionsProcessed);
  }
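
  // Note the bitwise | above: unlike ||, it does not short-circuit, so both
  // halves of the split chain are attempted even if the first one succeeds.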

  VectorType *VecTy;
  VectorType *VecStoreTy = dyn_cast<VectorType>(StoreTy);
  if (VecStoreTy)
    VecTy = VectorType::get(StoreTy->getScalarType(),
                            Chain.size() * VecStoreTy->getNumElements());
  else
    VecTy = VectorType::get(StoreTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    DEBUG(dbgs() << "LSV: Chain doesn't match the vector factor."
                    " Creating two separate arrays.\n");
    return vectorizeStoreChain(Chain.slice(0, TargetVF),
                               InstructionsProcessed) |
           vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed);
  }
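
  // For example, an 8-element chain on a target whose preferred store factor
  // TargetVF is 4 gets split into two 4-element chains and retried.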

  DEBUG({
    dbgs() << "LSV: Stores to vectorize:\n";
    for (Instruction *I : Chain)
      dbgs() << "  " << *I << "\n";
  });

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the store is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
    if (S0->getPointerAddressSpace() != 0)
      return false;

    unsigned NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
                                                   StackAdjustedAlignment,
                                                   DL, S0, nullptr, &DT);
    if (NewAlign < StackAdjustedAlignment)
      return false;
  }
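
  // The address-space-0 restriction above exists because only objects in the
  // default address space (notably allocas) can be realigned here:
  // getOrEnforceKnownAlignment may raise the alignment of an underlying
  // alloca to StackAdjustedAlignment, making the vector access safe.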

  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  Builder.SetInsertPoint(&*Last);

  Value *Vec = UndefValue::get(VecTy);

  if (VecStoreTy) {
    unsigned VecWidth = VecStoreTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
        unsigned NewIdx = J + I * VecWidth;
        Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
                                                      Builder.getInt32(J));
        if (Extract->getType() != StoreTy->getScalarType())
          Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());

        Value *Insert =
            Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx));
        Vec = Insert;
      }
    }
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      Value *Extract = Store->getValueOperand();
      if (Extract->getType() != StoreTy->getScalarType())
        Extract =
            Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType());

      Value *Insert =
          Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(I));
      Vec = Insert;
    }
  }
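
  // For illustration, merging two scalar stores of i32 values %a and %b to
  // consecutive addresses yields IR along these lines (names hypothetical):
  //   %vec0 = insertelement <2 x i32> undef, i32 %a, i32 0
  //   %vec1 = insertelement <2 x i32> %vec0, i32 %b, i32 1
  //   %ptr  = bitcast i32* %p to <2 x i32>*
  //   store <2 x i32> %vec1, <2 x i32>* %ptr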

  // This cast is safe because Builder.CreateStore() always creates a bona fide
  // StoreInst.
  StoreInst *SI = cast<StoreInst>(
      Builder.CreateStore(Vec, Builder.CreateBitCast(S0->getPointerOperand(),
                                                     VecTy->getPointerTo(AS))));
  propagateMetadata(SI, Chain);
  SI->setAlignment(Alignment);

  eraseInstructions(Chain);
  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::vectorizeLoadChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  LoadInst *L0 = cast<LoadInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole load.
  Type *LoadTy;
  for (const auto &V : Chain) {
    LoadTy = cast<LoadInst>(V)->getType();
    if (LoadTy->isIntOrIntVectorTy())
      break;

    if (LoadTy->isPtrOrPtrVectorTy()) {
      LoadTy = Type::getIntNTy(F.getParent()->getContext(),
                               DL.getTypeSizeInBits(LoadTy));
      break;
    }
  }

  unsigned Sz = DL.getTypeSizeInBits(LoadTy);
  unsigned AS = L0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  unsigned Alignment = getAlignment(L0);

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;
  if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
           vectorizeLoadChain(Chains.second, InstructionsProcessed);
  }

  VectorType *VecTy;
  VectorType *VecLoadTy = dyn_cast<VectorType>(LoadTy);
  if (VecLoadTy)
    VecTy = VectorType::get(LoadTy->getScalarType(),
                            Chain.size() * VecLoadTy->getNumElements());
  else
    VecTy = VectorType::get(LoadTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    DEBUG(dbgs() << "LSV: Chain doesn't match the vector factor."
                    " Creating two separate arrays.\n");
    return vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed) |
           vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed);
  }

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the load is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
    if (L0->getPointerAddressSpace() != 0)
      return false;

    unsigned NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(),
                                                   StackAdjustedAlignment,
                                                   DL, L0, nullptr, &DT);
    if (NewAlign < StackAdjustedAlignment)
      return false;

    Alignment = NewAlign;
  }
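
  // Unlike the store path, the load path adopts the enforced alignment
  // (Alignment = NewAlign) so the widened load below is emitted with the
  // alignment of the realigned stack object.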

  DEBUG({
    dbgs() << "LSV: Loads to vectorize:\n";
    for (Instruction *I : Chain)
      I->dump();
  });

  // getVectorizablePrefix already computed getBoundaryInstrs. The value of
  // Last may have changed since then, but the value of First won't have. If it
  // matters, we could compute getBoundaryInstrs only once and reuse it here.
  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  // Insert the vector load at the position of the first load in the chain,
  // not the last. Otherwise a sequence such as
  //   load a[1]; store a[1]; store a[0]; load a[2]
  // could be vectorized incorrectly as "store a[0,1]; load a[1,2]".
  // Instructions that must come before the vectorized load are moved ahead
  // of it by reorder() below.
  Builder.SetInsertPoint(&*First);

  Value *Bitcast =
      Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
  // This cast is safe because Builder.CreateLoad always creates a bona fide
  // LoadInst.
  LoadInst *LI = cast<LoadInst>(Builder.CreateLoad(Bitcast));
  propagateMetadata(LI, Chain);
  LI->setAlignment(Alignment);
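
  // For illustration, two adjacent i32 loads from %p become roughly (names
  // hypothetical):
  //   %ptr  = bitcast i32* %p to <2 x i32>*
  //   %wide = load <2 x i32>, <2 x i32>* %ptr
  // with each original load's users rewired to extractelements from %wide
  // below.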

  if (VecLoadTy) {
    SmallVector<Instruction *, 16> InstrsToErase;

    unsigned VecWidth = VecLoadTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      for (auto Use : Chain[I]->users()) {
        // All users of vector loads are ExtractElement instructions with
        // constant indices, otherwise we would have bailed before now.
        Instruction *UI = cast<Instruction>(Use);
        unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
        unsigned NewIdx = Idx + I * VecWidth;
        Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx),
                                                UI->getName());
        if (V->getType() != UI->getType())
          V = Builder.CreateBitCast(V, UI->getType());

        // Replace the old instruction.
        UI->replaceAllUsesWith(V);
        InstrsToErase.push_back(UI);
      }
    }
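
    // Index remapping example: when two <2 x i16> loads are widened into one
    // <4 x i16> load, an extract of element 1 from the second original load
    // becomes an extract of element 3 (1 + 1 * 2) from the wide load.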

    // Bitcast might not be an Instruction, if the value being loaded is a
    // constant. In that case, no need to reorder anything.
    if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
      reorder(BitcastInst);

    for (auto I : InstrsToErase)
      I->eraseFromParent();
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      Value *CV = Chain[I];
      Value *V =
          Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName());
      if (V->getType() != CV->getType()) {
        V = Builder.CreateBitOrPointerCast(V, CV->getType());
      }

      // Replace the old instruction.
      CV->replaceAllUsesWith(V);
    }

    if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
      reorder(BitcastInst);
  }

  eraseInstructions(Chain);

  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                                    unsigned Alignment) {
  if (Alignment % SzInBytes == 0)
    return false;
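
  // An access whose alignment is a multiple of its size is naturally
  // aligned; e.g. a 16-byte access with align 16 (or align 32) never needs
  // the target query below.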

  bool Fast = false;
  bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
                                                   SzInBytes * 8, AddressSpace,
                                                   Alignment, &Fast);
  DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
               << " and fast? " << Fast << "\n";);
  return !Allows || !Fast;
}