2017-10-21 05:47:29 +08:00
|
|
|
//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
|
2013-11-23 00:58:05 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2013-11-23 00:58:05 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This pass converts vector operations into scalar operations, in order
|
|
|
|
// to expose optimization opportunities on the individual scalar operations.
|
|
|
|
// It is mainly intended for targets that do not have vector units, but it
|
|
|
|
// may also be useful for revectorizing code to different vector widths.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/Transforms/Scalar/Scalarizer.h"
|
2018-10-10 17:27:45 +08:00
|
|
|
#include "llvm/ADT/PostOrderIterator.h"
|
2017-10-21 05:47:29 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/Twine.h"
|
2016-07-26 04:02:54 +08:00
|
|
|
#include "llvm/Analysis/VectorUtils.h"
|
2017-10-21 05:47:29 +08:00
|
|
|
#include "llvm/IR/Argument.h"
|
|
|
|
#include "llvm/IR/BasicBlock.h"
|
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/DataLayout.h"
|
2019-11-15 17:29:47 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2017-10-21 05:47:29 +08:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
2013-11-23 00:58:05 +08:00
|
|
|
#include "llvm/IR/IRBuilder.h"
|
2014-03-06 11:23:41 +08:00
|
|
|
#include "llvm/IR/InstVisitor.h"
|
2017-10-21 05:47:29 +08:00
|
|
|
#include "llvm/IR/InstrTypes.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
|
|
|
#include "llvm/IR/LLVMContext.h"
|
|
|
|
#include "llvm/IR/Module.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/InitializePasses.h"
|
2013-11-23 00:58:05 +08:00
|
|
|
#include "llvm/Pass.h"
|
2017-10-21 05:47:29 +08:00
|
|
|
#include "llvm/Support/Casting.h"
|
2019-11-15 06:47:11 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2017-10-21 05:47:29 +08:00
|
|
|
#include "llvm/Support/MathExtras.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2017-10-21 05:47:29 +08:00
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <iterator>
|
|
|
|
#include <map>
|
|
|
|
#include <utility>
|
2013-11-23 00:58:05 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 10:55:47 +08:00
|
|
|
#define DEBUG_TYPE "scalarizer"
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// This is disabled by default because having separate loads and stores
|
|
|
|
// makes it more likely that the -combiner-alias-analysis limits will be
|
|
|
|
// reached.
|
|
|
|
static cl::opt<bool>
|
|
|
|
ScalarizeLoadStore("scalarize-load-store", cl::init(false), cl::Hidden,
|
|
|
|
cl::desc("Allow the scalarizer pass to scalarize loads and store"));
|
|
|
|
|
2013-11-23 00:58:05 +08:00
|
|
|
namespace {

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
using ScatterMap = std::map<Value *, ValueVector>;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If Cache is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  // Non-null iff V is a pointer to a vector rather than a vector value.
  PointerType *PtrTy;
  // Local cache used when no external CachePtr was supplied.
  ValueVector Tmp;
  unsigned Size;
};

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// UnarySplitter(UO)(Builder, X, Name) uses Builder to create
// a unary operator like UO called Name with operand X.
struct UnarySplitter {
  UnarySplitter(UnaryOperator &uo) : UO(uo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op, const Twine &Name) const {
    return Builder.CreateUnOp(UO.getOpcode(), Op, Name);
  }

  UnaryOperator &UO;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
  uint64_t getElemAlign(unsigned I) {
    return MinAlign(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  uint64_t VecAlign = 0;

  // The size of each element.
  uint64_t ElemSize = 0;
};

// The worker that performs the actual scalarization: visits each
// instruction, scatters vector operands, and gathers scalar results.
// Each visit* method returns true if the instruction was scalarized.
class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind, DominatorTree *DT)
    : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind), DT(DT) {
  }

  bool visit(Function &F);

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitUnaryOperator(UnaryOperator &UO);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &ICI);

private:
  Scatterer scatter(Instruction *Point, Value *V);
  void gather(Instruction *Op, const ValueVector &CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadataAndIRFlags(Instruction *Op, const ValueVector &CV);
  bool getVectorLayout(Type *Ty, unsigned Alignment, VectorLayout &Layout,
                       const DataLayout &DL);
  bool finish();

  template<typename T> bool splitUnary(Instruction &, const T &);
  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  // Per-function state, reset by finish().
  ScatterMap Scattered;
  GatherList Gathered;

  unsigned ParallelLoopAccessMDKind;

  DominatorTree *DT;
};

// Legacy pass-manager wrapper around ScalarizerVisitor.
class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage& AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};

} // end anonymous namespace
|
2017-10-21 05:47:29 +08:00
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Register the legacy pass and its DominatorTree dependency with the
// pass registry.
char ScalarizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)
|
2013-11-23 00:58:05 +08:00
|
|
|
|
|
|
|
// Construct a Scatterer for V, which must be a vector or a pointer to a
// vector. New instructions, if needed, are inserted before bbi in bb.
// If cachePtr is non-null it is used (and sized) as the component cache;
// otherwise components are cached locally in Tmp.
Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
  : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getElementType();
  Size = cast<VectorType>(Ty)->getNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}
|
|
|
|
|
|
|
|
// Return component I, creating a new Value for it if necessary.
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    // V is a pointer to a vector: bitcast it once to a pointer to the
    // element type (cached as CV[0]) and GEP from there for I != 0.
    Type *ElTy = cast<VectorType>(PtrTy->getElementType())->getElementType();
    if (!CV[0]) {
      Type *NewPtrTy = PointerType::get(ElTy, PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(ElTy, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for. This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    // No insert-chain hit: fall back to an explicit extractelement.
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Legacy pass entry point: gather the analyses the visitor needs, then
// delegate all of the work to a ScalarizerVisitor.
bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DominatorTree *DomTree =
      &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LLVMContext &Ctx = F.getParent()->getContext();
  unsigned MDKind = Ctx.getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizerVisitor Visitor(MDKind, DomTree);
  return Visitor.visit(F);
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Public factory used by the pass pipeline to create the legacy pass.
FunctionPass *llvm::createScalarizerPass() {
  auto *Pass = new ScalarizerLegacyPass();
  return Pass;
}
|
|
|
|
|
|
|
|
// Walk every instruction of F, scalarizing where possible, then stitch the
// scalar pieces back together via finish(). Returns true if F was changed.
bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  // To ensure we replace gathered components correctly we need to do an ordered
  // traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      // Advance before any erasure so the iterator stays valid.
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}
|
|
|
|
|
|
|
|
// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // When scalarizing PHI nodes we might try to examine/rewrite InsertElement
    // nodes in predecessors. If those predecessors are unreachable from entry,
    // then the IR in those blocks could have unexpected properties resulting in
    // infinite loops in Scatterer::operator[]. By simply treating values
    // originating from instructions in unreachable blocks as undef we do not
    // need to analyse them further.
    if (!DT->isReachableFromEntry(VOp->getParent()))
      return Scatterer(Point->getParent(), Point->getIterator(),
                       UndefValue::get(V->getType()));
    // Put the scattered form of an instruction directly after the
    // instruction.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
                     V, &Scattered[V]);
  }
  // In the fallback case, just put the scattered before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V);
}
|
|
|
|
|
|
|
|
// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  // Since we're not deleting Op yet, stub out its operands, so that it
  // doesn't make anything live unnecessarily.
  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
    Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));

  transferMetadataAndIRFlags(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      // Components may be lazily created, so some slots can be null.
      if (V == nullptr)
        continue;

      Instruction *Old = cast<Instruction>(V);
      CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      Old->eraseFromParent();
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}
|
|
|
|
|
|
|
|
// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}
|
|
|
|
|
|
|
|
// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void ScalarizerVisitor::transferMetadataAndIRFlags(Instruction *Op,
                                                   const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    // Components may be constants rather than instructions; skip those.
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      New->copyIRFlags(Op);
      // Preserve the original debug location if the scalar piece has none.
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}
|
|
|
|
|
|
|
|
// Try to fill in Layout from Ty, returning true on success. Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
bool ScalarizerVisitor::getVectorLayout(Type *Ty, unsigned Alignment,
                                        VectorLayout &Layout, const DataLayout &DL) {
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return false;

  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (!DL.typeSizeEqualsStoreSize(Layout.ElemTy))
    return false;

  if (Alignment)
    Layout.VecAlign = Alignment;
  else
    Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return true;
}
|
|
|
|
|
2019-06-05 07:01:36 +08:00
|
|
|
// Scalarize one-operand instruction I, using Split(Builder, X, Name)
|
|
|
|
// to create an instruction like I with operand X and name Name.
|
|
|
|
template<typename Splitter>
|
|
|
|
bool ScalarizerVisitor::splitUnary(Instruction &I, const Splitter &Split) {
|
|
|
|
VectorType *VT = dyn_cast<VectorType>(I.getType());
|
|
|
|
if (!VT)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned NumElems = VT->getNumElements();
|
|
|
|
IRBuilder<> Builder(&I);
|
|
|
|
Scatterer Op = scatter(&I, I.getOperand(0));
|
|
|
|
assert(Op.size() == NumElems && "Mismatched unary operation");
|
|
|
|
ValueVector Res;
|
|
|
|
Res.resize(NumElems);
|
|
|
|
for (unsigned Elem = 0; Elem < NumElems; ++Elem)
|
|
|
|
Res[Elem] = Split(Builder, Op[Elem], I.getName() + ".i" + Twine(Elem));
|
|
|
|
gather(&I, Res);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-11-23 00:58:05 +08:00
|
|
|
// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op0 = scatter(&I, I.getOperand(0));
  Scatterer Op1 = scatter(&I, I.getOperand(1));
  assert(Op0.size() == NumElems && "Mismatched binary operation");
  assert(Op1.size() == NumElems && "Mismatched binary operation");
  // One scalar result per element, gathered back at the end of the pass.
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem],
                      I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}
|
|
|
|
|
2016-07-26 04:02:54 +08:00
|
|
|
// Return true if the intrinsic ID can be scalarized element-wise.
// (Name kept as-is for source compatibility despite the spelling.)
static bool isTriviallyScalariable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}
|
|
|
|
|
|
|
|
// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               VectorType *Ty) {
  return Intrinsic::getDeclaration(M, ID, { Ty->getScalarType() });
}
|
|
|
|
|
|
|
|
/// If a call to a vector typed intrinsic function, split into a scalar call per
|
|
|
|
/// element if possible for the intrinsic.
|
2018-11-21 22:00:17 +08:00
|
|
|
bool ScalarizerVisitor::splitCall(CallInst &CI) {
|
2016-07-26 04:02:54 +08:00
|
|
|
VectorType *VT = dyn_cast<VectorType>(CI.getType());
|
|
|
|
if (!VT)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
Function *F = CI.getCalledFunction();
|
|
|
|
if (!F)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
Intrinsic::ID ID = F->getIntrinsicID();
|
|
|
|
if (ID == Intrinsic::not_intrinsic || !isTriviallyScalariable(ID))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned NumElems = VT->getNumElements();
|
|
|
|
unsigned NumArgs = CI.getNumArgOperands();
|
|
|
|
|
|
|
|
ValueVector ScalarOperands(NumArgs);
|
|
|
|
SmallVector<Scatterer, 8> Scattered(NumArgs);
|
|
|
|
|
|
|
|
Scattered.resize(NumArgs);
|
|
|
|
|
|
|
|
// Assumes that any vector type has the same number of elements as the return
|
|
|
|
// vector type, which is true for all current intrinsics.
|
|
|
|
for (unsigned I = 0; I != NumArgs; ++I) {
|
|
|
|
Value *OpI = CI.getOperand(I);
|
|
|
|
if (OpI->getType()->isVectorTy()) {
|
|
|
|
Scattered[I] = scatter(&CI, OpI);
|
|
|
|
assert(Scattered[I].size() == NumElems && "mismatched call operands");
|
|
|
|
} else {
|
|
|
|
ScalarOperands[I] = OpI;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ValueVector Res(NumElems);
|
|
|
|
ValueVector ScalarCallOps(NumArgs);
|
|
|
|
|
|
|
|
Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, VT);
|
|
|
|
IRBuilder<> Builder(&CI);
|
|
|
|
|
|
|
|
// Perform actual scalarization, taking care to preserve any scalar operands.
|
|
|
|
for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
|
|
|
|
ScalarCallOps.clear();
|
|
|
|
|
|
|
|
for (unsigned J = 0; J != NumArgs; ++J) {
|
|
|
|
if (hasVectorInstrinsicScalarOpd(ID, J))
|
|
|
|
ScalarCallOps.push_back(ScalarOperands[J]);
|
|
|
|
else
|
|
|
|
ScalarCallOps.push_back(Scattered[J][Elem]);
|
|
|
|
}
|
|
|
|
|
|
|
|
Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
|
|
|
|
CI.getName() + ".i" + Twine(Elem));
|
|
|
|
}
|
|
|
|
|
|
|
|
gather(&CI, Res);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Scalarize a vector select into one scalar select per lane.  The condition
// may itself be a vector (per-lane condition) or a single scalar shared by
// every lane.
bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&SI);
  // Scatter the true/false operands first, preserving the order in which
  // extraction instructions are materialized.
  Scatterer TrueVals = scatter(&SI, SI.getOperand(1));
  Scatterer FalseVals = scatter(&SI, SI.getOperand(2));
  assert(TrueVals.size() == NumElems && "Mismatched select");
  assert(FalseVals.size() == NumElems && "Mismatched select");
  ValueVector Res(NumElems);

  Value *Cond = SI.getOperand(0);
  if (Cond->getType()->isVectorTy()) {
    // Per-lane condition: each lane selects on its own condition element.
    Scatterer CondElems = scatter(&SI, Cond);
    assert(CondElems.size() == NumElems && "Mismatched select");
    for (unsigned Elt = 0; Elt != NumElems; ++Elt)
      Res[Elt] = Builder.CreateSelect(CondElems[Elt], TrueVals[Elt],
                                      FalseVals[Elt],
                                      SI.getName() + ".i" + Twine(Elt));
  } else {
    // Scalar condition shared by all lanes.
    for (unsigned Elt = 0; Elt != NumElems; ++Elt)
      Res[Elt] = Builder.CreateSelect(Cond, TrueVals[Elt], FalseVals[Elt],
                                      SI.getName() + ".i" + Twine(Elt));
  }
  gather(&SI, Res);
  return true;
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Integer compares scalarize like any other binary operation.
bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  ICmpSplitter Splitter(ICI);
  return splitBinary(ICI, Splitter);
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Floating-point compares scalarize like any other binary operation.
bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  FCmpSplitter Splitter(FCI);
  return splitBinary(FCI, Splitter);
}
|
|
|
|
|
2019-06-05 07:01:36 +08:00
|
|
|
// Unary operators (e.g. fneg) are split one lane at a time.
bool ScalarizerVisitor::visitUnaryOperator(UnaryOperator &UO) {
  UnarySplitter Splitter(UO);
  return splitUnary(UO, Splitter);
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Binary operators are split one lane at a time.
bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  BinarySplitter Splitter(BO);
  return splitBinary(BO, Splitter);
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Scalarize a vector GEP (a GEP that produces a vector of pointers) into one
// scalar GEP per lane.  Scalar GEPs are left untouched.
bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those cases,
  // splat the pointer into a vector value, and scatter that vector.
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  // Ops[I] holds the per-lane values of index operand I.
  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);

    // The indices might be scalars even if it's a vector GEP. In those cases,
    // splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);

    Ops[I] = scatter(&GEPI, Op);
  }

  // Build one scalar GEP per lane, pairing the lane's base pointer with the
  // lane's element of every index operand.
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    // Propagate the inbounds flag to each scalarized GEP (CreateGEP may have
    // constant-folded, hence the dyn_cast guard).
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Scalarize a vector-to-vector cast by applying the same cast opcode to each
// lane individually.
bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Src = scatter(&CI, CI.getOperand(0));
  assert(Src.size() == NumElems && "Mismatched cast");

  Type *ElemTy = VT->getElementType();
  ValueVector Res(NumElems);
  for (unsigned Idx = 0; Idx != NumElems; ++Idx)
    Res[Idx] = Builder.CreateCast(CI.getOpcode(), Src[Idx], ElemTy,
                                  CI.getName() + ".i" + Twine(Idx));
  gather(&CI, Res);
  return true;
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Scalarize a vector-to-vector bitcast.  Three shapes are handled:
//   * same element count: a per-lane bitcast;
//   * destination has more elements ("fan-out"): each source element is
//     bitcast to a small vector and its pieces copied out;
//   * destination has fewer elements ("fan-in"): groups of source elements
//     are packed into a small vector and bitcast to one destination element.
bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    // 1:1 element mapping: bitcast each lane independently.
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
    unsigned FanOut = DstNumElems / SrcNumElems;
    Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      // Scatter the intermediate <N x t2> and append its pieces in order.
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
    unsigned FanIn = SrcNumElems / DstNumElems;
    Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      // Accumulate FanIn source elements into a temporary <N x t1> value...
      Value *V = UndefValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      // ...then bitcast the group to a single destination element.
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Scalarize a shufflevector: each result lane is simply a copy of the mask's
// selected element (or undef for an undef mask entry), so no new
// instructions are created at all.
bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer LHS = scatter(&SVI, SVI.getOperand(0));
  Scatterer RHS = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res(NumElems);

  for (unsigned Idx = 0; Idx != NumElems; ++Idx) {
    int MaskElt = SVI.getMaskValue(Idx);
    if (MaskElt < 0) {
      // Undef mask entry.
      Res[Idx] = UndefValue::get(VT->getElementType());
    } else if (unsigned(MaskElt) < LHS.size()) {
      // Index into the first operand.
      Res[Idx] = LHS[MaskElt];
    } else {
      // Index past the first operand selects from the second.
      Res[Idx] = RHS[MaskElt - LHS.size()];
    }
  }
  gather(&SVI, Res);
  return true;
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Scalarize a vector PHI: first create an empty scalar PHI per lane, then
// fill in the incoming values.  The two phases are separate because all the
// new PHIs must exist before any incoming value is scattered.
bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res(NumElems);

  unsigned NumIncoming = PHI.getNumOperands();
  // Phase 1: one scalar PHI per lane.
  for (unsigned Elt = 0; Elt != NumElems; ++Elt)
    Res[Elt] = Builder.CreatePHI(VT->getElementType(), NumIncoming,
                                 PHI.getName() + ".i" + Twine(Elt));

  // Phase 2: route each incoming vector's lanes into the scalar PHIs.
  for (unsigned In = 0; In != NumIncoming; ++In) {
    Scatterer Incoming = scatter(&PHI, PHI.getIncomingValue(In));
    BasicBlock *Pred = PHI.getIncomingBlock(In);
    for (unsigned Elt = 0; Elt != NumElems; ++Elt)
      cast<PHINode>(Res[Elt])->addIncoming(Incoming[Elt], Pred);
  }
  gather(&PHI, Res);
  return true;
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Scalarize a vector load into one scalar load per lane, with per-element
// alignment derived from the original load's alignment and layout.
bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  // Load/store scalarization is optional and guarded by a pass flag.
  if (!ScalarizeLoadStore)
    return false;
  // Only simple (non-volatile, non-atomic) loads are handled.
  if (!LI.isSimple())
    return false;

  // getVectorLayout fails (returning false) when the vector can't be
  // decomposed into individually addressable elements.
  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
                       LI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&LI);
  // The pointer operand is itself scattered into one pointer per element.
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    // Each lane's load uses the alignment the layout computes for that
    // element's offset.
    Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
                                       Align(Layout.getElemAlign(I)),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Scalarize a vector store into one scalar store per lane, mirroring
// visitLoadInst.
bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  // Load/store scalarization is optional and guarded by a pass flag.
  if (!ScalarizeLoadStore)
    return false;
  // Only simple (non-volatile, non-atomic) stores are handled.
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
                       SI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&SI);
  // Both the pointer operand and the stored value are split per lane.
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    // Per-element alignment comes from the layout helper.
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], MaybeAlign(Align));
  }
  // Stores produce no value to gather; instead copy metadata/IR flags from
  // the original store onto every scalar store.
  transferMetadataAndIRFlags(&SI, Stores);
  return true;
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// Calls are handled entirely by splitCall, which scalarizes
// trivially-scalarizable intrinsic calls and rejects everything else.
bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  bool Split = splitCall(CI);
  return Split;
}
|
|
|
|
|
2013-11-23 00:58:05 +08:00
|
|
|
// Delete the instructions that we scalarized.  If a full vector result
// is still needed, recreate it using InsertElements.
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      auto *Ty = cast<VectorType>(Op->getType());
      Value *Res = UndefValue::get(Ty);
      BasicBlock *BB = Op->getParent();
      unsigned Count = Ty->getNumElements();
      IRBuilder<> Builder(Op);
      // PHIs must stay grouped at the top of their block, so the rebuilding
      // InsertElements are emitted at the block's first insertion point
      // rather than directly before the PHI.
      if (isa<PHINode>(Op))
        Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
      for (unsigned I = 0; I < Count; ++I)
        Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                          Op->getName() + ".upto" + Twine(I));
      // Keep the original name on the rebuilt value, then redirect all uses
      // before erasing the scalarized instruction.
      Res->takeName(Op);
      Op->replaceAllUsesWith(Res);
    }
    Op->eraseFromParent();
  }
  Gathered.clear();
  Scattered.clear();
  return true;
}
|
|
|
|
|
2018-11-21 22:00:17 +08:00
|
|
|
// New-pass-manager entry point: run the scalarizer over one function.
PreservedAnalyses ScalarizerPass::run(Function &F, FunctionAnalysisManager &AM) {
  Module &M = *F.getParent();
  // Look up the metadata kind ID once per run; presumably used by the
  // visitor when copying parallel-loop-access metadata — confirm in the
  // visitor's definition.
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT);
  bool Changed = Impl.visit(F);
  // Instructions are rewritten in place without CFG changes, so the
  // dominator tree remains valid and can be preserved.
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return Changed ? PA : PreservedAnalyses::all();
}
|