//===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form.  In cases that this kicks in, it can be a significant
// performance win.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop, ctlz, cttz
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set.  It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// We should enhance the memset/memcpy recognition to handle multiple stores in
// the loop.  This would handle things like:
//   void foo(_Complex float *P)
//     for (i) { __real__(*P) = 0;  __imag__(*P) = 0; }
//
// We should enhance this to handle negative strides through memory.
// Alternatively (and perhaps better) we could rely on an earlier pass to force
// forward iteration through memory, which is generally better for cache
// behavior.  Negative strides *do* happen for memset/memcpy loops.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//
|
2010-12-27 03:39:38 +08:00
|
|
|
|
|
|
|
#define DEBUG_TYPE "loop-idiom"
|
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2011-01-04 15:46:33 +08:00
|
|
|
#include "llvm/IntrinsicInst.h"
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
#include "llvm/Module.h"
|
2010-12-28 02:39:08 +08:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
2010-12-27 03:39:38 +08:00
|
|
|
#include "llvm/Analysis/LoopPass.h"
|
|
|
|
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
|
implement enough of the memset inference algorithm to recognize and insert
memsets. This is still missing one important validity check, but this is enough
to compile stuff like this:
void test0(std::vector<char> &X) {
for (std::vector<char>::iterator I = X.begin(), E = X.end(); I != E; ++I)
*I = 0;
}
void test1(std::vector<int> &X) {
for (long i = 0, e = X.size(); i != e; ++i)
X[i] = 0x01010101;
}
With:
$ clang t.cpp -S -o - -O2 -emit-llvm | opt -loop-idiom | opt -O3 | llc
to:
__Z5test0RSt6vectorIcSaIcEE: ## @_Z5test0RSt6vectorIcSaIcEE
## BB#0: ## %entry
subq $8, %rsp
movq (%rdi), %rax
movq 8(%rdi), %rsi
cmpq %rsi, %rax
je LBB0_2
## BB#1: ## %bb.nph
subq %rax, %rsi
movq %rax, %rdi
callq ___bzero
LBB0_2: ## %for.end
addq $8, %rsp
ret
...
__Z5test1RSt6vectorIiSaIiEE: ## @_Z5test1RSt6vectorIiSaIiEE
## BB#0: ## %entry
subq $8, %rsp
movq (%rdi), %rax
movq 8(%rdi), %rdx
subq %rax, %rdx
cmpq $4, %rdx
jb LBB1_2
## BB#1: ## %for.body.preheader
andq $-4, %rdx
movl $1, %esi
movq %rax, %rdi
callq _memset
LBB1_2: ## %for.end
addq $8, %rsp
ret
llvm-svn: 122573
2010-12-27 07:42:51 +08:00
|
|
|
#include "llvm/Analysis/ScalarEvolutionExpander.h"
|
2010-12-27 04:45:45 +08:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
|
|
|
#include "llvm/Target/TargetData.h"
|
2011-02-19 06:22:15 +08:00
|
|
|
#include "llvm/Target/TargetLibraryInfo.h"
|
2010-12-27 08:03:23 +08:00
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
2010-12-27 03:39:38 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
implement enough of the memset inference algorithm to recognize and insert
memsets. This is still missing one important validity check, but this is enough
to compile stuff like this:
void test0(std::vector<char> &X) {
for (std::vector<char>::iterator I = X.begin(), E = X.end(); I != E; ++I)
*I = 0;
}
void test1(std::vector<int> &X) {
for (long i = 0, e = X.size(); i != e; ++i)
X[i] = 0x01010101;
}
With:
$ clang t.cpp -S -o - -O2 -emit-llvm | opt -loop-idiom | opt -O3 | llc
to:
__Z5test0RSt6vectorIcSaIcEE: ## @_Z5test0RSt6vectorIcSaIcEE
## BB#0: ## %entry
subq $8, %rsp
movq (%rdi), %rax
movq 8(%rdi), %rsi
cmpq %rsi, %rax
je LBB0_2
## BB#1: ## %bb.nph
subq %rax, %rsi
movq %rax, %rdi
callq ___bzero
LBB0_2: ## %for.end
addq $8, %rsp
ret
...
__Z5test1RSt6vectorIiSaIiEE: ## @_Z5test1RSt6vectorIiSaIiEE
## BB#0: ## %entry
subq $8, %rsp
movq (%rdi), %rax
movq 8(%rdi), %rdx
subq %rax, %rdx
cmpq $4, %rdx
jb LBB1_2
## BB#1: ## %for.body.preheader
andq $-4, %rdx
movl $1, %esi
movq %rax, %rdi
callq _memset
LBB1_2: ## %for.end
addq $8, %rsp
ret
llvm-svn: 122573
2010-12-27 07:42:51 +08:00
|
|
|
#include "llvm/Support/IRBuilder.h"
|
2010-12-27 03:39:38 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2011-01-02 15:36:44 +08:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2010-12-27 03:39:38 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2011-01-02 15:36:44 +08:00
|
|
|
STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
|
|
|
|
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
|
2010-12-27 03:39:38 +08:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
class LoopIdiomRecognize : public LoopPass {
|
2010-12-27 04:45:45 +08:00
|
|
|
Loop *CurLoop;
|
|
|
|
const TargetData *TD;
|
2011-01-03 03:01:03 +08:00
|
|
|
DominatorTree *DT;
|
2010-12-27 04:45:45 +08:00
|
|
|
ScalarEvolution *SE;
|
2011-02-19 06:22:15 +08:00
|
|
|
TargetLibraryInfo *TLI;
|
2010-12-27 03:39:38 +08:00
|
|
|
public:
|
|
|
|
static char ID;
|
|
|
|
explicit LoopIdiomRecognize() : LoopPass(ID) {
|
|
|
|
initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
|
|
|
bool runOnLoop(Loop *L, LPPassManager &LPM);
|
2011-01-03 03:01:03 +08:00
|
|
|
bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
|
|
|
|
SmallVectorImpl<BasicBlock*> &ExitBlocks);
|
2010-12-27 03:39:38 +08:00
|
|
|
|
2010-12-27 04:45:45 +08:00
|
|
|
bool processLoopStore(StoreInst *SI, const SCEV *BECount);
|
2011-01-04 15:46:33 +08:00
|
|
|
bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
|
2011-03-15 00:48:10 +08:00
|
|
|
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
|
|
|
|
unsigned StoreAlignment,
|
|
|
|
Value *SplatValue, Instruction *TheStore,
|
|
|
|
const SCEVAddRecExpr *Ev,
|
|
|
|
const SCEV *BECount);
|
2011-01-02 11:37:56 +08:00
|
|
|
bool processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
|
|
|
|
const SCEVAddRecExpr *StoreEv,
|
|
|
|
const SCEVAddRecExpr *LoadEv,
|
|
|
|
const SCEV *BECount);
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 03:39:38 +08:00
|
|
|
/// This transformation requires natural loop information & requires that
|
|
|
|
/// loop preheaders be inserted into the CFG.
|
|
|
|
///
|
|
|
|
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
|
|
|
|
AU.addRequired<LoopInfo>();
|
|
|
|
AU.addPreserved<LoopInfo>();
|
|
|
|
AU.addRequiredID(LoopSimplifyID);
|
|
|
|
AU.addPreservedID(LoopSimplifyID);
|
|
|
|
AU.addRequiredID(LCSSAID);
|
|
|
|
AU.addPreservedID(LCSSAID);
|
2010-12-28 02:39:08 +08:00
|
|
|
AU.addRequired<AliasAnalysis>();
|
|
|
|
AU.addPreserved<AliasAnalysis>();
|
2010-12-27 03:39:38 +08:00
|
|
|
AU.addRequired<ScalarEvolution>();
|
|
|
|
AU.addPreserved<ScalarEvolution>();
|
|
|
|
AU.addPreserved<DominatorTree>();
|
2011-01-03 03:01:03 +08:00
|
|
|
AU.addRequired<DominatorTree>();
|
2011-02-19 06:22:15 +08:00
|
|
|
AU.addRequired<TargetLibraryInfo>();
|
2010-12-27 03:39:38 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
char LoopIdiomRecognize::ID = 0;
|
|
|
|
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
|
|
|
|
false, false)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
|
2011-01-03 03:01:03 +08:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
|
2010-12-27 03:39:38 +08:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(LCSSA)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
|
2011-02-19 06:22:15 +08:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
|
2010-12-28 02:39:08 +08:00
|
|
|
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
|
2010-12-27 03:39:38 +08:00
|
|
|
INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
|
|
|
|
false, false)
|
|
|
|
|
|
|
|
Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }
|
|
|
|
|
2011-05-23 01:39:56 +08:00
|
|
|
/// deleteDeadInstruction - Delete this instruction. Before we do, go through
|
2010-12-27 08:03:23 +08:00
|
|
|
/// and zero out all the operands of this instruction. If any of them become
|
|
|
|
/// dead, delete them and the computation tree that feeds them.
|
|
|
|
///
|
2011-05-23 01:39:56 +08:00
|
|
|
static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) {
|
2010-12-27 08:03:23 +08:00
|
|
|
SmallVector<Instruction*, 32> NowDeadInsts;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 08:03:23 +08:00
|
|
|
NowDeadInsts.push_back(I);
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 08:03:23 +08:00
|
|
|
// Before we touch this instruction, remove it from SE!
|
|
|
|
do {
|
|
|
|
Instruction *DeadInst = NowDeadInsts.pop_back_val();
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 08:03:23 +08:00
|
|
|
// This instruction is dead, zap it, in stages. Start by removing it from
|
|
|
|
// SCEV.
|
|
|
|
SE.forgetValue(DeadInst);
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 08:03:23 +08:00
|
|
|
for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
|
|
|
|
Value *Op = DeadInst->getOperand(op);
|
|
|
|
DeadInst->setOperand(op, 0);
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 08:03:23 +08:00
|
|
|
// If this operand just became dead, add it to the NowDeadInsts list.
|
|
|
|
if (!Op->use_empty()) continue;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 08:03:23 +08:00
|
|
|
if (Instruction *OpI = dyn_cast<Instruction>(Op))
|
|
|
|
if (isInstructionTriviallyDead(OpI))
|
|
|
|
NowDeadInsts.push_back(OpI);
|
|
|
|
}
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 08:03:23 +08:00
|
|
|
DeadInst->eraseFromParent();
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 08:03:23 +08:00
|
|
|
} while (!NowDeadInsts.empty());
|
|
|
|
}
|
|
|
|
|
2011-05-23 01:39:56 +08:00
|
|
|
/// deleteIfDeadInstruction - If the specified value is a dead instruction,
|
|
|
|
/// delete it and any recursively used instructions.
|
|
|
|
static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE) {
|
|
|
|
if (Instruction *I = dyn_cast<Instruction>(V))
|
|
|
|
if (isInstructionTriviallyDead(I))
|
2011-06-28 13:04:16 +08:00
|
|
|
deleteDeadInstruction(I, SE);
|
2011-05-23 01:39:56 +08:00
|
|
|
}
|
|
|
|
|
2010-12-27 03:39:38 +08:00
|
|
|
bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
  CurLoop = L;

  // Disable the transform when we are inside memset/memcpy itself; otherwise
  // the function's own loop could be rewritten into a call to the function.
  StringRef FnName = L->getHeader()->getParent()->getName();
  if (FnName == "memset" || FnName == "memcpy")
    return false;

  // We need an analyzable, loop-invariant trip count to size the memory
  // operations we form.
  SE = &getAnalysis<ScalarEvolution>();
  if (!SE->hasLoopInvariantBackedgeTakenCount(L))
    return false;
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BECount)) return false;

  // A loop that executes exactly one time should be peeled, not optimized by
  // this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getValue()->getValue() == 0)
      return false;

  // Without target data we cannot compute store sizes; require it for now.
  TD = getAnalysisIfAvailable<TargetData>();
  if (TD == 0) return false;

  DT = &getAnalysis<DominatorTree>();
  LoopInfo &LI = getAnalysis<LoopInfo>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  DEBUG(dbgs() << "loop-idiom Scanning: F["
               << L->getHeader()->getParent()->getName()
               << "] Loop %" << L->getHeader()->getName() << "\n");

  // Visit each block that belongs directly to this loop, skipping blocks
  // owned by subloops (those get their own runOnLoop invocation).
  bool Changed = false;
  for (Loop::block_iterator BI = L->block_begin(), BE = L->block_end();
       BI != BE; ++BI) {
    if (LI.getLoopFor(*BI) != CurLoop)
      continue;

    Changed |= runOnLoopBlock(*BI, BECount, ExitBlocks);
  }
  return Changed;
}
|
2010-12-27 03:39:38 +08:00
|
|
|
|
2011-01-03 03:01:03 +08:00
|
|
|
/// runOnLoopBlock - Process the specified block, which lives in a counted loop
|
|
|
|
/// with the specified backedge count. This block is known to be in the current
|
|
|
|
/// loop and not in any subloops.
|
|
|
|
bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
|
|
|
|
SmallVectorImpl<BasicBlock*> &ExitBlocks) {
|
|
|
|
// We can only promote stores in this block if they are unconditionally
|
|
|
|
// executed in the loop. For a block to be unconditionally executed, it has
|
|
|
|
// to dominate all the exit blocks of the loop. Verify this now.
|
|
|
|
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
|
|
|
|
if (!DT->dominates(BB, ExitBlocks[i]))
|
|
|
|
return false;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 04:45:45 +08:00
|
|
|
bool MadeChange = false;
|
|
|
|
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
|
2011-01-04 15:27:30 +08:00
|
|
|
Instruction *Inst = I++;
|
|
|
|
// Look for store instructions, which may be optimized to memset/memcpy.
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
|
|
|
|
WeakVH InstPtr(I);
|
|
|
|
if (!processLoopStore(SI, BECount)) continue;
|
|
|
|
MadeChange = true;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2011-01-04 15:27:30 +08:00
|
|
|
// If processing the store invalidated our iterator, start over from the
|
2011-01-04 15:46:33 +08:00
|
|
|
// top of the block.
|
2011-01-04 15:27:30 +08:00
|
|
|
if (InstPtr == 0)
|
|
|
|
I = BB->begin();
|
|
|
|
continue;
|
|
|
|
}
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2011-01-04 15:46:33 +08:00
|
|
|
// Look for memset instructions, which may be optimized to a larger memset.
|
|
|
|
if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
|
|
|
|
WeakVH InstPtr(I);
|
|
|
|
if (!processLoopMemSet(MSI, BECount)) continue;
|
|
|
|
MadeChange = true;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2011-01-04 15:46:33 +08:00
|
|
|
// If processing the memset invalidated our iterator, start over from the
|
|
|
|
// top of the block.
|
|
|
|
if (InstPtr == 0)
|
|
|
|
I = BB->begin();
|
|
|
|
continue;
|
|
|
|
}
|
2010-12-27 04:45:45 +08:00
|
|
|
}
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 04:45:45 +08:00
|
|
|
return MadeChange;
|
2010-12-27 03:39:38 +08:00
|
|
|
}
|
|
|
|
|
2011-01-03 03:01:03 +08:00
|
|
|
|
2011-01-04 15:46:33 +08:00
|
|
|
/// processLoopStore - See if this store can be promoted to a memset or memcpy.
|
2010-12-27 04:45:45 +08:00
|
|
|
bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
|
2011-01-04 15:46:33 +08:00
|
|
|
if (SI->isVolatile()) return false;
|
|
|
|
|
2010-12-27 04:45:45 +08:00
|
|
|
Value *StoredVal = SI->getValueOperand();
|
implement enough of the memset inference algorithm to recognize and insert
memsets. This is still missing one important validity check, but this is enough
to compile stuff like this:
void test0(std::vector<char> &X) {
for (std::vector<char>::iterator I = X.begin(), E = X.end(); I != E; ++I)
*I = 0;
}
void test1(std::vector<int> &X) {
for (long i = 0, e = X.size(); i != e; ++i)
X[i] = 0x01010101;
}
With:
$ clang t.cpp -S -o - -O2 -emit-llvm | opt -loop-idiom | opt -O3 | llc
to:
__Z5test0RSt6vectorIcSaIcEE: ## @_Z5test0RSt6vectorIcSaIcEE
## BB#0: ## %entry
subq $8, %rsp
movq (%rdi), %rax
movq 8(%rdi), %rsi
cmpq %rsi, %rax
je LBB0_2
## BB#1: ## %bb.nph
subq %rax, %rsi
movq %rax, %rdi
callq ___bzero
LBB0_2: ## %for.end
addq $8, %rsp
ret
...
__Z5test1RSt6vectorIiSaIiEE: ## @_Z5test1RSt6vectorIiSaIiEE
## BB#0: ## %entry
subq $8, %rsp
movq (%rdi), %rax
movq 8(%rdi), %rdx
subq %rax, %rdx
cmpq $4, %rdx
jb LBB1_2
## BB#1: ## %for.body.preheader
andq $-4, %rdx
movl $1, %esi
movq %rax, %rdi
callq _memset
LBB1_2: ## %for.end
addq $8, %rsp
ret
llvm-svn: 122573
2010-12-27 07:42:51 +08:00
|
|
|
Value *StorePtr = SI->getPointerOperand();
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-29 02:53:48 +08:00
|
|
|
// Reject stores that are so large that they overflow an unsigned.
|
2010-12-27 04:45:45 +08:00
|
|
|
uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
|
2010-12-29 02:53:48 +08:00
|
|
|
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
|
2010-12-27 04:45:45 +08:00
|
|
|
return false;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 04:45:45 +08:00
|
|
|
// See if the pointer expression is an AddRec like {base,+,1} on the current
|
|
|
|
// loop, which indicates a strided store. If we have something else, it's a
|
|
|
|
// random store we can't handle.
|
2011-01-02 11:37:56 +08:00
|
|
|
const SCEVAddRecExpr *StoreEv =
|
|
|
|
dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
|
|
|
|
if (StoreEv == 0 || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
|
2010-12-27 04:45:45 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check to see if the stride matches the size of the store. If so, then we
|
|
|
|
// know that every byte is touched in the loop.
|
2011-03-15 00:48:10 +08:00
|
|
|
unsigned StoreSize = (unsigned)SizeInBits >> 3;
|
2011-01-02 11:37:56 +08:00
|
|
|
const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2011-02-21 10:08:54 +08:00
|
|
|
if (Stride == 0 || StoreSize != Stride->getValue()->getValue()) {
|
|
|
|
// TODO: Could also handle negative stride here someday, that will require
|
|
|
|
// the validity check in mayLoopAccessLocation to be updated though.
|
|
|
|
// Enable this to print exact negative strides.
|
2011-02-22 01:02:55 +08:00
|
|
|
if (0 && Stride && StoreSize == -Stride->getValue()->getValue()) {
|
2011-02-21 10:08:54 +08:00
|
|
|
dbgs() << "NEGATIVE STRIDE: " << *SI << "\n";
|
|
|
|
dbgs() << "BB: " << *SI->getParent();
|
|
|
|
}
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2010-12-27 04:45:45 +08:00
|
|
|
return false;
|
2011-02-21 10:08:54 +08:00
|
|
|
}
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
|
|
|
|
// See if we can optimize just this store in isolation.
|
|
|
|
if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(),
|
|
|
|
StoredVal, SI, StoreEv, BECount))
|
|
|
|
return true;
|
implement enough of the memset inference algorithm to recognize and insert
memsets. This is still missing one important validity check, but this is enough
to compile stuff like this:
void test0(std::vector<char> &X) {
for (std::vector<char>::iterator I = X.begin(), E = X.end(); I != E; ++I)
*I = 0;
}
void test1(std::vector<int> &X) {
for (long i = 0, e = X.size(); i != e; ++i)
X[i] = 0x01010101;
}
With:
$ clang t.cpp -S -o - -O2 -emit-llvm | opt -loop-idiom | opt -O3 | llc
to:
__Z5test0RSt6vectorIcSaIcEE: ## @_Z5test0RSt6vectorIcSaIcEE
## BB#0: ## %entry
subq $8, %rsp
movq (%rdi), %rax
movq 8(%rdi), %rsi
cmpq %rsi, %rax
je LBB0_2
## BB#1: ## %bb.nph
subq %rax, %rsi
movq %rax, %rdi
callq ___bzero
LBB0_2: ## %for.end
addq $8, %rsp
ret
...
__Z5test1RSt6vectorIiSaIiEE: ## @_Z5test1RSt6vectorIiSaIiEE
## BB#0: ## %entry
subq $8, %rsp
movq (%rdi), %rax
movq 8(%rdi), %rdx
subq %rax, %rdx
cmpq $4, %rdx
jb LBB1_2
## BB#1: ## %for.body.preheader
andq $-4, %rdx
movl $1, %esi
movq %rax, %rdi
callq _memset
LBB1_2: ## %for.end
addq $8, %rsp
ret
llvm-svn: 122573
2010-12-27 07:42:51 +08:00
|
|
|
|
2011-01-02 11:37:56 +08:00
|
|
|
// If the stored value is a strided load in the same loop with the same stride
|
|
|
|
// this this may be transformable into a memcpy. This kicks in for stuff like
|
|
|
|
// for (i) A[i] = B[i];
|
|
|
|
if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
|
|
|
|
const SCEVAddRecExpr *LoadEv =
|
|
|
|
dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0)));
|
|
|
|
if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() &&
|
|
|
|
StoreEv->getOperand(1) == LoadEv->getOperand(1) && !LI->isVolatile())
|
|
|
|
if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount))
|
|
|
|
return true;
|
|
|
|
}
|
2011-01-02 15:36:44 +08:00
|
|
|
//errs() << "UNHANDLED strided store: " << *StoreEv << " - " << *SI << "\n";
|
2010-12-27 04:45:45 +08:00
|
|
|
|
2010-12-27 03:39:38 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-01-04 15:46:33 +08:00
|
|
|
/// processLoopMemSet - See if this memset can be promoted to a large memset.
|
|
|
|
bool LoopIdiomRecognize::
|
|
|
|
processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
|
|
|
|
// We can only handle non-volatile memsets with a constant size.
|
|
|
|
if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength())) return false;
|
|
|
|
|
2011-02-19 06:22:15 +08:00
|
|
|
// If we're not allowed to hack on memset, we fail.
|
|
|
|
if (!TLI->has(LibFunc::memset))
|
|
|
|
return false;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2011-01-04 15:46:33 +08:00
|
|
|
Value *Pointer = MSI->getDest();
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2011-01-04 15:46:33 +08:00
|
|
|
// See if the pointer expression is an AddRec like {base,+,1} on the current
|
|
|
|
// loop, which indicates a strided store. If we have something else, it's a
|
|
|
|
// random store we can't handle.
|
|
|
|
const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
|
|
|
|
if (Ev == 0 || Ev->getLoop() != CurLoop || !Ev->isAffine())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Reject memsets that are so large that they overflow an unsigned.
|
|
|
|
uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
|
|
|
|
if ((SizeInBytes >> 32) != 0)
|
|
|
|
return false;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2011-01-04 15:46:33 +08:00
|
|
|
// Check to see if the stride matches the size of the memset. If so, then we
|
|
|
|
// know that every byte is touched in the loop.
|
|
|
|
const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2011-01-04 15:46:33 +08:00
|
|
|
// TODO: Could also handle negative stride here someday, that will require the
|
|
|
|
// validity check in mayLoopAccessLocation to be updated though.
|
|
|
|
if (Stride == 0 || MSI->getLength() != Stride->getValue())
|
|
|
|
return false;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
|
|
|
|
MSI->getAlignment(), MSI->getValue(),
|
|
|
|
MSI, Ev, BECount);
|
2011-01-04 15:46:33 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-01-03 05:14:18 +08:00
|
|
|
/// mayLoopAccessLocation - Return true if the specified loop might access the
|
|
|
|
/// specified pointer location, which is a loop-strided access. The 'Access'
|
|
|
|
/// argument specifies what the verboten forms of access are (read or write).
|
|
|
|
static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
|
|
|
|
Loop *L, const SCEV *BECount,
|
2011-01-02 11:37:56 +08:00
|
|
|
unsigned StoreSize, AliasAnalysis &AA,
|
2011-01-04 15:46:33 +08:00
|
|
|
Instruction *IgnoredStore) {
|
2011-01-02 03:39:01 +08:00
|
|
|
// Get the location that may be stored across the loop. Since the access is
|
|
|
|
// strided positively through memory, we say that the modified location starts
|
|
|
|
// at the pointer and has infinite size.
|
2011-01-02 03:54:22 +08:00
|
|
|
uint64_t AccessSize = AliasAnalysis::UnknownSize;
|
|
|
|
|
|
|
|
// If the loop iterates a fixed number of times, we can refine the access size
|
|
|
|
// to be exactly the size of the memset, which is (BECount+1)*StoreSize
|
|
|
|
if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
|
|
|
|
AccessSize = (BECst->getValue()->getZExtValue()+1)*StoreSize;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
2011-01-02 03:54:22 +08:00
|
|
|
// TODO: For this to be really effective, we have to dive into the pointer
|
|
|
|
// operand in the store. Store to &A[i] of 100 will always return may alias
|
|
|
|
// with store of &A[100], we need to StoreLoc to be "A" with size of 100,
|
|
|
|
// which will then no-alias a store to &A[100].
|
2011-01-02 11:37:56 +08:00
|
|
|
AliasAnalysis::Location StoreLoc(Ptr, AccessSize);
|
2011-01-02 03:39:01 +08:00
|
|
|
|
|
|
|
for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
|
|
|
|
++BI)
|
|
|
|
for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I)
|
2011-01-02 11:37:56 +08:00
|
|
|
if (&*I != IgnoredStore &&
|
2011-01-03 05:14:18 +08:00
|
|
|
(AA.getModRefInfo(I, StoreLoc) & Access))
|
2011-01-02 03:39:01 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
/// getMemSetPatternValue - If a strided store of the specified value is safe to
|
|
|
|
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
|
|
|
|
/// be passed in. Otherwise, return null.
|
|
|
|
///
|
|
|
|
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
|
|
|
|
/// just replicate their input array and then pass on to memset_pattern16.
|
|
|
|
static Constant *getMemSetPatternValue(Value *V, const TargetData &TD) {
|
|
|
|
// If the value isn't a constant, we can't promote it to being in a constant
|
|
|
|
// array. We could theoretically do a store to an alloca or something, but
|
|
|
|
// that doesn't seem worthwhile.
|
|
|
|
Constant *C = dyn_cast<Constant>(V);
|
|
|
|
if (C == 0) return 0;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
// Only handle simple values that are a power of two bytes in size.
|
|
|
|
uint64_t Size = TD.getTypeSizeInBits(V->getType());
|
|
|
|
if (Size == 0 || (Size & 7) || (Size & (Size-1)))
|
|
|
|
return 0;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
rewrite the memset_pattern pattern generation stuff to accept any 2/4/8/16-byte
constant, including globals. This makes us generate much more "pretty" pattern
globals as well because it doesn't break it down to an array of bytes all the
time.
This enables us to handle stores of relocatable globals. This kicks in about
48 times in 254.gap, giving us stuff like this:
@.memset_pattern40 = internal constant [2 x %struct.TypHeader* (%struct.TypHeader*, %struct.TypHeader*)*] [%struct.TypHeader* (%struct.TypHeader*, %struct
.TypHeader*)* @IsFalse, %struct.TypHeader* (%struct.TypHeader*, %struct.TypHeader*)* @IsFalse], align 16
...
call void @memset_pattern16(i8* %scevgep5859, i8* bitcast ([2 x %struct.TypHeader* (%struct.TypHeader*, %struct.TypHeader*)*]* @.memset_pattern40 to i8*
), i64 %tmp75) nounwind
llvm-svn: 126044
2011-02-20 03:56:44 +08:00
|
|
|
// Don't care enough about darwin/ppc to implement this.
|
|
|
|
if (TD.isBigEndian())
|
|
|
|
return 0;
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
|
|
|
|
// Convert to size in bytes.
|
|
|
|
Size /= 8;
|
2011-02-19 06:22:15 +08:00
|
|
|
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
// TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
|
rewrite the memset_pattern pattern generation stuff to accept any 2/4/8/16-byte
constant, including globals. This makes us generate much more "pretty" pattern
globals as well because it doesn't break it down to an array of bytes all the
time.
This enables us to handle stores of relocatable globals. This kicks in about
48 times in 254.gap, giving us stuff like this:
@.memset_pattern40 = internal constant [2 x %struct.TypHeader* (%struct.TypHeader*, %struct.TypHeader*)*] [%struct.TypHeader* (%struct.TypHeader*, %struct
.TypHeader*)* @IsFalse, %struct.TypHeader* (%struct.TypHeader*, %struct.TypHeader*)* @IsFalse], align 16
...
call void @memset_pattern16(i8* %scevgep5859, i8* bitcast ([2 x %struct.TypHeader* (%struct.TypHeader*, %struct.TypHeader*)*]* @.memset_pattern40 to i8*
), i64 %tmp75) nounwind
llvm-svn: 126044
2011-02-20 03:56:44 +08:00
|
|
|
// if the top and bottom are the same (e.g. for vectors and large integers).
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
if (Size > 16) return 0;
|
2011-03-15 00:48:10 +08:00
|
|
|
|
rewrite the memset_pattern pattern generation stuff to accept any 2/4/8/16-byte
constant, including globals. This makes us generate much more "pretty" pattern
globals as well because it doesn't break it down to an array of bytes all the
time.
This enables us to handle stores of relocatable globals. This kicks in about
48 times in 254.gap, giving us stuff like this:
@.memset_pattern40 = internal constant [2 x %struct.TypHeader* (%struct.TypHeader*, %struct.TypHeader*)*] [%struct.TypHeader* (%struct.TypHeader*, %struct
.TypHeader*)* @IsFalse, %struct.TypHeader* (%struct.TypHeader*, %struct.TypHeader*)* @IsFalse], align 16
...
call void @memset_pattern16(i8* %scevgep5859, i8* bitcast ([2 x %struct.TypHeader* (%struct.TypHeader*, %struct.TypHeader*)*]* @.memset_pattern40 to i8*
), i64 %tmp75) nounwind
llvm-svn: 126044
2011-02-20 03:56:44 +08:00
|
|
|
// If the constant is exactly 16 bytes, just use it.
|
|
|
|
if (Size == 16) return C;
|
|
|
|
|
|
|
|
// Otherwise, we'll use an array of the constants.
|
|
|
|
unsigned ArraySize = 16/Size;
|
|
|
|
ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
|
|
|
|
return ConstantArray::get(AT, std::vector<Constant*>(ArraySize, C));
|
Implement rdar://9009151, transforming strided loop stores of
unsplatable values into memset_pattern16 when it is available
(recent darwins). This transforms lots of strided loop stores
of ints for example, like 5 in vpr:
Formed memset: call void @memset_pattern16(i8* %4, i8* getelementptr inbounds ([16 x i8]* @.memset_pattern9, i32 0, i32 0), i64 %tmp25)
from store to: {%3,+,4}<%11> at: store i32 3, i32* %scevgep, align 4, !tbaa !4
llvm-svn: 126040
2011-02-20 03:31:39 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/// processLoopStridedStore - We see a strided store of some value.  If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
///
/// DestPtr is the pointer being stored through, with the address evolution
/// described by the add-rec Ev; StoreSize/StoreAlignment describe the store;
/// BECount is the loop's backedge-taken count.  Returns true if the store was
/// replaced (and deleted), false if the idiom does not apply or is unsafe.
bool LoopIdiomRecognize::
processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                        unsigned StoreAlignment, Value *StoredVal,
                        Instruction *TheStore, const SCEVAddRecExpr *Ev,
                        const SCEV *BECount) {
  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored.  A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  // Exactly one of SplatValue/PatternValue will be non-null below; that choice
  // selects which runtime call is emitted.
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = 0;

  // If we're allowed to form a memset, and the stored value would be acceptable
  // for memset, use it.
  if (SplatValue && TLI->has(LibFunc::memset) &&
      // Verify that the stored value is loop invariant.  If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // Keep and use SplatValue.
    PatternValue = 0;
  } else if (TLI->has(LibFunc::memset_pattern16) &&
             (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {
    // It looks like we can use PatternValue!
    SplatValue = 0;
  } else {
    // Otherwise, this isn't an idiom we can transform.  For example, we can't
    // do anything with a 3-byte store, for example.
    return false;
  }

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header.  This allows us to insert code for it in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, "loop-idiom");

  // Okay, we have a strided store "p[i]" of a splattable value.  We can turn
  // this into a memset in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location.  Check for any overlap by generating the
  // base pointer and checking the region.
  unsigned AddrSpace = cast<PointerType>(DestPtr->getType())->getAddressSpace();
  Value *BasePtr =
    Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
                           Preheader->getTerminator());

  if (mayLoopAccessLocation(BasePtr, AliasAnalysis::ModRef,
                            CurLoop, BECount,
                            StoreSize, getAnalysis<AliasAnalysis>(), TheStore)){
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    deleteIfDeadInstruction(BasePtr, *SE);
    return false;
  }

  // Okay, everything looks good, insert the memset.

  // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
  // pointer size if it isn't already.
  Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         SCEV::FlagNUW);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue)
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue,NumBytes,StoreAlignment);
  else {
    // Everything is emitted in default address space
    Module *M = TheStore->getParent()->getParent()->getParent();
    Value *MSP = M->getOrInsertFunction("memset_pattern16",
                                        Builder.getVoidTy(),
                                        Builder.getInt8PtrTy(),
                                        Builder.getInt8PtrTy(), IntPtr,
                                        (void*)0);

    // Otherwise we should form a memset_pattern16.  PatternValue is known to be
    // a constant array of 16-bytes.  Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::InternalLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(true); // Ok to merge these.
    GV->setAlignment(16);
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Builder.getInt8PtrTy());
    NewCall = Builder.CreateCall3(MSP, BasePtr, PatternPtr, NumBytes);
  }

  DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
               << "    from store to: " << *Ev << " at: " << *TheStore << "\n");
  // Attach the store's source location to the new call for debuggability.
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  // Okay, the memset has been formed.  Zap the original store and anything that
  // feeds into it.
  deleteDeadInstruction(TheStore, *SE);
  ++NumMemSet;
  return true;
}
|
|
|
|
|
2011-01-02 11:37:56 +08:00
|
|
|
/// processLoopStoreOfLoopLoad - We see a strided store whose value is a
/// same-strided load.
///
/// If it is safe, turn the store/load pair into a memcpy emitted in the loop
/// preheader.  SI is the store (whose value operand is the load), StoreSize is
/// the size of each element, StoreEv/LoadEv are the add-rec SCEVs for the
/// store and load addresses, and BECount is the loop's backedge-taken count.
/// Returns true if the pair was replaced (and the store deleted).
bool LoopIdiomRecognize::
processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
                           const SCEVAddRecExpr *StoreEv,
                           const SCEVAddRecExpr *LoadEv,
                           const SCEV *BECount) {
  // If we're not allowed to form memcpy, we fail.
  if (!TLI->has(LibFunc::memcpy))
    return false;

  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header.  This allows us to insert code for it in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, "loop-idiom");

  // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
  // this into a memcpy in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to.  This includes the load that
  // feeds the stores.  Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr =
    Expander.expandCodeFor(StoreEv->getStart(),
                           Builder.getInt8PtrTy(SI->getPointerAddressSpace()),
                           Preheader->getTerminator());

  // The destination must not be read or written by anything else in the loop
  // (ModRef), since memcpy will clobber the whole region at once.
  if (mayLoopAccessLocation(StoreBasePtr, AliasAnalysis::ModRef,
                            CurLoop, BECount, StoreSize,
                            getAnalysis<AliasAnalysis>(), SI)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    deleteIfDeadInstruction(StoreBasePtr, *SE);
    return false;
  }

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr =
    Expander.expandCodeFor(LoadEv->getStart(),
                           Builder.getInt8PtrTy(LI->getPointerAddressSpace()),
                           Preheader->getTerminator());

  // The source only needs to be free of writes (Mod); concurrent reads of the
  // source region are fine.
  if (mayLoopAccessLocation(LoadBasePtr, AliasAnalysis::Mod, CurLoop, BECount,
                            StoreSize, getAnalysis<AliasAnalysis>(), SI)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    deleteIfDeadInstruction(LoadBasePtr, *SE);
    deleteIfDeadInstruction(StoreBasePtr, *SE);
    return false;
  }

  // Okay, everything is safe, we can transform this!

  // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
  // pointer size if it isn't already.
  Type *IntPtr = TD->getIntPtrType(SI->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         SCEV::FlagNUW);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  // Use the weaker of the two access alignments for the combined copy.
  CallInst *NewCall =
    Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
                         std::min(SI->getAlignment(), LI->getAlignment()));
  NewCall->setDebugLoc(SI->getDebugLoc());

  DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
               << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
               << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");

  // Okay, the memcpy has been formed.  Zap the original store and anything that
  // feeds into it.
  deleteDeadInstruction(SI, *SE);
  ++NumMemCpy;
  return true;
}
|