Revert r344693 ("[ARM] bottom-top mul support in ARMParallelDSP")
Still causing failures on the polly-aosp buildbot; I'll follow up with a reduced testcase.

llvm-svn: 344752
commit b09c778715
parent 98a6692380
@@ -55,7 +55,6 @@ namespace {
using ReductionList = SmallVector<Reduction, 8>;
using ValueList = SmallVector<Value*, 8>;
using MemInstList = SmallVector<Instruction*, 8>;
using LoadInstList = SmallVector<LoadInst*, 8>;
using PMACPair = std::pair<BinOpChain*,BinOpChain*>;
using PMACPairList = SmallVector<PMACPair, 8>;
using Instructions = SmallVector<Instruction*,16>;
@@ -64,8 +63,7 @@ namespace {
struct OpChain {
Instruction *Root;
ValueList AllValues;
MemInstList VecLd; // List of all sequential load instructions.
LoadInstList Loads; // List of all load instructions.
MemInstList VecLd; // List of all load instructions.
MemLocList MemLocs; // All memory locations read by this tree.
bool ReadOnly = true;

@@ -78,10 +76,8 @@ namespace {
if (auto *I = dyn_cast<Instruction>(V)) {
if (I->mayWriteToMemory())
ReadOnly = false;
if (auto *Ld = dyn_cast<LoadInst>(V)) {
if (auto *Ld = dyn_cast<LoadInst>(V))
MemLocs.push_back(MemoryLocation(Ld->getPointerOperand(), Size));
Loads.push_back(Ld);
}
}
}
}
@@ -139,7 +135,6 @@ namespace {
/// exchange the halfwords of the second operand before performing the
/// arithmetic.
bool MatchSMLAD(Function &F);
bool MatchTopBottomMuls(BasicBlock *LoopBody);

public:
static char ID;
@@ -208,8 +203,6 @@ namespace {
LLVM_DEBUG(dbgs() << "\n== Parallel DSP pass ==\n");
LLVM_DEBUG(dbgs() << " - " << F.getName() << "\n\n");
Changes = MatchSMLAD(F);
if (!Changes)
Changes = MatchTopBottomMuls(Header);
return Changes;
}
};
@@ -503,10 +496,10 @@ static void MatchReductions(Function &F, Loop *TheLoop, BasicBlock *Header,
);
}

static void AddMulCandidate(OpChainList &Candidates,
static void AddMACCandidate(OpChainList &Candidates,
Instruction *Mul,
Value *MulOp0, Value *MulOp1) {
LLVM_DEBUG(dbgs() << "OK, found mul:\t"; Mul->dump());
LLVM_DEBUG(dbgs() << "OK, found acc mul:\t"; Mul->dump());
assert(Mul->getOpcode() == Instruction::Mul &&
"expected mul instruction");
ValueList LHS;
@@ -540,14 +533,14 @@ static void MatchParallelMACSequences(Reduction &R,
break;
case Instruction::Mul:
if (match (I, (m_Mul(m_Value(MulOp0), m_Value(MulOp1))))) {
AddMulCandidate(Candidates, I, MulOp0, MulOp1);
AddMACCandidate(Candidates, I, MulOp0, MulOp1);
return false;
}
break;
case Instruction::SExt:
if (match (I, (m_SExt(m_Mul(m_Value(MulOp0), m_Value(MulOp1)))))) {
Instruction *Mul = cast<Instruction>(I->getOperand(0));
AddMulCandidate(Candidates, Mul, MulOp0, MulOp1);
AddMACCandidate(Candidates, Mul, MulOp0, MulOp1);
return false;
}
break;
@@ -576,24 +569,23 @@ static void AliasCandidates(BasicBlock *Header, Instructions &Reads,
// the memory locations accessed by the MAC-chains.
// TODO: we need the read statements when we accept more complicated chains.
static bool AreAliased(AliasAnalysis *AA, Instructions &Reads,
Instructions &Writes, OpChainList &Candidates) {
Instructions &Writes, OpChainList &MACCandidates) {
LLVM_DEBUG(dbgs() << "Alias checks:\n");
for (auto &Candidate : Candidates) {
LLVM_DEBUG(dbgs() << "mul: "; Candidate->Root->dump());
Candidate->SetMemoryLocations();
for (auto &MAC : MACCandidates) {
LLVM_DEBUG(dbgs() << "mul: "; MAC->Root->dump());

// At the moment, we allow only simple chains that only consist of reads,
// accumulate their result with an integer add, and thus that don't write
// memory, and simply bail if they do.
if (!Candidate->ReadOnly)
if (!MAC->ReadOnly)
return true;

// Now for all writes in the basic block, check that they don't alias with
// the memory locations accessed by our MAC-chain:
for (auto *I : Writes) {
LLVM_DEBUG(dbgs() << "- "; I->dump());
assert(Candidate->MemLocs.size() >= 2 && "expecting at least 2 memlocs");
for (auto &MemLoc : Candidate->MemLocs) {
assert(MAC->MemLocs.size() >= 2 && "expecting at least 2 memlocs");
for (auto &MemLoc : MAC->MemLocs) {
if (isModOrRefSet(intersectModRef(AA->getModRefInfo(I, MemLoc),
ModRefInfo::ModRef))) {
LLVM_DEBUG(dbgs() << "Yes, aliases found\n");
@@ -607,7 +599,7 @@ static bool AreAliased(AliasAnalysis *AA, Instructions &Reads,
return false;
}

static bool CheckMulMemory(OpChainList &Candidates) {
static bool CheckMACMemory(OpChainList &Candidates) {
for (auto &C : Candidates) {
// A mul has 2 operands, and a narrow op consist of sext and a load; thus
// we expect at least 4 items in this operand value list.
@@ -615,6 +607,7 @@ static bool CheckMulMemory(OpChainList &Candidates) {
LLVM_DEBUG(dbgs() << "Operand list too short.\n");
return false;
}
C->SetMemoryLocations();
ValueList &LHS = static_cast<BinOpChain*>(C.get())->LHS;
ValueList &RHS = static_cast<BinOpChain*>(C.get())->RHS;

@@ -627,173 +620,6 @@ static bool CheckMulMemory(OpChainList &Candidates) {
return true;
}

static LoadInst *CreateLoadIns(IRBuilder<NoFolder> &IRB, LoadInst *BaseLoad,
const Type *LoadTy) {
const unsigned AddrSpace = BaseLoad->getPointerAddressSpace();

Value *VecPtr = IRB.CreateBitCast(BaseLoad->getPointerOperand(),
LoadTy->getPointerTo(AddrSpace));
return IRB.CreateAlignedLoad(VecPtr, BaseLoad->getAlignment());
}

/// Given two instructions, return the one that comes first in the basic block.
/// A work around for not being able to do > or < on bb iterators.
static Instruction* GetFirst(Instruction *A, Instruction *B) {
BasicBlock::iterator First(A);
BasicBlock::iterator Second(B);

BasicBlock *BB = A->getParent();
assert(BB == B->getParent() &&
"Can't compare instructions in different blocks");
BasicBlock::iterator Last = BB->end();

// Iterate through the block, if the 'First' iterator is found, then return
// Second.
while (Second != Last) {
if (Second == First)
return B;
++Second;
}
return A;
}

/// Attempt to widen loads and use smulbb, smulbt, smultb and smultt muls.
// TODO: This, like smlad generation, expects the leave operands to be loads
// that are sign extended. We should be able to handle scalar values as well
// performing these muls on word x half types to generate smulwb and smulwt.
bool ARMParallelDSP::MatchTopBottomMuls(BasicBlock *LoopBody) {
LLVM_DEBUG(dbgs() << "Attempting to find BT|TB muls.\n");

OpChainList Candidates;
for (auto &I : *LoopBody) {
if (I.getOpcode() == Instruction::Mul) {
Type *Ty = I.getType();
if (Ty->isIntegerTy() &&
(Ty->getScalarSizeInBits() == 32 ||
Ty->getScalarSizeInBits() == 64))
AddMulCandidate(Candidates, &I, I.getOperand(0), I.getOperand(1));
}
}

if (Candidates.empty())
return false;

Instructions Reads;
Instructions Writes;
AliasCandidates(LoopBody, Reads, Writes);

if (AreAliased(AA, Reads, Writes, Candidates))
return false;

DenseMap<LoadInst*, LoadInst*> SeqLoads;
SmallPtrSet<LoadInst*, 8> OffsetLoads;

for (unsigned i = 0; i < Candidates.size(); ++i) {
for (unsigned j = 0; j < Candidates.size(); ++j) {
if (i == j)
continue;

OpChain *MulChain0 = Candidates[i].get();
OpChain *MulChain1 = Candidates[j].get();

for (auto *Ld0 : MulChain0->Loads) {
if (SeqLoads.count(Ld0) || OffsetLoads.count(Ld0))
continue;

for (auto *Ld1 : MulChain1->Loads) {
if (SeqLoads.count(Ld1) || OffsetLoads.count(Ld1))
continue;

MemInstList VecMem;
if (AreSequentialLoads(Ld0, Ld1, VecMem)) {
SeqLoads[Ld0] = Ld1;
OffsetLoads.insert(Ld1);
}
}
}
}
}

if (SeqLoads.empty())
return false;

IRBuilder<NoFolder> IRB(LoopBody);
const Type *Ty = IntegerType::get(M->getContext(), 32);

auto IsUserMul = [](Use &U) {
auto *Mul = cast<Instruction>(U.getUser());
return Mul->getOpcode() == Instruction::Mul;
};

LLVM_DEBUG(dbgs() << "Found some sequential loads, now widening:\n");
for (auto &Pair : SeqLoads) {
LoadInst *BaseLd = Pair.first;
LoadInst *OffsetLd = Pair.second;

// Check that all the base users are muls.
auto *BaseSExt = cast<Instruction>(BaseLd->user_back());
for (Use &U : BaseSExt->uses()) {
if (!IsUserMul(U))
return false;
}

// Check that all the offset users are muls.
// TODO We exit early on finding a sext user which isn't a mul, but many
// arm instructions would be able to perform the necessary shift too.
auto *OffsetSExt = cast<Instruction>(OffsetLd->user_back());
for (Use &U : OffsetSExt->uses()) {
if (!IsUserMul(U))
return false;
}

LLVM_DEBUG(dbgs() << " - with base load: " << *BaseLd << "\n");
LLVM_DEBUG(dbgs() << " - with offset load: " << *OffsetLd << "\n");
Instruction *InsertPt = GetFirst(BaseLd, OffsetLd);
IRB.SetInsertPoint(InsertPt);
LoadInst *WideLd = CreateLoadIns(IRB, BaseLd, Ty);
LLVM_DEBUG(dbgs() << " - created wide load: " << *WideLd << "\n");

// Move the pointer operands before their users.
std::function<void(Instruction*, Instruction*)> MoveBefore =
[&MoveBefore](Instruction *Source, Instruction *Sink) -> void {
Source->moveBefore(Sink);
for (Use &U : Source->operands()) {
Value *Op = U.get();
if (auto *I = dyn_cast<Instruction>(Op)) {
if (isa<PHINode>(I) || I->getParent() != Source->getParent())
continue;
MoveBefore(I, Source);
}
}
};

// If we're inserting the load before BaseLd, we probably need to move the
// the pointer operand too. This operand is cast to an i32* in
// CreateLoadIns.
if (InsertPt != BaseLd) {
if (auto *GEP = dyn_cast<GetElementPtrInst>(BaseLd->getPointerOperand()))
MoveBefore(GEP, cast<Instruction>(WideLd->getPointerOperand()));
}

// BaseUser needs to: (asr (shl WideLoad, 16), 16)
// OffsetUser needs to: (asr WideLoad, 16)
auto *Top = cast<Instruction>(IRB.CreateAShr(WideLd, 16));
auto *Shl = cast<Instruction>(IRB.CreateShl(WideLd, 16));
auto *Bottom = cast<Instruction>(IRB.CreateAShr(Shl, 16));

BaseSExt->replaceAllUsesWith(Bottom);
OffsetSExt->replaceAllUsesWith(Top);

BaseSExt->eraseFromParent();
OffsetSExt->eraseFromParent();
BaseLd->eraseFromParent();
OffsetLd->eraseFromParent();
}
LLVM_DEBUG(dbgs() << "Block after top bottom mul replacements:\n"
<< *LoopBody << "\n");
return true;
}

// Loop Pass that needs to identify integer add/sub reductions of 16-bit vector
// multiplications.
// To use SMLAD:
@@ -832,15 +658,14 @@ bool ARMParallelDSP::MatchSMLAD(Function &F) {
dbgs() << "Header block:\n"; Header->dump();
dbgs() << "Loop info:\n\n"; L->dump());

bool Changed = false;
ReductionList Reductions;
MatchReductions(F, L, Header, Reductions);
if (Reductions.empty())
return false;

for (auto &R : Reductions) {
OpChainList MACCandidates;
MatchParallelMACSequences(R, MACCandidates);
if (!CheckMulMemory(MACCandidates))
if (!CheckMACMemory(MACCandidates))
continue;

R.MACCandidates = std::move(MACCandidates);
@@ -857,7 +682,6 @@ bool ARMParallelDSP::MatchSMLAD(Function &F) {
Instructions Reads, Writes;
AliasCandidates(Header, Reads, Writes);

bool Changed = false;
for (auto &R : Reductions) {
if (AreAliased(AA, Reads, Writes, R.MACCandidates))
return false;
@@ -869,6 +693,15 @@ bool ARMParallelDSP::MatchSMLAD(Function &F) {
return Changed;
}

static LoadInst *CreateLoadIns(IRBuilder<NoFolder> &IRB, LoadInst &BaseLoad,
const Type *LoadTy) {
const unsigned AddrSpace = BaseLoad.getPointerAddressSpace();

Value *VecPtr = IRB.CreateBitCast(BaseLoad.getPointerOperand(),
LoadTy->getPointerTo(AddrSpace));
return IRB.CreateAlignedLoad(VecPtr, BaseLoad.getAlignment());
}

Instruction *ARMParallelDSP::CreateSMLADCall(LoadInst *VecLd0, LoadInst *VecLd1,
Instruction *Acc, bool Exchange,
Instruction *InsertAfter) {
@@ -883,8 +716,8 @@ Instruction *ARMParallelDSP::CreateSMLADCall(LoadInst *VecLd0, LoadInst *VecLd1,

// Replace the reduction chain with an intrinsic call
const Type *Ty = IntegerType::get(M->getContext(), 32);
LoadInst *NewLd0 = CreateLoadIns(Builder, &VecLd0[0], Ty);
LoadInst *NewLd1 = CreateLoadIns(Builder, &VecLd1[0], Ty);
LoadInst *NewLd0 = CreateLoadIns(Builder, VecLd0[0], Ty);
LoadInst *NewLd1 = CreateLoadIns(Builder, VecLd1[0], Ty);
Value* Args[] = { NewLd0, NewLd1, Acc };
Function *SMLAD = nullptr;
if (Exchange)
@@ -1,74 +0,0 @@
; RUN: opt -mtriple=thumbv8m.main -mcpu=cortex-m33 -S -arm-parallel-dsp %s -o - | FileCheck %s
; RUN: opt -mtriple=thumbv7a-linux-android -arm-parallel-dsp -S %s -o - | FileCheck %s

; CHECK-LABEL: sext_multi_use_undef
define void @sext_multi_use_undef() {
entry:
br label %for.body

for.body:
%0 = load i16, i16* undef, align 2
%conv3 = sext i16 %0 to i32
%1 = load i16, i16* undef, align 2
%conv7 = sext i16 %1 to i32
%mul8 = mul nsw i32 %conv7, %conv3
%x.addr.180 = getelementptr inbounds i16, i16* undef, i32 1
%2 = load i16, i16* %x.addr.180, align 2
%conv1582 = sext i16 %2 to i32
%mul.i7284 = mul nsw i32 %conv7, %conv1582
br label %for.body
}

; CHECK-LABEL: sext_multi_use
; CHECK: [[PtrA:%[^ ]+]] = bitcast i16* %a to i32*
; CHECK: [[DataA:%[^ ]+]] = load i32, i32* [[PtrA]], align 2
; CHECK: [[Top:%[^ ]+]] = ashr i32 [[DataA]], 16
; CHECK: [[Shl:%[^ ]+]] = shl i32 [[DataA]], 16
; CHECK: [[Bottom:%[^ ]+]] = ashr i32 [[Shl]], 16
; CHECK: [[DataB:%[^ ]+]] = load i16, i16* %b, align 2
; CHECK: [[SextB:%[^ ]+]] = sext i16 [[DataB]] to i32
; CHECK: [[Mul0:%[^ ]+]] = mul nsw i32 [[SextB]], [[Bottom]]
; CHECK: [[Mul1:%[^ ]+]] = mul nsw i32 [[SextB]], [[Top]]
define void @sext_multi_use(i16* %a, i16* %b) {
entry:
br label %for.body

for.body:
%0 = load i16, i16* %a, align 2
%conv3 = sext i16 %0 to i32
%1 = load i16, i16* %b, align 2
%conv7 = sext i16 %1 to i32
%mul8 = mul nsw i32 %conv7, %conv3
%x.addr.180 = getelementptr inbounds i16, i16* %a, i32 1
%2 = load i16, i16* %x.addr.180, align 2
%conv1582 = sext i16 %2 to i32
%mul.i7284 = mul nsw i32 %conv7, %conv1582
br label %for.body
}

; CHECK-LABEL: sext_multi_use_reorder
; CHECK: [[PtrA:%[^ ]+]] = bitcast i16* %a to i32*
; CHECK: [[DataA:%[^ ]+]] = load i32, i32* [[PtrA]], align 2
; CHECK: [[Top:%[^ ]+]] = ashr i32 [[DataA]], 16
; CHECK: [[Shl:%[^ ]+]] = shl i32 [[DataA]], 16
; CHECK: [[Bottom:%[^ ]+]] = ashr i32 [[Shl]], 16
; CHECK: [[Mul0:%[^ ]+]] = mul nsw i32 [[Top]], [[Bottom]]
; CHECK: [[DataB:%[^ ]+]] = load i16, i16* %b, align 2
; CHECK: [[SextB:%[^ ]+]] = sext i16 [[DataB]] to i32
; CHECK: [[Mul1:%[^ ]+]] = mul nsw i32 [[Top]], [[SextB]]
define void @sext_multi_use_reorder(i16* %a, i16* %b) {
entry:
br label %for.body

for.body:
%0 = load i16, i16* %a, align 2
%conv3 = sext i16 %0 to i32
%x.addr.180 = getelementptr inbounds i16, i16* %a, i32 1
%1 = load i16, i16* %x.addr.180, align 2
%conv7 = sext i16 %1 to i32
%mul8 = mul nsw i32 %conv7, %conv3
%2 = load i16, i16* %b, align 2
%conv1582 = sext i16 %2 to i32
%mul.i7284 = mul nsw i32 %conv7, %conv1582
br label %for.body
}
@@ -1,98 +0,0 @@
; RUN: opt -mtriple=thumbv7-unknown-linux-android -arm-parallel-dsp -S %s -o - | FileCheck %s

@a = local_unnamed_addr global i32 0, align 4
@b = local_unnamed_addr global i8* null, align 4
@c = local_unnamed_addr global i8 0, align 1
@d = local_unnamed_addr global i16* null, align 4

; CHECK-LABEL: @convolve
; CHECK-NOT: bitcast i16* [[ANY:%[^ ]+]] to i32*
define void @convolve() local_unnamed_addr #0 {
entry:
br label %for.cond

for.cond:
%e.0 = phi i32 [ undef, %entry ], [ %e.1.lcssa, %for.end ]
%f.0 = phi i32 [ undef, %entry ], [ %f.1.lcssa, %for.end ]
%g.0 = phi i32 [ undef, %entry ], [ %g.1.lcssa, %for.end ]
%cmp13 = icmp slt i32 %g.0, 1
br i1 %cmp13, label %for.body.lr.ph, label %for.end

for.body.lr.ph:
%0 = load i16*, i16** @d, align 4
%1 = load i8*, i8** @b, align 4
%2 = load i32, i32* @a, align 4
%3 = sub i32 1, %g.0
%min.iters.check = icmp ugt i32 %3, 3
%ident.check = icmp eq i32 %2, 1
%or.cond = and i1 %min.iters.check, %ident.check
br i1 %or.cond, label %vector.ph, label %for.body.preheader

vector.ph:
%n.vec = and i32 %3, -4
%ind.end = add i32 %g.0, %n.vec
%4 = mul i32 %2, %n.vec
%ind.end20 = add i32 %f.0, %4
%5 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %e.0, i32 0
br label %vector.body

vector.body:
%index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
%vec.phi = phi <4 x i32> [ %5, %vector.ph ], [ %14, %vector.body ]
%offset.idx = add i32 %g.0, %index
%6 = mul i32 %2, %index
%offset.idx21 = add i32 %f.0, %6
%7 = getelementptr inbounds i16, i16* %0, i32 %offset.idx
%8 = bitcast i16* %7 to <4 x i16>*
%wide.load = load <4 x i16>, <4 x i16>* %8, align 2
%9 = sext <4 x i16> %wide.load to <4 x i32>
%10 = getelementptr inbounds i8, i8* %1, i32 %offset.idx21
%11 = bitcast i8* %10 to <4 x i8>*
%wide.load25 = load <4 x i8>, <4 x i8>* %11, align 1
%12 = zext <4 x i8> %wide.load25 to <4 x i32>
%13 = mul nsw <4 x i32> %12, %9
%14 = add nsw <4 x i32> %13, %vec.phi
%index.next = add i32 %index, 4
%15 = icmp eq i32 %index.next, %n.vec
br i1 %15, label %middle.block, label %vector.body

middle.block:
%rdx.shuf = shufflevector <4 x i32> %14, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
%bin.rdx = add <4 x i32> %14, %rdx.shuf
%rdx.shuf26 = shufflevector <4 x i32> %bin.rdx, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
%bin.rdx27 = add <4 x i32> %bin.rdx, %rdx.shuf26
%16 = extractelement <4 x i32> %bin.rdx27, i32 0
%cmp.n = icmp eq i32 %3, %n.vec
br i1 %cmp.n, label %for.end, label %for.body.preheader

for.body.preheader:
%g.116.ph = phi i32 [ %g.0, %for.body.lr.ph ], [ %ind.end, %middle.block ]
%f.115.ph = phi i32 [ %f.0, %for.body.lr.ph ], [ %ind.end20, %middle.block ]
%e.114.ph = phi i32 [ %e.0, %for.body.lr.ph ], [ %16, %middle.block ]
br label %for.body

for.body:
%g.116 = phi i32 [ %inc, %for.body ], [ %g.116.ph, %for.body.preheader ]
%f.115 = phi i32 [ %add4, %for.body ], [ %f.115.ph, %for.body.preheader ]
%e.114 = phi i32 [ %add, %for.body ], [ %e.114.ph, %for.body.preheader ]
%arrayidx = getelementptr inbounds i16, i16* %0, i32 %g.116
%17 = load i16, i16* %arrayidx, align 2
%conv = sext i16 %17 to i32
%arrayidx2 = getelementptr inbounds i8, i8* %1, i32 %f.115
%18 = load i8, i8* %arrayidx2, align 1
%conv3 = zext i8 %18 to i32
%mul = mul nsw i32 %conv3, %conv
%add = add nsw i32 %mul, %e.114
%inc = add nsw i32 %g.116, 1
%add4 = add nsw i32 %2, %f.115
%cmp = icmp slt i32 %g.116, 0
br i1 %cmp, label %for.body, label %for.end

for.end:
%e.1.lcssa = phi i32 [ %e.0, %for.cond ], [ %16, %middle.block ], [ %add, %for.body ]
%f.1.lcssa = phi i32 [ %f.0, %for.cond ], [ %ind.end20, %middle.block ], [ %add4, %for.body ]
%g.1.lcssa = phi i32 [ %g.0, %for.cond ], [ %ind.end, %middle.block ], [ %inc, %for.body ]
%conv5 = trunc i32 %e.1.lcssa to i8
store i8 %conv5, i8* @c, align 1
br label %for.cond
}
@@ -1,210 +0,0 @@
; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -arm-parallel-dsp -S | FileCheck %s
; RUN: opt -mtriple=thumbv7a-linux-android -arm-parallel-dsp -S %s -o - | FileCheck %s

; CHECK-LABEL: topbottom_mul_alias
; CHECK-NOT: bitcast i16*
define void @topbottom_mul_alias(i32 %N, i32* nocapture readnone %Out, i16* nocapture readonly %In1, i16* nocapture readonly %In2) {
entry:
br label %for.body

for.body:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
%count = phi i32 [ %N, %entry ], [ %count.next, %for.body ]
%PIn1.0 = getelementptr inbounds i16, i16* %In1, i32 %iv
%In1.0 = load i16, i16* %PIn1.0, align 2
%SIn1.0 = sext i16 %In1.0 to i32
%PIn2.0 = getelementptr inbounds i16, i16* %In2, i32 %iv
%In2.0 = load i16, i16* %PIn2.0, align 2
%SIn2.0 = sext i16 %In2.0 to i32
%mul5.us.i.i = mul nsw i32 %SIn1.0, %SIn2.0
%Out.0 = getelementptr inbounds i32, i32* %Out, i32 %iv
store i32 %mul5.us.i.i, i32* %Out.0, align 4
%iv.1 = or i32 %iv, 1
%PIn1.1 = getelementptr inbounds i16, i16* %In1, i32 %iv.1
%In1.1 = load i16, i16* %PIn1.1, align 2
%SIn1.1 = sext i16 %In1.1 to i32
%PIn2.1 = getelementptr inbounds i16, i16* %In2, i32 %iv.1
%In2.1 = load i16, i16* %PIn2.1, align 2
%SIn2.1 = sext i16 %In2.1 to i32
%mul5.us.i.1.i = mul nsw i32 %SIn1.1, %SIn2.1
%Out.1 = getelementptr inbounds i32, i32* %Out, i32 %iv.1
store i32 %mul5.us.i.1.i, i32* %Out.1, align 4
%iv.2 = or i32 %iv, 2
%PIn1.2 = getelementptr inbounds i16, i16* %In1, i32 %iv.2
%In1.2 = load i16, i16* %PIn1.2, align 2
%SIn1.2 = sext i16 %In1.2 to i32
%PIn2.2 = getelementptr inbounds i16, i16* %In2, i32 %iv.2
%In2.2 = load i16, i16* %PIn2.2, align 2
%SIn2.2 = sext i16 %In2.2 to i32
%mul5.us.i.2.i = mul nsw i32 %SIn1.2, %SIn2.2
%Out.2 = getelementptr inbounds i32, i32* %Out, i32 %iv.2
store i32 %mul5.us.i.2.i, i32* %Out.2, align 4
%iv.3 = or i32 %iv, 3
%PIn1.3 = getelementptr inbounds i16, i16* %In1, i32 %iv.3
%In1.3 = load i16, i16* %PIn1.3, align 2
%SIn1.3 = sext i16 %In1.3 to i32
%PIn2.3 = getelementptr inbounds i16, i16* %In2, i32 %iv.3
%In2.3 = load i16, i16* %PIn2.3, align 2
%SIn2.3 = sext i16 %In2.3 to i32
%mul5.us.i.3.i = mul nsw i32 %SIn1.3, %SIn2.3
%Out.3 = getelementptr inbounds i32, i32* %Out, i32 %iv.3
store i32 %mul5.us.i.3.i, i32* %Out.3, align 4
%iv.next = add i32 %iv, 4
%count.next = add i32 %count, -4
%niter375.ncmp.3.i = icmp eq i32 %count.next, 0
br i1 %niter375.ncmp.3.i, label %exit, label %for.body

exit:
ret void
}

; TODO: We should be able to handle this by splatting the const value.
; CHECK-LABEL: topbottom_mul_const
; CHECK-NOT: bitcast i16*
define void @topbottom_mul_const(i32 %N, i32* noalias nocapture readnone %Out, i16* nocapture readonly %In, i16 signext %const) {
entry:
%conv4.i.i = sext i16 %const to i32
br label %for.body

for.body:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
%count = phi i32 [ %N, %entry ], [ %count.next, %for.body ]
%PIn.0 = getelementptr inbounds i16, i16* %In, i32 %iv
%In.0 = load i16, i16* %PIn.0, align 2
%conv.us.i144.i = sext i16 %In.0 to i32
%mul5.us.i.i = mul nsw i32 %conv.us.i144.i, %conv4.i.i
%Out.0 = getelementptr inbounds i32, i32* %Out, i32 %iv
store i32 %mul5.us.i.i, i32* %Out.0, align 4
%iv.1 = or i32 %iv, 1
%PIn.1 = getelementptr inbounds i16, i16* %In, i32 %iv.1
%In.1 = load i16, i16* %PIn.1, align 2
%conv.us.i144.1.i = sext i16 %In.1 to i32
%mul5.us.i.1.i = mul nsw i32 %conv.us.i144.1.i, %conv4.i.i
%Out.1 = getelementptr inbounds i32, i32* %Out, i32 %iv.1
store i32 %mul5.us.i.1.i, i32* %Out.1, align 4
%iv.2 = or i32 %iv, 2
%PIn.2 = getelementptr inbounds i16, i16* %In, i32 %iv.2
%In.3 = load i16, i16* %PIn.2, align 2
%conv.us.i144.2.i = sext i16 %In.3 to i32
%mul5.us.i.2.i = mul nsw i32 %conv.us.i144.2.i, %conv4.i.i
%Out.2 = getelementptr inbounds i32, i32* %Out, i32 %iv.2
store i32 %mul5.us.i.2.i, i32* %Out.2, align 4
%iv.3 = or i32 %iv, 3
%PIn.3 = getelementptr inbounds i16, i16* %In, i32 %iv.3
%In.4 = load i16, i16* %PIn.3, align 2
%conv.us.i144.3.i = sext i16 %In.4 to i32
%mul5.us.i.3.i = mul nsw i32 %conv.us.i144.3.i, %conv4.i.i
%Out.3 = getelementptr inbounds i32, i32* %Out, i32 %iv.3
store i32 %mul5.us.i.3.i, i32* %Out.3, align 4
%iv.next = add i32 %iv, 4
%count.next = add i32 %count, -4
%niter375.ncmp.3.i = icmp eq i32 %count.next, 0
br i1 %niter375.ncmp.3.i, label %exit, label %for.body

exit:
ret void
}

; TODO: We should be able to handle this and use smulwt and smulwb.
; CHECK-LABEL: topbottom_mul_word_load_const
; CHECK-NOT: bitcast i16*
define void @topbottom_mul_word_load_const(i32 %N, i32* noalias nocapture readnone %Out, i16* nocapture readonly %In, i32* %C) {
entry:
%const = load i32, i32* %C
br label %for.body

for.body:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
%count = phi i32 [ %N, %entry ], [ %count.next, %for.body ]
%PIn.0 = getelementptr inbounds i16, i16* %In, i32 %iv
%In.0 = load i16, i16* %PIn.0, align 2
%conv.us.i144.i = sext i16 %In.0 to i32
%mul5.us.i.i = mul nsw i32 %conv.us.i144.i, %const
%Out.0 = getelementptr inbounds i32, i32* %Out, i32 %iv
store i32 %mul5.us.i.i, i32* %Out.0, align 4
%iv.1 = or i32 %iv, 1
%PIn.1 = getelementptr inbounds i16, i16* %In, i32 %iv.1
%In.1 = load i16, i16* %PIn.1, align 2
%conv.us.i144.1.i = sext i16 %In.1 to i32
%mul5.us.i.1.i = mul nsw i32 %conv.us.i144.1.i, %const
%Out.1 = getelementptr inbounds i32, i32* %Out, i32 %iv.1
store i32 %mul5.us.i.1.i, i32* %Out.1, align 4
%iv.2 = or i32 %iv, 2
%PIn.2 = getelementptr inbounds i16, i16* %In, i32 %iv.2
%In.3 = load i16, i16* %PIn.2, align 2
%conv.us.i144.2.i = sext i16 %In.3 to i32
%mul5.us.i.2.i = mul nsw i32 %conv.us.i144.2.i, %const
%Out.2 = getelementptr inbounds i32, i32* %Out, i32 %iv.2
store i32 %mul5.us.i.2.i, i32* %Out.2, align 4
%iv.3 = or i32 %iv, 3
%PIn.3 = getelementptr inbounds i16, i16* %In, i32 %iv.3
%In.4 = load i16, i16* %PIn.3, align 2
%conv.us.i144.3.i = sext i16 %In.4 to i32
%mul5.us.i.3.i = mul nsw i32 %conv.us.i144.3.i, %const
%Out.3 = getelementptr inbounds i32, i32* %Out, i32 %iv.3
store i32 %mul5.us.i.3.i, i32* %Out.3, align 4
%iv.next = add i32 %iv, 4
%count.next = add i32 %count, -4
%niter375.ncmp.3.i = icmp eq i32 %count.next, 0
br i1 %niter375.ncmp.3.i, label %exit, label %for.body

exit:
ret void
}

; CHECK-LABEL: topbottom_mul_8
; CHECK-NOT: bitcast i16*
define void @topbottom_mul_8(i32 %N, i32* noalias nocapture readnone %Out, i8* nocapture readonly %In1, i8* nocapture readonly %In2) {
entry:
br label %for.body

for.body:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
%count = phi i32 [ %N, %entry ], [ %count.next, %for.body ]
%PIn1.0 = getelementptr inbounds i8, i8* %In1, i32 %iv
%In1.0 = load i8, i8* %PIn1.0, align 1
%SIn1.0 = sext i8 %In1.0 to i32
%PIn2.0 = getelementptr inbounds i8, i8* %In2, i32 %iv
%In2.0 = load i8, i8* %PIn2.0, align 1
%SIn2.0 = sext i8 %In2.0 to i32
%mul5.us.i.i = mul nsw i32 %SIn1.0, %SIn2.0
%Out.0 = getelementptr inbounds i32, i32* %Out, i32 %iv
store i32 %mul5.us.i.i, i32* %Out.0, align 4
%iv.1 = or i32 %iv, 1
%PIn1.1 = getelementptr inbounds i8, i8* %In1, i32 %iv.1
%In1.1 = load i8, i8* %PIn1.1, align 1
%SIn1.1 = sext i8 %In1.1 to i32
%PIn2.1 = getelementptr inbounds i8, i8* %In2, i32 %iv.1
%In2.1 = load i8, i8* %PIn2.1, align 1
%SIn2.1 = sext i8 %In2.1 to i32
%mul5.us.i.1.i = mul nsw i32 %SIn1.1, %SIn2.1
%Out.1 = getelementptr inbounds i32, i32* %Out, i32 %iv.1
store i32 %mul5.us.i.1.i, i32* %Out.1, align 4
%iv.2 = or i32 %iv, 2
%PIn1.2 = getelementptr inbounds i8, i8* %In1, i32 %iv.2
%In1.2 = load i8, i8* %PIn1.2, align 1
%SIn1.2 = sext i8 %In1.2 to i32
%PIn2.2 = getelementptr inbounds i8, i8* %In2, i32 %iv.2
%In2.2 = load i8, i8* %PIn2.2, align 1
%SIn2.2 = sext i8 %In2.2 to i32
%mul5.us.i.2.i = mul nsw i32 %SIn1.2, %SIn2.2
%Out.2 = getelementptr inbounds i32, i32* %Out, i32 %iv.2
store i32 %mul5.us.i.2.i, i32* %Out.2, align 4
%iv.3 = or i32 %iv, 3
%PIn1.3 = getelementptr inbounds i8, i8* %In1, i32 %iv.3
%In1.3 = load i8, i8* %PIn1.3, align 1
%SIn1.3 = sext i8 %In1.3 to i32
%PIn2.3 = getelementptr inbounds i8, i8* %In2, i32 %iv.3
%In2.3 = load i8, i8* %PIn2.3, align 1
%SIn2.3 = sext i8 %In2.3 to i32
%mul5.us.i.3.i = mul nsw i32 %SIn1.3, %SIn2.3
%Out.3 = getelementptr inbounds i32, i32* %Out, i32 %iv.3
store i32 %mul5.us.i.3.i, i32* %Out.3, align 4
%iv.next = add i32 %iv, 4
%count.next = add i32 %count, -4
%niter375.ncmp.3.i = icmp eq i32 %count.next, 0
br i1 %niter375.ncmp.3.i, label %exit, label %for.body

exit:
ret void
}
@@ -1,54 +0,0 @@
; RUN: opt -mtriple=thumbv8m.main -mcpu=cortex-m33 -arm-parallel-dsp -S %s -o - | FileCheck %s
; RUN: opt -mtriple=thumbv7a-linux-android -arm-parallel-dsp -S %s -o - | FileCheck %s

; CHECK-LABEL: reorder_gep_arguments
; CHECK: [[Sub:%[^ ]+]] = xor i32 %iv, -1
; CHECK: [[IdxPtr:%[^ ]+]] = getelementptr inbounds i16, i16* %arrayidx.us, i32 [[Sub]]
; CHECK: [[IdxPtrCast:%[^ ]+]] = bitcast i16* [[IdxPtr]] to i32*
; CHECK: [[Idx:%[^ ]+]] = load i32, i32* [[IdxPtrCast]], align 2
; CHECK: [[Top:%[^ ]+]] = ashr i32 [[Idx]], 16
; CHECK: [[Shl:%[^ ]+]] = shl i32 [[Idx]], 16
; CHECK: [[Bottom:%[^ ]+]] = ashr i32 [[Shl]], 16
; CHECK: [[BPtr:%[^ ]+]] = getelementptr inbounds i16, i16* %B, i32 %iv
; CHECK: [[BData:%[^ ]+]] = load i16, i16* [[BPtr]], align 2
; CHECK: [[BSext:%[^ ]+]] = sext i16 [[BData]] to i32
; CHECK: [[Mul0:%[^ ]+]] = mul nsw i32 [[BSext]], [[Top]]
; CHECK: [[BPtr1:%[^ ]+]] = getelementptr inbounds i16, i16* %B, i32 %add48.us
; CHECK: [[BData1:%[^ ]+]] = load i16, i16* [[BPtr1]], align 2
; CHECK: [[B1Sext:%[^ ]+]] = sext i16 [[BData1]] to i32
; CHECK: [[Mul1:%[^ ]+]] = mul nsw i32 [[B1Sext]], [[Bottom]]

define i32 @reorder_gep_arguments(i16* %B, i16* %arrayidx.us, i32 %d) {
entry:
br label %for.body36.us

for.body36.us:
%iv = phi i32 [ %add53.us, %for.body36.us ], [ 5, %entry ]
%out32_Q12.0114.us = phi i32 [ %add52.us, %for.body36.us ], [ 0, %entry ]
%sub37.us = sub nsw i32 0, %iv
%arrayidx38.us = getelementptr inbounds i16, i16* %arrayidx.us, i32 %sub37.us
%0 = load i16, i16* %arrayidx38.us, align 2
%conv39.us = sext i16 %0 to i32
%arrayidx40.us = getelementptr inbounds i16, i16* %B, i32 %iv
%1 = load i16, i16* %arrayidx40.us, align 2
%conv41.us = sext i16 %1 to i32
%mul42.us = mul nsw i32 %conv41.us, %conv39.us
%add43.us = add i32 %mul42.us, %out32_Q12.0114.us
%sub45.us = xor i32 %iv, -1
%arrayidx46.us = getelementptr inbounds i16, i16* %arrayidx.us, i32 %sub45.us
%2 = load i16, i16* %arrayidx46.us, align 2
%conv47.us = sext i16 %2 to i32
%add48.us = or i32 %iv, 1
%arrayidx49.us = getelementptr inbounds i16, i16* %B, i32 %add48.us
%3 = load i16, i16* %arrayidx49.us, align 2
%conv50.us = sext i16 %3 to i32
%mul51.us = mul nsw i32 %conv50.us, %conv47.us
%add52.us = add i32 %add43.us, %mul51.us
%add53.us = add nuw nsw i32 %iv, 2
%cmp34.us = icmp slt i32 %add53.us, %d
br i1 %cmp34.us, label %for.body36.us, label %exit

exit:
ret i32 %add52.us
}
@@ -1,252 +0,0 @@
; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -arm-parallel-dsp -S | FileCheck %s
; RUN: opt -mtriple=thumbv7a-linux-android -arm-parallel-dsp -S %s -o - | FileCheck %s

; CHECK-LABEL: topbottom_mul
define void @topbottom_mul(i32 %N, i32* noalias nocapture readnone %Out, i16* nocapture readonly %In1, i16* nocapture readonly %In2) {
entry:
br label %for.body

; CHECK: for.body:
; CHECK: [[Cast_PIn1_0:%[^ ]+]] = bitcast i16* %PIn1.0 to i32*
; CHECK: [[PIn1_01:%[^ ]+]] = load i32, i32* [[Cast_PIn1_0]], align 2
; CHECK: [[PIn1_1:%[^ ]+]] = ashr i32 [[PIn1_01]], 16
; CHECK: [[PIn1_01_shl:%[^ ]+]] = shl i32 [[PIn1_01]], 16
; CHECK: [[PIn1_0:%[^ ]+]] = ashr i32 [[PIn1_01_shl]], 16

; CHECK: [[Cast_PIn2_0:%[^ ]+]] = bitcast i16* %PIn2.0 to i32*
; CHECK: [[PIn2_01:%[^ ]+]] = load i32, i32* [[Cast_PIn2_0]], align 2
; CHECK: [[PIn2_1:%[^ ]+]] = ashr i32 [[PIn2_01]], 16
; CHECK: [[PIn2_01_shl:%[^ ]+]] = shl i32 [[PIn2_01]], 16
; CHECK: [[PIn2_0:%[^ ]+]] = ashr i32 [[PIn2_01_shl]], 16

; CHECK: mul nsw i32 [[PIn1_0]], [[PIn2_0]]
; CHECK: mul nsw i32 [[PIn1_1]], [[PIn2_1]]

; CHECK: [[Cast_PIn1_2:%[^ ]+]] = bitcast i16* %PIn1.2 to i32*
; CHECK: [[PIn1_23:%[^ ]+]] = load i32, i32* [[Cast_PIn1_2]], align 2
; CHECK: [[PIn1_3:%[^ ]+]] = ashr i32 [[PIn1_23]], 16
; CHECK: [[PIn1_23_shl:%[^ ]+]] = shl i32 [[PIn1_23]], 16
; CHECK: [[PIn1_2:%[^ ]+]] = ashr i32 [[PIn1_23_shl]], 16

; CHECK: [[Cast_PIn2_2:%[^ ]+]] = bitcast i16* %PIn2.2 to i32*
; CHECK: [[PIn2_23:%[^ ]+]] = load i32, i32* [[Cast_PIn2_2]], align 2
; CHECK: [[PIn2_3:%[^ ]+]] = ashr i32 [[PIn2_23]], 16
; CHECK: [[PIn2_23_shl:%[^ ]+]] = shl i32 [[PIn2_23]], 16
; CHECK: [[PIn2_2:%[^ ]+]] = ashr i32 [[PIn2_23_shl]], 16

; CHECK: mul nsw i32 [[PIn1_2]], [[PIn2_2]]
; CHECK: mul nsw i32 [[PIn1_3]], [[PIn2_3]]

for.body:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
%count = phi i32 [ %N, %entry ], [ %count.next, %for.body ]
%PIn1.0 = getelementptr inbounds i16, i16* %In1, i32 %iv
%In1.0 = load i16, i16* %PIn1.0, align 2
%SIn1.0 = sext i16 %In1.0 to i32
%PIn2.0 = getelementptr inbounds i16, i16* %In2, i32 %iv
%In2.0 = load i16, i16* %PIn2.0, align 2
%SIn2.0 = sext i16 %In2.0 to i32
%mul5.us.i.i = mul nsw i32 %SIn1.0, %SIn2.0
%Out.0 = getelementptr inbounds i32, i32* %Out, i32 %iv
store i32 %mul5.us.i.i, i32* %Out.0, align 4
%iv.1 = or i32 %iv, 1
%PIn1.1 = getelementptr inbounds i16, i16* %In1, i32 %iv.1
%In1.1 = load i16, i16* %PIn1.1, align 2
%SIn1.1 = sext i16 %In1.1 to i32
%PIn2.1 = getelementptr inbounds i16, i16* %In2, i32 %iv.1
%In2.1 = load i16, i16* %PIn2.1, align 2
%SIn2.1 = sext i16 %In2.1 to i32
%mul5.us.i.1.i = mul nsw i32 %SIn1.1, %SIn2.1
%Out.1 = getelementptr inbounds i32, i32* %Out, i32 %iv.1
store i32 %mul5.us.i.1.i, i32* %Out.1, align 4
%iv.2 = or i32 %iv, 2
%PIn1.2 = getelementptr inbounds i16, i16* %In1, i32 %iv.2
%In1.2 = load i16, i16* %PIn1.2, align 2
%SIn1.2 = sext i16 %In1.2 to i32
%PIn2.2 = getelementptr inbounds i16, i16* %In2, i32 %iv.2
%In2.2 = load i16, i16* %PIn2.2, align 2
%SIn2.2 = sext i16 %In2.2 to i32
%mul5.us.i.2.i = mul nsw i32 %SIn1.2, %SIn2.2
%Out.2 = getelementptr inbounds i32, i32* %Out, i32 %iv.2
store i32 %mul5.us.i.2.i, i32* %Out.2, align 4
%iv.3 = or i32 %iv, 3
%PIn1.3 = getelementptr inbounds i16, i16* %In1, i32 %iv.3
%In1.3 = load i16, i16* %PIn1.3, align 2
%SIn1.3 = sext i16 %In1.3 to i32
%PIn2.3 = getelementptr inbounds i16, i16* %In2, i32 %iv.3
%In2.3 = load i16, i16* %PIn2.3, align 2
%SIn2.3 = sext i16 %In2.3 to i32
%mul5.us.i.3.i = mul nsw i32 %SIn1.3, %SIn2.3
%Out.3 = getelementptr inbounds i32, i32* %Out, i32 %iv.3
store i32 %mul5.us.i.3.i, i32* %Out.3, align 4
%iv.next = add i32 %iv, 4
%count.next = add i32 %count, -4
%niter375.ncmp.3.i = icmp eq i32 %count.next, 0
br i1 %niter375.ncmp.3.i, label %exit, label %for.body

exit:
ret void
}

; CHECK-LABEL: topbottom_mul_load_const
define void @topbottom_mul_load_const(i32 %N, i32* noalias nocapture readnone %Out, i16* nocapture readonly %In, i16* %C) {
entry:
%const = load i16, i16* %C
%conv4.i.i = sext i16 %const to i32
br label %for.body

; CHECK: for.body:
; CHECK: [[Cast_PIn_0:%[^ ]+]] = bitcast i16* %PIn.0 to i32*
; CHECK: [[PIn_01:%[^ ]+]] = load i32, i32* [[Cast_PIn_0]], align 2
; CHECK: [[PIn_1:%[^ ]+]] = ashr i32 [[PIn_01]], 16
; CHECK: [[PIn_01_shl:%[^ ]+]] = shl i32 [[PIn_01]], 16
; CHECK: [[PIn_0:%[^ ]+]] = ashr i32 [[PIn_01_shl]], 16

; CHECK: mul nsw i32 [[PIn_0]], %conv4.i.i
; CHECK: mul nsw i32 [[PIn_1]], %conv4.i.i

; CHECK: [[Cast_PIn_2:%[^ ]+]] = bitcast i16* %PIn.2 to i32*
; CHECK: [[PIn_23:%[^ ]+]] = load i32, i32* [[Cast_PIn_2]], align 2
; CHECK: [[PIn_3:%[^ ]+]] = ashr i32 [[PIn_23]], 16
; CHECK: [[PIn_23_shl:%[^ ]+]] = shl i32 [[PIn_23]], 16
; CHECK: [[PIn_2:%[^ ]+]] = ashr i32 [[PIn_23_shl]], 16

; CHECK: mul nsw i32 [[PIn_2]], %conv4.i.i
; CHECK: mul nsw i32 [[PIn_3]], %conv4.i.i

for.body:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
%count = phi i32 [ %N, %entry ], [ %count.next, %for.body ]
%PIn.0 = getelementptr inbounds i16, i16* %In, i32 %iv
%In.0 = load i16, i16* %PIn.0, align 2
%conv.us.i144.i = sext i16 %In.0 to i32
%mul5.us.i.i = mul nsw i32 %conv.us.i144.i, %conv4.i.i
%Out.0 = getelementptr inbounds i32, i32* %Out, i32 %iv
store i32 %mul5.us.i.i, i32* %Out.0, align 4
%iv.1 = or i32 %iv, 1
%PIn.1 = getelementptr inbounds i16, i16* %In, i32 %iv.1
%In.1 = load i16, i16* %PIn.1, align 2
%conv.us.i144.1.i = sext i16 %In.1 to i32
%mul5.us.i.1.i = mul nsw i32 %conv.us.i144.1.i, %conv4.i.i
%Out.1 = getelementptr inbounds i32, i32* %Out, i32 %iv.1
store i32 %mul5.us.i.1.i, i32* %Out.1, align 4
%iv.2 = or i32 %iv, 2
%PIn.2 = getelementptr inbounds i16, i16* %In, i32 %iv.2
%In.3 = load i16, i16* %PIn.2, align 2
%conv.us.i144.2.i = sext i16 %In.3 to i32
%mul5.us.i.2.i = mul nsw i32 %conv.us.i144.2.i, %conv4.i.i
%Out.2 = getelementptr inbounds i32, i32* %Out, i32 %iv.2
store i32 %mul5.us.i.2.i, i32* %Out.2, align 4
%iv.3 = or i32 %iv, 3
%PIn.3 = getelementptr inbounds i16, i16* %In, i32 %iv.3
%In.4 = load i16, i16* %PIn.3, align 2
%conv.us.i144.3.i = sext i16 %In.4 to i32
%mul5.us.i.3.i = mul nsw i32 %conv.us.i144.3.i, %conv4.i.i
%Out.3 = getelementptr inbounds i32, i32* %Out, i32 %iv.3
store i32 %mul5.us.i.3.i, i32* %Out.3, align 4
%iv.next = add i32 %iv, 4
%count.next = add i32 %count, -4
%niter375.ncmp.3.i = icmp eq i32 %count.next, 0
br i1 %niter375.ncmp.3.i, label %exit, label %for.body

exit:
ret void
}

; CHECK-LABEL: topbottom_mul_64
define void @topbottom_mul_64(i32 %N, i64* noalias nocapture readnone %Out, i16* nocapture readonly %In1, i16* nocapture readonly %In2) {
entry:
br label %for.body

; CHECK: for.body:
; CHECK: [[Cast_PIn1_0:%[^ ]+]] = bitcast i16* %PIn1.0 to i32*
; CHECK: [[PIn1_01:%[^ ]+]] = load i32, i32* [[Cast_PIn1_0]], align 2
; CHECK: [[PIn1_1:%[^ ]+]] = ashr i32 [[PIn1_01]], 16
; CHECK: [[PIn1_01_shl:%[^ ]+]] = shl i32 [[PIn1_01]], 16
; CHECK: [[PIn1_0:%[^ ]+]] = ashr i32 [[PIn1_01_shl]], 16

; CHECK: [[Cast_PIn2_0:%[^ ]+]] = bitcast i16* %PIn2.0 to i32*
; CHECK: [[PIn2_01:%[^ ]+]] = load i32, i32* [[Cast_PIn2_0]], align 2
; CHECK: [[PIn2_1:%[^ ]+]] = ashr i32 [[PIn2_01]], 16
; CHECK: [[PIn2_01_shl:%[^ ]+]] = shl i32 [[PIn2_01]], 16
; CHECK: [[PIn2_0:%[^ ]+]] = ashr i32 [[PIn2_01_shl]], 16

; CHECK: [[Mul0:%[^ ]+]] = mul nsw i32 [[PIn1_0]], [[PIn2_0]]
; CHECK: [[SMul0:%[^ ]+]] = sext i32 [[Mul0]] to i64
; CHECK: [[Mul1:%[^ ]+]] = mul nsw i32 [[PIn1_1]], [[PIn2_1]]
; CHECK: [[SMul1:%[^ ]+]] = sext i32 [[Mul1]] to i64
; CHECK: add i64 [[SMul0]], [[SMul1]]

; CHECK: [[Cast_PIn1_2:%[^ ]+]] = bitcast i16* %PIn1.2 to i32*
; CHECK: [[PIn1_23:%[^ ]+]] = load i32, i32* [[Cast_PIn1_2]], align 2
; CHECK: [[PIn1_3:%[^ ]+]] = ashr i32 [[PIn1_23]], 16
; CHECK: [[PIn1_23_shl:%[^ ]+]] = shl i32 [[PIn1_23]], 16
; CHECK: [[PIn1_2:%[^ ]+]] = ashr i32 [[PIn1_23_shl]], 16

; CHECK: [[Cast_PIn2_2:%[^ ]+]] = bitcast i16* %PIn2.2 to i32*
; CHECK: [[PIn2_23:%[^ ]+]] = load i32, i32* [[Cast_PIn2_2]], align 2
; CHECK: [[PIn2_3:%[^ ]+]] = ashr i32 [[PIn2_23]], 16
; CHECK: [[PIn2_23_shl:%[^ ]+]] = shl i32 [[PIn2_23]], 16
; CHECK: [[PIn2_2:%[^ ]+]] = ashr i32 [[PIn2_23_shl]], 16

; CHECK: [[Mul2:%[^ ]+]] = mul nsw i32 [[PIn1_2]], [[PIn2_2]]
; CHECK: [[SMul2:%[^ ]+]] = sext i32 [[Mul2]] to i64
; CHECK: [[Mul3:%[^ ]+]] = mul nsw i32 [[PIn1_3]], [[PIn2_3]]
; CHECK: [[SMul3:%[^ ]+]] = sext i32 [[Mul3]] to i64
; CHECK: add i64 [[SMul2]], [[SMul3]]

for.body:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
%iv.out = phi i32 [ 0, %entry] , [ %iv.out.next, %for.body ]
%count = phi i32 [ %N, %entry ], [ %count.next, %for.body ]
%PIn1.0 = getelementptr inbounds i16, i16* %In1, i32 %iv
%In1.0 = load i16, i16* %PIn1.0, align 2
%SIn1.0 = sext i16 %In1.0 to i32
%PIn2.0 = getelementptr inbounds i16, i16* %In2, i32 %iv
%In2.0 = load i16, i16* %PIn2.0, align 2
%SIn2.0 = sext i16 %In2.0 to i32
%mul5.us.i.i = mul nsw i32 %SIn1.0, %SIn2.0
%sext.0 = sext i32 %mul5.us.i.i to i64
%iv.1 = or i32 %iv, 1
%PIn1.1 = getelementptr inbounds i16, i16* %In1, i32 %iv.1
%In1.1 = load i16, i16* %PIn1.1, align 2
%SIn1.1 = sext i16 %In1.1 to i32
%PIn2.1 = getelementptr inbounds i16, i16* %In2, i32 %iv.1
%In2.1 = load i16, i16* %PIn2.1, align 2
%SIn2.1 = sext i16 %In2.1 to i32
%mul5.us.i.1.i = mul nsw i32 %SIn1.1, %SIn2.1
%sext.1 = sext i32 %mul5.us.i.1.i to i64
%mac.0 = add i64 %sext.0, %sext.1
%Out.0 = getelementptr inbounds i64, i64* %Out, i32 %iv.out
store i64 %mac.0, i64* %Out.0, align 4
%iv.2 = or i32 %iv, 2
%PIn1.2 = getelementptr inbounds i16, i16* %In1, i32 %iv.2
%In1.2 = load i16, i16* %PIn1.2, align 2
%SIn1.2 = sext i16 %In1.2 to i32
%PIn2.2 = getelementptr inbounds i16, i16* %In2, i32 %iv.2
%In2.2 = load i16, i16* %PIn2.2, align 2
%SIn2.2 = sext i16 %In2.2 to i32
%mul5.us.i.2.i = mul nsw i32 %SIn1.2, %SIn2.2
%sext.2 = sext i32 %mul5.us.i.2.i to i64
%iv.3 = or i32 %iv, 3
%PIn1.3 = getelementptr inbounds i16, i16* %In1, i32 %iv.3
%In1.3 = load i16, i16* %PIn1.3, align 2
%SIn1.3 = sext i16 %In1.3 to i32
%PIn2.3 = getelementptr inbounds i16, i16* %In2, i32 %iv.3
%In2.3 = load i16, i16* %PIn2.3, align 2
%SIn2.3 = sext i16 %In2.3 to i32
%mul5.us.i.3.i = mul nsw i32 %SIn1.3, %SIn2.3
%sext.3 = sext i32 %mul5.us.i.3.i to i64
%mac.1 = add i64 %sext.2, %sext.3
%iv.out.1 = or i32 %iv.out, 1
%Out.1 = getelementptr inbounds i64, i64* %Out, i32 %iv.out.1
store i64 %mac.1, i64* %Out.1, align 4
%iv.next = add i32 %iv, 4
%iv.out.next = add i32 %iv.out, 2
%count.next = add i32 %count, -4
%niter375.ncmp.3.i = icmp eq i32 %count.next, 0
br i1 %niter375.ncmp.3.i, label %exit, label %for.body

exit:
ret void
}