forked from OSchip/llvm-project
[SLP] use range-for loops, fix formatting; NFC
These are part of D57059, but that patch doesn't apply cleanly to trunk at this point, so we might as well remove some of the noise.

llvm-svn: 369776
This commit is contained in:
parent
729e242a79
commit
5a5d44e801
|
@ -2162,9 +2162,9 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
// the same block.
|
// the same block.
|
||||||
|
|
||||||
// Don't vectorize ephemeral values.
|
// Don't vectorize ephemeral values.
|
||||||
for (unsigned i = 0, e = VL.size(); i != e; ++i) {
|
for (Value *V : VL) {
|
||||||
if (EphValues.count(VL[i])) {
|
if (EphValues.count(V)) {
|
||||||
LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
|
LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
|
||||||
<< ") is ephemeral.\n");
|
<< ") is ephemeral.\n");
|
||||||
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
|
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
|
||||||
return;
|
return;
|
||||||
|
@ -2188,12 +2188,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check that none of the instructions in the bundle are already in the tree.
|
// Check that none of the instructions in the bundle are already in the tree.
|
||||||
for (unsigned i = 0, e = VL.size(); i != e; ++i) {
|
for (Value *V : VL) {
|
||||||
auto *I = dyn_cast<Instruction>(VL[i]);
|
auto *I = dyn_cast<Instruction>(V);
|
||||||
if (!I)
|
if (!I)
|
||||||
continue;
|
continue;
|
||||||
if (getTreeEntry(I)) {
|
if (getTreeEntry(I)) {
|
||||||
LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
|
LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
|
||||||
<< ") is already in tree.\n");
|
<< ") is already in tree.\n");
|
||||||
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
|
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
|
||||||
return;
|
return;
|
||||||
|
@ -2203,8 +2203,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
// If any of the scalars is marked as a value that needs to stay scalar, then
|
// If any of the scalars is marked as a value that needs to stay scalar, then
|
||||||
// we need to gather the scalars.
|
// we need to gather the scalars.
|
||||||
// The reduction nodes (stored in UserIgnoreList) also should stay scalar.
|
// The reduction nodes (stored in UserIgnoreList) also should stay scalar.
|
||||||
for (unsigned i = 0, e = VL.size(); i != e; ++i) {
|
for (Value *V : VL) {
|
||||||
if (MustGather.count(VL[i]) || is_contained(UserIgnoreList, VL[i])) {
|
if (MustGather.count(V) || is_contained(UserIgnoreList, V)) {
|
||||||
LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
|
LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
|
||||||
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
|
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
|
||||||
return;
|
return;
|
||||||
|
@ -2446,8 +2446,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
case Instruction::FPTrunc:
|
case Instruction::FPTrunc:
|
||||||
case Instruction::BitCast: {
|
case Instruction::BitCast: {
|
||||||
Type *SrcTy = VL0->getOperand(0)->getType();
|
Type *SrcTy = VL0->getOperand(0)->getType();
|
||||||
for (unsigned i = 0; i < VL.size(); ++i) {
|
for (Value *V : VL) {
|
||||||
Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
|
Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
|
||||||
if (Ty != SrcTy || !isValidElementType(Ty)) {
|
if (Ty != SrcTy || !isValidElementType(Ty)) {
|
||||||
BS.cancelScheduling(VL, VL0);
|
BS.cancelScheduling(VL, VL0);
|
||||||
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
|
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
|
||||||
|
@ -2465,8 +2465,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
|
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
|
||||||
ValueList Operands;
|
ValueList Operands;
|
||||||
// Prepare the operand vector.
|
// Prepare the operand vector.
|
||||||
for (Value *j : VL)
|
for (Value *V : VL)
|
||||||
Operands.push_back(cast<Instruction>(j)->getOperand(i));
|
Operands.push_back(cast<Instruction>(V)->getOperand(i));
|
||||||
|
|
||||||
buildTree_rec(Operands, Depth + 1, {TE, i});
|
buildTree_rec(Operands, Depth + 1, {TE, i});
|
||||||
}
|
}
|
||||||
|
@ -2478,8 +2478,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
|
CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
|
||||||
CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
|
CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
|
||||||
Type *ComparedTy = VL0->getOperand(0)->getType();
|
Type *ComparedTy = VL0->getOperand(0)->getType();
|
||||||
for (unsigned i = 1, e = VL.size(); i < e; ++i) {
|
for (Value *V : VL) {
|
||||||
CmpInst *Cmp = cast<CmpInst>(VL[i]);
|
CmpInst *Cmp = cast<CmpInst>(V);
|
||||||
if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
|
if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
|
||||||
Cmp->getOperand(0)->getType() != ComparedTy) {
|
Cmp->getOperand(0)->getType() != ComparedTy) {
|
||||||
BS.cancelScheduling(VL, VL0);
|
BS.cancelScheduling(VL, VL0);
|
||||||
|
@ -2568,8 +2568,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
}
|
}
|
||||||
case Instruction::GetElementPtr: {
|
case Instruction::GetElementPtr: {
|
||||||
// We don't combine GEPs with complicated (nested) indexing.
|
// We don't combine GEPs with complicated (nested) indexing.
|
||||||
for (unsigned j = 0; j < VL.size(); ++j) {
|
for (Value *V : VL) {
|
||||||
if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
|
if (cast<Instruction>(V)->getNumOperands() != 2) {
|
||||||
LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
|
LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
|
||||||
BS.cancelScheduling(VL, VL0);
|
BS.cancelScheduling(VL, VL0);
|
||||||
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
|
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
|
||||||
|
@ -2581,8 +2581,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
// We can't combine several GEPs into one vector if they operate on
|
// We can't combine several GEPs into one vector if they operate on
|
||||||
// different types.
|
// different types.
|
||||||
Type *Ty0 = VL0->getOperand(0)->getType();
|
Type *Ty0 = VL0->getOperand(0)->getType();
|
||||||
for (unsigned j = 0; j < VL.size(); ++j) {
|
for (Value *V : VL) {
|
||||||
Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
|
Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType();
|
||||||
if (Ty0 != CurTy) {
|
if (Ty0 != CurTy) {
|
||||||
LLVM_DEBUG(dbgs()
|
LLVM_DEBUG(dbgs()
|
||||||
<< "SLP: not-vectorizable GEP (different types).\n");
|
<< "SLP: not-vectorizable GEP (different types).\n");
|
||||||
|
@ -2594,8 +2594,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
}
|
}
|
||||||
|
|
||||||
// We don't combine GEPs with non-constant indexes.
|
// We don't combine GEPs with non-constant indexes.
|
||||||
for (unsigned j = 0; j < VL.size(); ++j) {
|
for (Value *V : VL) {
|
||||||
auto Op = cast<Instruction>(VL[j])->getOperand(1);
|
auto Op = cast<Instruction>(V)->getOperand(1);
|
||||||
if (!isa<ConstantInt>(Op)) {
|
if (!isa<ConstantInt>(Op)) {
|
||||||
LLVM_DEBUG(dbgs()
|
LLVM_DEBUG(dbgs()
|
||||||
<< "SLP: not-vectorizable GEP (non-constant indexes).\n");
|
<< "SLP: not-vectorizable GEP (non-constant indexes).\n");
|
||||||
|
@ -2613,8 +2613,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
for (unsigned i = 0, e = 2; i < e; ++i) {
|
for (unsigned i = 0, e = 2; i < e; ++i) {
|
||||||
ValueList Operands;
|
ValueList Operands;
|
||||||
// Prepare the operand vector.
|
// Prepare the operand vector.
|
||||||
for (Value *j : VL)
|
for (Value *V : VL)
|
||||||
Operands.push_back(cast<Instruction>(j)->getOperand(i));
|
Operands.push_back(cast<Instruction>(V)->getOperand(i));
|
||||||
|
|
||||||
buildTree_rec(Operands, Depth + 1, {TE, i});
|
buildTree_rec(Operands, Depth + 1, {TE, i});
|
||||||
}
|
}
|
||||||
|
@ -2636,8 +2636,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
|
LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
|
||||||
|
|
||||||
ValueList Operands;
|
ValueList Operands;
|
||||||
for (Value *j : VL)
|
for (Value *V : VL)
|
||||||
Operands.push_back(cast<Instruction>(j)->getOperand(0));
|
Operands.push_back(cast<Instruction>(V)->getOperand(0));
|
||||||
TE->setOperandsInOrder();
|
TE->setOperandsInOrder();
|
||||||
buildTree_rec(Operands, Depth + 1, {TE, 0});
|
buildTree_rec(Operands, Depth + 1, {TE, 0});
|
||||||
return;
|
return;
|
||||||
|
@ -2661,15 +2661,15 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
for (unsigned j = 0; j != NumArgs; ++j)
|
for (unsigned j = 0; j != NumArgs; ++j)
|
||||||
if (hasVectorInstrinsicScalarOpd(ID, j))
|
if (hasVectorInstrinsicScalarOpd(ID, j))
|
||||||
ScalarArgs[j] = CI->getArgOperand(j);
|
ScalarArgs[j] = CI->getArgOperand(j);
|
||||||
for (unsigned i = 1, e = VL.size(); i != e; ++i) {
|
for (Value *V : VL) {
|
||||||
CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
|
CallInst *CI2 = dyn_cast<CallInst>(V);
|
||||||
if (!CI2 || CI2->getCalledFunction() != Int ||
|
if (!CI2 || CI2->getCalledFunction() != Int ||
|
||||||
getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
|
getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
|
||||||
!CI->hasIdenticalOperandBundleSchema(*CI2)) {
|
!CI->hasIdenticalOperandBundleSchema(*CI2)) {
|
||||||
BS.cancelScheduling(VL, VL0);
|
BS.cancelScheduling(VL, VL0);
|
||||||
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
|
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
|
||||||
ReuseShuffleIndicies);
|
ReuseShuffleIndicies);
|
||||||
LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
|
LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
|
||||||
<< "\n");
|
<< "\n");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -2698,7 +2698,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
|
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
|
||||||
ReuseShuffleIndicies);
|
ReuseShuffleIndicies);
|
||||||
LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
|
LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
|
||||||
<< *CI << "!=" << *VL[i] << '\n');
|
<< *CI << "!=" << *V << '\n');
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2709,8 +2709,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
|
for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
|
||||||
ValueList Operands;
|
ValueList Operands;
|
||||||
// Prepare the operand vector.
|
// Prepare the operand vector.
|
||||||
for (Value *j : VL) {
|
for (Value *V : VL) {
|
||||||
CallInst *CI2 = dyn_cast<CallInst>(j);
|
CallInst *CI2 = dyn_cast<CallInst>(V);
|
||||||
Operands.push_back(CI2->getArgOperand(i));
|
Operands.push_back(CI2->getArgOperand(i));
|
||||||
}
|
}
|
||||||
buildTree_rec(Operands, Depth + 1, {TE, i});
|
buildTree_rec(Operands, Depth + 1, {TE, i});
|
||||||
|
@ -2746,8 +2746,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||||
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
|
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
|
||||||
ValueList Operands;
|
ValueList Operands;
|
||||||
// Prepare the operand vector.
|
// Prepare the operand vector.
|
||||||
for (Value *j : VL)
|
for (Value *V : VL)
|
||||||
Operands.push_back(cast<Instruction>(j)->getOperand(i));
|
Operands.push_back(cast<Instruction>(V)->getOperand(i));
|
||||||
|
|
||||||
buildTree_rec(Operands, Depth + 1, {TE, i});
|
buildTree_rec(Operands, Depth + 1, {TE, i});
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue