[TTI] Use OperandValueInfo in getMemoryOpCost client api [nfc]

This removes the last use of OperandValueKind from the client-side API and, once this is fully plumbed through the TTI implementation, will allow store costing to use the same operand properties that arithmetic costing already does.
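
Illustrative sketch (not part of the commit) of what the change looks like to a cost-model client. The helper costScalarStore and its parameters are hypothetical; the OperandValueInfo layout is inferred from the {OK_AnyValue, OP_None} initializer and the OpdInfo.Kind access in the hunks below.

// Sketch only, under the assumptions stated above.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static InstructionCost
costScalarStore(const TargetTransformInfo &TTIRef, StoreInst *SI,
                TargetTransformInfo::TargetCostKind CostKind) {
  // Old client API: only the operand *kind* of the stored value could be
  // forwarded, e.g.:
  //   return TTIRef.getMemoryOpCost(Instruction::Store,
  //                                 SI->getValueOperand()->getType(),
  //                                 SI->getAlign(), SI->getPointerAddressSpace(),
  //                                 CostKind, TargetTransformInfo::OK_AnyValue, SI);
  // New client API: kind and properties travel together in one struct, so a
  // store of, say, a power-of-two constant can eventually be costed with the
  // same operand information arithmetic costing already uses.
  TargetTransformInfo::OperandValueInfo OpInfo =
      TargetTransformInfo::getOperandInfo(SI->getValueOperand());
  return TTIRef.getMemoryOpCost(Instruction::Store,
                                SI->getValueOperand()->getType(), SI->getAlign(),
                                SI->getPointerAddressSpace(), CostKind, OpInfo,
                                SI);
}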
Philip Reames 2022-08-22 11:20:14 -07:00 committed by Philip Reames
parent 71771f8510
commit 27d3321c4f
4 changed files with 18 additions and 15 deletions

@@ -1182,7 +1182,7 @@ public:
   getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                   unsigned AddressSpace,
                   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
-                  OperandValueKind OpdInfo = OK_AnyValue,
+                  OperandValueInfo OpdInfo = {OK_AnyValue, OP_None},
                   const Instruction *I = nullptr) const;

   /// \return The cost of VP Load and Store instructions.

@@ -904,12 +904,12 @@ InstructionCost TargetTransformInfo::getReplicationShuffleCost(

 InstructionCost TargetTransformInfo::getMemoryOpCost(
     unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
-    TTI::TargetCostKind CostKind, TTI::OperandValueKind OpdInfo,
+    TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpdInfo,
     const Instruction *I) const {
   assert((I == nullptr || I->getOpcode() == Opcode) &&
          "Opcode should reflect passed instruction.");
   InstructionCost Cost = TTIImpl->getMemoryOpCost(
-      Opcode, Src, Alignment, AddressSpace, CostKind, OpdInfo, I);
+      Opcode, Src, Alignment, AddressSpace, CostKind, OpdInfo.Kind, I);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
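
Note that this wrapper still forwards only OpdInfo.Kind to TTIImpl, so the properties half of the struct is dropped at this boundary for now. Once the change is plumbed through the TTI implementation layer, as the commit message anticipates, that call would presumably become:

  InstructionCost Cost = TTIImpl->getMemoryOpCost(
      Opcode, Src, Alignment, AddressSpace, CostKind, OpdInfo, I);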

@@ -6398,9 +6398,9 @@ LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                       CostKind);
   } else {
-    TTI::OperandValueKind OpVK = TTI::getOperandInfo(I->getOperand(0)).Kind;
+    TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
-                                CostKind, OpVK, I);
+                                CostKind, OpInfo, I);
   }

   bool Reverse = ConsecutiveStride < 0;
@@ -6679,10 +6679,10 @@ LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
     const Align Alignment = getLoadStoreAlignment(I);
     unsigned AS = getLoadStoreAddressSpace(I);
-    TTI::OperandValueKind OpVK = TTI::getOperandInfo(I->getOperand(0)).Kind;
+    TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));

     return TTI.getAddressComputationCost(ValTy) +
            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
-                               TTI::TCK_RecipThroughput, OpVK, I);
+                               TTI::TCK_RecipThroughput, OpInfo, I);
   }
   return getWideningCost(I, VF);
 }

@@ -6048,7 +6048,8 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
           auto *LI = cast<LoadInst>(V);
           ScalarsCost += TTI->getMemoryOpCost(
               Instruction::Load, LI->getType(), LI->getAlign(),
-              LI->getPointerAddressSpace(), CostKind, TTI::OK_AnyValue, LI);
+              LI->getPointerAddressSpace(), CostKind,
+              {TTI::OK_AnyValue, TTI::OP_None}, LI);
         }
         auto *LI = cast<LoadInst>(E->getMainOp());
         auto *LoadTy = FixedVectorType::get(LI->getType(), VF);
@@ -6056,7 +6057,8 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
         GatherCost += VectorizedCnt *
                       TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment,
                                            LI->getPointerAddressSpace(),
-                                           CostKind, TTI::OK_AnyValue, LI);
+                                           CostKind, {TTI::OK_AnyValue,
+                                                      TTI::OP_None}, LI);
         GatherCost += ScatterVectorizeCnt *
                       TTI->getGatherScatterOpCost(
                           Instruction::Load, LoadTy, LI->getPointerOperand(),
@@ -6462,7 +6464,7 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
       Align Alignment = cast<LoadInst>(VL0)->getAlign();
       InstructionCost ScalarEltCost =
           TTI->getMemoryOpCost(Instruction::Load, ScalarTy, Alignment, 0,
-                               CostKind, TTI::OK_AnyValue, VL0);
+                               CostKind, {TTI::OK_AnyValue, TTI::OP_None}, VL0);
       if (NeedToShuffleReuses) {
         CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
       }
@@ -6470,7 +6472,7 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
       InstructionCost VecLdCost;
       if (E->State == TreeEntry::Vectorize) {
         VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0,
-                                         CostKind, TTI::OK_AnyValue, VL0);
+                                         CostKind, {TTI::OK_AnyValue, TTI::OP_None}, VL0);
       } else {
         assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
         Align CommonAlignment = Alignment;
@@ -6490,11 +6492,11 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
       auto *SI =
           cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
       Align Alignment = SI->getAlign();
-      TTI::OperandValueKind OpVK = TTI::getOperandInfo(SI->getOperand(0)).Kind;
+      TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(SI->getOperand(0));
       InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
-          Instruction::Store, ScalarTy, Alignment, 0, CostKind, OpVK, VL0);
+          Instruction::Store, ScalarTy, Alignment, 0, CostKind, OpInfo, VL0);
       InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
-      OpVK = TTI::OK_AnyValue;
+      TTI::OperandValueKind OpVK = TTI::OK_AnyValue;
       if (all_of(E->Scalars,
                  [](Value *V) {
                    return isConstant(cast<Instruction>(V)->getOperand(0));
@@ -6505,7 +6507,8 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
                  }))
         OpVK = TTI::OK_NonUniformConstantValue;
       InstructionCost VecStCost = TTI->getMemoryOpCost(
-          Instruction::Store, VecTy, Alignment, 0, CostKind, OpVK, VL0);
+          Instruction::Store, VecTy, Alignment, 0, CostKind,
+          {OpVK, TTI::OP_None}, VL0);
       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost));
       return CommonCost + VecStCost - ScalarStCost;
     }