diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h index ece6550b813f..5882972a358c 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -1182,7 +1182,7 @@ public: getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, - OperandValueKind OpdInfo = OK_AnyValue, + OperandValueInfo OpdInfo = {OK_AnyValue, OP_None}, const Instruction *I = nullptr) const; /// \return The cost of VP Load and Store instructions. diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp index 7ec968f1a1eb..98dc4058f761 100644 --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -904,12 +904,12 @@ InstructionCost TargetTransformInfo::getReplicationShuffleCost( InstructionCost TargetTransformInfo::getMemoryOpCost( unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, - TTI::TargetCostKind CostKind, TTI::OperandValueKind OpdInfo, + TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpdInfo, const Instruction *I) const { assert((I == nullptr || I->getOpcode() == Opcode) && "Opcode should reflect passed instruction."); InstructionCost Cost = TTIImpl->getMemoryOpCost( - Opcode, Src, Alignment, AddressSpace, CostKind, OpdInfo, I); + Opcode, Src, Alignment, AddressSpace, CostKind, OpdInfo.Kind, I); assert(Cost >= 0 && "TTI should not produce negative costs!"); return Cost; } diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 9d860a7dcf89..ecddd95fe0af 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -6398,9 +6398,9 @@ LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, 
AS, CostKind); } else { - TTI::OperandValueKind OpVK = TTI::getOperandInfo(I->getOperand(0)).Kind; + TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0)); Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, - CostKind, OpVK, I); + CostKind, OpInfo, I); } bool Reverse = ConsecutiveStride < 0; @@ -6679,10 +6679,10 @@ LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, const Align Alignment = getLoadStoreAlignment(I); unsigned AS = getLoadStoreAddressSpace(I); - TTI::OperandValueKind OpVK = TTI::getOperandInfo(I->getOperand(0)).Kind; + TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0)); return TTI.getAddressComputationCost(ValTy) + TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, - TTI::TCK_RecipThroughput, OpVK, I); + TTI::TCK_RecipThroughput, OpInfo, I); } return getWideningCost(I, VF); } diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 7d84c2695e8a..14285963db84 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -6048,7 +6048,8 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, auto *LI = cast<LoadInst>(V); ScalarsCost += TTI->getMemoryOpCost( Instruction::Load, LI->getType(), LI->getAlign(), - LI->getPointerAddressSpace(), CostKind, TTI::OK_AnyValue, LI); + LI->getPointerAddressSpace(), CostKind, + {TTI::OK_AnyValue, TTI::OP_None}, LI); } auto *LI = cast<LoadInst>(E->getMainOp()); auto *LoadTy = FixedVectorType::get(LI->getType(), VF); @@ -6056,7 +6057,8 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, GatherCost += VectorizedCnt * TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment, LI->getPointerAddressSpace(), - CostKind, TTI::OK_AnyValue, LI); + CostKind, {TTI::OK_AnyValue, + TTI::OP_None}, LI); GatherCost += ScatterVectorizeCnt * TTI->getGatherScatterOpCost( Instruction::Load, LoadTy, LI->getPointerOperand(), @@ -6462,7 +6464,7 @@ 
InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, Align Alignment = cast<LoadInst>(VL0)->getAlign(); InstructionCost ScalarEltCost = TTI->getMemoryOpCost(Instruction::Load, ScalarTy, Alignment, 0, - CostKind, TTI::OK_AnyValue, VL0); + CostKind, {TTI::OK_AnyValue, TTI::OP_None}, VL0); if (NeedToShuffleReuses) { CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; } @@ -6470,7 +6472,7 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, InstructionCost VecLdCost; if (E->State == TreeEntry::Vectorize) { VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0, - CostKind, TTI::OK_AnyValue, VL0); + CostKind, {TTI::OK_AnyValue, TTI::OP_None}, VL0); } else { assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState"); Align CommonAlignment = Alignment; @@ -6490,11 +6492,11 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, auto *SI = cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0); Align Alignment = SI->getAlign(); - TTI::OperandValueKind OpVK = TTI::getOperandInfo(SI->getOperand(0)).Kind; + TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(SI->getOperand(0)); InstructionCost ScalarEltCost = TTI->getMemoryOpCost( - Instruction::Store, ScalarTy, Alignment, 0, CostKind, OpVK, VL0); + Instruction::Store, ScalarTy, Alignment, 0, CostKind, OpInfo, VL0); InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost; - OpVK = TTI::OK_AnyValue; + TTI::OperandValueKind OpVK = TTI::OK_AnyValue; if (all_of(E->Scalars, [](Value *V) { return isConstant(cast<Instruction>(V)->getOperand(0)); @@ -6505,7 +6507,8 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, })) OpVK = TTI::OK_NonUniformConstantValue; InstructionCost VecStCost = TTI->getMemoryOpCost( - Instruction::Store, VecTy, Alignment, 0, CostKind, OpVK, VL0); + Instruction::Store, VecTy, Alignment, 0, CostKind, + {OpVK, TTI::OP_None}, VL0); LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost)); return CommonCost + VecStCost - ScalarStCost; }