[CostModel] remove cost-kind predicate for FP add/mul vector reduction costs
This was originally part of f2c25c7079, but that was reverted because there was an underlying bug in processing the vector type of these intrinsics. That was fixed with 74ffc823ed.

This is similar in spirit to 01ea93d85d (memcpy), except that here the underlying caller assumptions were created for vectorizer use (throughput) rather than other passes. That meant targets could have an enormous throughput cost with no corresponding size, latency, or blended cost increase.

Paraphrasing from the previous commits: this may not make sense for some callers, but at least now the costs will be consistently wrong instead of mysteriously wrong. Targets should provide better overrides if the current modeling is not accurate.
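To make the caller-side distinction concrete, here is a minimal C++ sketch (not part of this commit) of how a pass might query the cost model for one of these reduction calls under two different cost kinds. It is written against the current TargetTransformInfo API; the function name, the use of errs(), and the particular kinds chosen are illustrative only.

// Illustrative only: querying the cost of a vector_reduce_fadd/fmul call
// under two cost kinds. Not taken from this patch.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

void reportReductionCosts(const IntrinsicInst &II,
                          const TargetTransformInfo &TheTTI) {
  IntrinsicCostAttributes Attrs(II.getIntrinsicID(), II);
  // Vectorizers have historically asked for reciprocal throughput.
  InstructionCost Thru = TheTTI.getIntrinsicInstrCost(
      Attrs, TargetTransformInfo::TCK_RecipThroughput);
  // Size-oriented callers ask for other kinds; before this patch, those
  // queries skipped the type-based reduction costing and fell back to the
  // base default, so the two numbers could disagree wildly.
  InstructionCost Size = TheTTI.getIntrinsicInstrCost(
      Attrs, TargetTransformInfo::TCK_CodeSize);
  errs() << "throughput=" << Thru << " code-size=" << Size << "\n";
}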
parent 138fda5dd2
commit 50dfa19cc7
@@ -1205,9 +1205,6 @@ public:
     }
     case Intrinsic::vector_reduce_fadd:
     case Intrinsic::vector_reduce_fmul: {
-      // FIXME: all cost kinds should default to the same thing?
-      if (CostKind != TTI::TCK_RecipThroughput)
-        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
       IntrinsicCostAttributes Attrs(
           IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, 1, I);
       return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
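As the commit message says, targets remain free to model these cost kinds differently themselves. A hedged sketch of what such an override could look like, for a hypothetical MyTargetTTIImpl written in the usual BasicTTIImplBase style (the class name, cost values, and kind handling are invented for illustration):

// Hypothetical target override, not part of this commit. Assumes the usual
// in-tree scaffolding (a `typedef TargetTransformInfo TTI;` and a `BaseT`
// alias for the BasicTTIImplBase<MyTargetTTIImpl> base class).
InstructionCost
MyTargetTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                       TTI::TargetCostKind CostKind) {
  switch (ICA.getID()) {
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul:
    // Example: pretend this target lowers an FP reduction to one instruction,
    // so size-based kinds are cheap even if the throughput cost is not.
    if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
      return 1;
    break;
  default:
    break;
  }
  // Throughput (and everything else) keeps the shared default modeling.
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}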
@@ -236,11 +236,11 @@ define void @reduce_fmul(<16 x float> %va) {
 ; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
 ;
 ; SIZE-LABEL: 'reduce_fmul'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v = call float @llvm.vector.reduce.fmul.v16f32(float 4.200000e+01, <16 x float> %va)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v = call float @llvm.vector.reduce.fmul.v16f32(float 4.200000e+01, <16 x float> %va)
 ; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
 ;
 ; SIZE_LATE-LABEL: 'reduce_fmul'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v = call float @llvm.vector.reduce.fmul.v16f32(float 4.200000e+01, <16 x float> %va)
+; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v = call float @llvm.vector.reduce.fmul.v16f32(float 4.200000e+01, <16 x float> %va)
 ; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
 ;
   %v = call float @llvm.vector.reduce.fmul.v16f32(float 42.0, <16 x float> %va)
@@ -257,11 +257,11 @@ define void @reduce_fadd_fast(<16 x float> %va) {
 ; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
 ;
 ; SIZE-LABEL: 'reduce_fadd_fast'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> %va)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> %va)
 ; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
 ;
 ; SIZE_LATE-LABEL: 'reduce_fadd_fast'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> %va)
+; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> %va)
 ; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
 ;
   %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.0, <16 x float> %va)