forked from OSchip/llvm-project
Correct cost model for vector shift on AVX2
- After moving the logic that recognizes a vector shift by a scalar amount from DAG combining into DAG lowering, we declare all vector shifts as custom-lowered even though some vector shifts on AVX2 are legal. As a result, the cost model needs special tuning to identify these legal cases. llvm-svn: 177586
This commit is contained in:
parent
7430382970
commit
70dd7f999d
|
@ -169,6 +169,29 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const {
|
|||
int ISD = TLI->InstructionOpcodeToISD(Opcode);
|
||||
assert(ISD && "Invalid opcode");
|
||||
|
||||
static const CostTblEntry<MVT> AVX2CostTable[] = {
|
||||
// Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to
|
||||
// customize them to detect the cases where shift amount is a scalar one.
|
||||
{ ISD::SHL, MVT::v4i32, 1 },
|
||||
{ ISD::SRL, MVT::v4i32, 1 },
|
||||
{ ISD::SRA, MVT::v4i32, 1 },
|
||||
{ ISD::SHL, MVT::v8i32, 1 },
|
||||
{ ISD::SRL, MVT::v8i32, 1 },
|
||||
{ ISD::SRA, MVT::v8i32, 1 },
|
||||
{ ISD::SHL, MVT::v2i64, 1 },
|
||||
{ ISD::SRL, MVT::v2i64, 1 },
|
||||
{ ISD::SHL, MVT::v4i64, 1 },
|
||||
{ ISD::SRL, MVT::v4i64, 1 },
|
||||
};
|
||||
|
||||
// Look for AVX2 lowering tricks.
|
||||
if (ST->hasAVX2()) {
|
||||
int Idx = CostTableLookup<MVT>(AVX2CostTable, array_lengthof(AVX2CostTable),
|
||||
ISD, LT.second);
|
||||
if (Idx != -1)
|
||||
return LT.first * AVX2CostTable[Idx].Cost;
|
||||
}
|
||||
|
||||
static const CostTblEntry<MVT> AVX1CostTable[] = {
|
||||
// We don't have to scalarize unsupported ops. We can issue two half-sized
|
||||
// operations and we only need to extract the upper YMM half.
|
||||
|
|
|
@ -72,3 +72,57 @@ define i32 @fmul(i32 %arg) {
|
|||
%B = fmul <8 x float> undef, undef
|
||||
ret i32 undef
|
||||
}
|
||||
|
||||
; Cost-model checks for 128-bit vector shifts.
; With AVX2, shl/lshr on <4 x i32>/<2 x i64> and ashr on <4 x i32> have
; cost-1 entries in the X86 AVX2 cost table, even though these shifts are
; declared as custom-lowered. ashr on <2 x i64> has no AVX2 table entry,
; so both run lines report the same higher cost for it.
; AVX: shift
; AVX2: shift
define void @shift() {
  ; AVX: cost of 2 {{.*}} shl
  ; AVX2: cost of 1 {{.*}} shl
  %A0 = shl <4 x i32> undef, undef
  ; AVX: cost of 2 {{.*}} shl
  ; AVX2: cost of 1 {{.*}} shl
  %A1 = shl <2 x i64> undef, undef

  ; AVX: cost of 2 {{.*}} lshr
  ; AVX2: cost of 1 {{.*}} lshr
  %B0 = lshr <4 x i32> undef, undef
  ; AVX: cost of 2 {{.*}} lshr
  ; AVX2: cost of 1 {{.*}} lshr
  %B1 = lshr <2 x i64> undef, undef

  ; AVX: cost of 2 {{.*}} ashr
  ; AVX2: cost of 1 {{.*}} ashr
  %C0 = ashr <4 x i32> undef, undef
  ; <2 x i64> ashr is not in the AVX2 cost table, so the fallback cost
  ; applies on both targets.
  ; AVX: cost of 6 {{.*}} ashr
  ; AVX2: cost of 6 {{.*}} ashr
  %C1 = ashr <2 x i64> undef, undef

  ret void
}
|
||||
|
||||
; Cost-model checks for 256-bit vector shifts.
; AVX2 has cost-1 table entries for shl/lshr on <8 x i32>/<4 x i64> and
; ashr on <8 x i32>. Plain AVX instead issues two half-sized (128-bit)
; operations and extracts the upper YMM half, hence cost 2. <4 x i64>
; ashr has no cheap lowering on either target.
; AVX: avx2shift
; AVX2: avx2shift
define void @avx2shift() {
  ; AVX: cost of 2 {{.*}} shl
  ; AVX2: cost of 1 {{.*}} shl
  %A0 = shl <8 x i32> undef, undef
  ; AVX: cost of 2 {{.*}} shl
  ; AVX2: cost of 1 {{.*}} shl
  %A1 = shl <4 x i64> undef, undef

  ; AVX: cost of 2 {{.*}} lshr
  ; AVX2: cost of 1 {{.*}} lshr
  %B0 = lshr <8 x i32> undef, undef
  ; AVX: cost of 2 {{.*}} lshr
  ; AVX2: cost of 1 {{.*}} lshr
  %B1 = lshr <4 x i64> undef, undef

  ; AVX: cost of 2 {{.*}} ashr
  ; AVX2: cost of 1 {{.*}} ashr
  %C0 = ashr <8 x i32> undef, undef
  ; <4 x i64> ashr is not in the AVX2 cost table, so the fallback cost
  ; applies on both targets.
  ; AVX: cost of 12 {{.*}} ashr
  ; AVX2: cost of 12 {{.*}} ashr
  %C1 = ashr <4 x i64> undef, undef

  ret void
}
|
||||
|
|
Loading…
Reference in New Issue