[TargetLowering] Add support for non-uniform vectors to BuildExactSDIV

This patch refactors the existing BuildExactSDIV implementation to support non-uniform constant vector denominators.
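The transform relies on `exact` meaning the remainder is known to be zero: an arithmetic shift right strips the power-of-two part of the divisor, and the remaining odd factor is invertible modulo 2^N, so the division becomes a single multiplication. A minimal standalone sketch of the per-lane arithmetic (plain C++, not the LLVM API; inverseMod32/exactSDiv are illustrative names, and __builtin_ctz assumes GCC/Clang):

  #include <cassert>
  #include <cstdint>

  // Multiplicative inverse of an odd value modulo 2^32, using Newton's
  // iteration x' = x * (2 - d * x). Each step doubles the number of
  // correct low bits, so 32 bits converge in at most four steps.
  static uint32_t inverseMod32(uint32_t d) {
    assert((d & 1) && "only odd values are invertible modulo 2^32");
    uint32_t x = d; // d * d == 1 (mod 8), so x starts correct to 3 bits.
    while (d * x != 1)
      x *= 2 - d * x;
    return x;
  }

  // Exact signed division by a non-zero constant: shift out the
  // power-of-two part, then multiply by the inverse of the odd part.
  static int32_t exactSDiv(int32_t n, int32_t d) {
    unsigned Shift = __builtin_ctz((uint32_t)d); // trailing zeros of d
    uint32_t Factor = inverseMod32((uint32_t)(d >> Shift));
    return (int32_t)((uint32_t)(n >> Shift) * Factor);
  }

For example, exactSDiv(48, 24) shifts right by 3 to get 6, then 6 * 0xAAAAAAAB wraps around to 2. Previously this only applied to scalars and splat vector divisors; the patch computes a shift/factor pair for each lane.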

Differential Revision: https://reviews.llvm.org/D50392

llvm-svn: 339756
Simon Pilgrim 2018-08-15 09:35:12 +00:00
parent 6548cd3905
commit a272fa9b0c
2 changed files with 74 additions and 136 deletions


@@ -3438,32 +3438,44 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
   SDValue Op0 = N->getOperand(0);
   SDValue Op1 = N->getOperand(1);
   EVT VT = N->getValueType(0);
+  EVT SVT = VT.getScalarType();
   EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
+  EVT ShSVT = ShVT.getScalarType();
 
-  auto BuildSDIVPattern = [](APInt Divisor, unsigned &Shift, APInt &Factor) {
-    bool UseSRA = false;
-    Shift = Divisor.countTrailingZeros();
+  bool UseSRA = false;
+  SmallVector<SDValue, 16> Shifts, Factors;
+
+  auto BuildSDIVPattern = [&](ConstantSDNode *C) {
+    if (C->isNullValue())
+      return false;
+    APInt Divisor = C->getAPIntValue();
+    unsigned Shift = Divisor.countTrailingZeros();
     if (Shift) {
       Divisor.ashrInPlace(Shift);
       UseSRA = true;
     }
 
     // Calculate the multiplicative inverse, using Newton's method.
     APInt t;
-    Factor = Divisor;
+    APInt Factor = Divisor;
     while ((t = Divisor * Factor) != 1)
      Factor *= APInt(Divisor.getBitWidth(), 2) - t;
-    return UseSRA;
+    Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT));
+    Factors.push_back(DAG.getConstant(Factor, dl, SVT));
+    return true;
  };
 
-  ConstantSDNode *C = isConstOrConstSplat(Op1);
-  if (!C || C->isNullValue())
+  // Collect all magic values from the build vector.
+  if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern))
    return SDValue();
 
-  APInt FactorVal;
-  unsigned ShiftVal;
-  bool UseSRA = BuildSDIVPattern(C->getAPIntValue(), ShiftVal, FactorVal);
-  SDValue Shift = DAG.getConstant(ShiftVal, dl, ShVT);
-  SDValue Factor = DAG.getConstant(FactorVal, dl, VT);
+  SDValue Shift, Factor;
+  if (VT.isVector()) {
+    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
+    Factor = DAG.getBuildVector(VT, dl, Factors);
+  } else {
+    Shift = Shifts[0];
+    Factor = Factors[0];
+  }
 
   SDValue Res = Op0;
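The shift/factor pairs this produces for the divisors used in the tests below can be checked directly. The multiplications rely on unsigned wraparound; note that the old scalar CHECK lines print the same factors as signed immediates (e.g. -1431655765 == 0xAAAAAAAB):

  // 24 = 8 * 3  -> shift 3, factor = inverse(3)  = 0xAAAAAAAB = 2863311531
  // 25 (odd)    -> shift 0, factor = inverse(25) = 0xC28F5C29 = 3264175145
  // 26 = 2 * 13 -> shift 1, factor = inverse(13) = 0xC4EC4EC5 = 3303820997
  // 27 (odd)    -> shift 0, factor = inverse(27) = 0x684BDA13 = 1749801491
  static_assert(3u  * 0xAAAAAAABu == 1u, "inverse of 3 mod 2^32");
  static_assert(25u * 0xC28F5C29u == 1u, "inverse of 25 mod 2^32");
  static_assert(13u * 0xC4EC4EC5u == 1u, "inverse of 13 mod 2^32");
  static_assert(27u * 0x684BDA13u == 1u, "inverse of 27 mod 2^32");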


@@ -80,45 +80,25 @@ define <4 x i32> @test4(<4 x i32> %x) {
 define <4 x i32> @test5(<4 x i32> %x) {
 ; X86-LABEL: test5:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm2, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm2
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrad $3, %xmm1
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,3264175145,3264175145]
+; X86-NEXT:    movapd %xmm0, %xmm1
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm0, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
 ; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test5:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $1, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    sarl $3, %ecx
-; X64-NEXT:    imull $-1431655765, %ecx, %ecx # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %ecx, %xmm1
-; X64-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X64-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
   ret <4 x i32> %div
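For <i32 24, i32 24, i32 25, i32 25> the lowering builds the per-lane vectors shift = <3, 3, 0, 0> and factor = <2863311531, 2863311531, 3264175145, 3264175145>, the movdqa constant in the new X86 checks. SSE2 has no per-lane variable shift and no 32-bit vector multiply, hence the psrad-plus-movsd blend and the pmuludq/pshufd splitting; the X64 target (AVX2, given vpsravd) needs just vpsravd and vpmulld.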
@@ -127,49 +107,26 @@ define <4 x i32> @test5(<4 x i32> %x) {
 define <4 x i32> @test6(<4 x i32> %x) {
 ; X86-LABEL: test6:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    sarl %eax
-; X86-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm2, %eax
-; X86-NEXT:    sarl %eax
-; X86-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X86-NEXT:    movd %eax, %xmm2
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrad $3, %xmm1
+; X86-NEXT:    psrad $1, %xmm0
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,3303820997,3303820997]
+; X86-NEXT:    movapd %xmm0, %xmm1
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm0, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
 ; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test6:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $1, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    sarl $3, %ecx
-; X64-NEXT:    imull $-1431655765, %ecx, %ecx # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %ecx, %xmm1
-; X64-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    sarl %eax
-; X64-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X64-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    sarl %eax
-; X64-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26>
   ret <4 x i32> %div
@@ -178,41 +135,19 @@ define <4 x i32> @test6(<4 x i32> %x) {
 define <4 x i32> @test7(<4 x i32> %x) {
 ; X86-LABEL: test7:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm2, %eax
-; X86-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X86-NEXT:    movd %eax, %xmm2
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm0
-; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [3264175145,3264175145,1749801491,1749801491]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test7:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $1, %xmm0, %eax
-; X64-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    imull $-1030792151, %ecx, %ecx # imm = 0xC28F5C29
-; X64-NEXT:    vmovd %ecx, %xmm1
-; X64-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X64-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27>
   ret <4 x i32> %div
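Both divisors in test7 are odd, so every lane gets a zero shift and the exact division folds to a single vector multiply: no psrad/vpsravd is emitted, and the X64 lowering is one vpmulld.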
@@ -221,33 +156,24 @@ define <4 x i32> @test7(<4 x i32> %x) {
 define <4 x i32> @test8(<4 x i32> %x) {
 ; X86-LABEL: test8:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
-; X86-NEXT:    movaps %xmm0, %xmm2
-; X86-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm2[2,0]
-; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrad $3, %xmm1
+; X86-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,2863311531,2863311531]
+; X86-NEXT:    movapd %xmm1, %xmm0
+; X86-NEXT:    pmuludq %xmm2, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test8:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 1, i32 1, i32 24, i32 24>
   ret <4 x i32> %div
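test8 exercises divisor-1 lanes, which yield shift 0 and factor 1 (the leading 1,1 in the [1,1,2863311531,2863311531] constant) and therefore pass through unchanged, while lanes 2 and 3 are divided by 24. With the old splat-only check, a non-uniform divisor like this fell back to per-lane expansion.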