[TargetLowering] Add BuildSDiv support for division by one or negone.

This reduces most of the sdiv stages (the MULHS, shifts, etc.) to zero/identity values and uses the numerator scale factor to multiply by +1/-1.

llvm-svn: 340260
Author: Simon Pilgrim
Date:   2018-08-21 10:20:36 +00:00
Parent: 3cd1d27b58
Commit: 72b324de4d

2 changed files with 118 additions and 129 deletions
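For context: BuildSDIV expands a signed division by a constant into a multiply by a "magic" factor plus fixup steps, following the classic Hacker's Delight construction. Below is a hypothetical standalone C++ sketch of the per-divisor parameter selection performed by the patched BuildSDIVPattern lambda, at a fixed 32-bit width instead of APInt; the names (ms, magic, SDivParams, computeParams) are illustrative, not LLVM API.

#include <cstdint>

struct ms { int32_t m; int32_t s; }; // magic multiplier and shift amount

// Signed-division magic numbers (Hacker's Delight, Fig. 10-1), 32-bit.
// Precondition: d is not 0, +1 or -1 (those never reach magic()).
static ms magic(int32_t d) {
  const uint32_t two31 = 0x80000000u;
  uint32_t ad = d < 0 ? 0u - (uint32_t)d : (uint32_t)d; // |d|, INT32_MIN-safe
  uint32_t t = two31 + ((uint32_t)d >> 31);
  uint32_t anc = t - 1 - t % ad;                        // |nc|
  int32_t p = 31;
  uint32_t q1 = two31 / anc, r1 = two31 - q1 * anc;
  uint32_t q2 = two31 / ad, r2 = two31 - q2 * ad;
  uint32_t delta;
  do {
    ++p;
    q1 *= 2; r1 *= 2;
    if (r1 >= anc) { ++q1; r1 -= anc; }
    q2 *= 2; r2 *= 2;
    if (r2 >= ad) { ++q2; r2 -= ad; }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));
  uint32_t m = q2 + 1;
  if (d < 0) m = 0u - m;      // negate in unsigned to avoid overflow UB
  return { (int32_t)m, p - 32 };
}

// One (Magic, Factor, Shift, ShiftMask) tuple per divisor, mirroring the
// BuildSDIVPattern lambda in the diff below.
struct SDivParams { int32_t Magic, Factor, Shift, ShiftMask; };

static SDivParams computeParams(int32_t d) {
  if (d == 1 || d == -1)
    return {0, d, 0, 0};      // new: multiply by +1/-1, all other stages idle
  ms mag = magic(d);
  int32_t factor = 0;
  if (d > 0 && mag.m < 0)
    factor = 1;               // d > 0, m < 0: add the numerator
  else if (d < 0 && mag.m > 0)
    factor = -1;              // d < 0, m > 0: subtract the numerator
  return {mag.m, factor, mag.s, -1};
}

Emitting one tuple per divisor, with identity values for +1/-1, is what keeps a non-uniform vector divisor on the single vectorized MULHS path instead of falling back to scalarization, which is exactly the change visible in the test diff below.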

lib/CodeGen/SelectionDAG/TargetLowering.cpp

@@ -3524,27 +3524,35 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
   if (N->getFlags().hasExact())
     return BuildExactSDIV(*this, N, dl, DAG, Created);
 
-  SmallVector<SDValue, 16> MagicFactors, Factors, Shifts;
+  SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks;
 
   auto BuildSDIVPattern = [&](ConstantSDNode *C) {
-    // TODO: Handle sdiv by one and neg-one.
-    if (C->isNullValue() || C->isOne() || C->isAllOnesValue())
+    if (C->isNullValue())
       return false;
 
     const APInt &Divisor = C->getAPIntValue();
     APInt::ms magics = Divisor.magic();
     int NumeratorFactor = 0;
+    int ShiftMask = -1;
 
-    // If d > 0 and m < 0, add the numerator.
-    if (Divisor.isStrictlyPositive() && magics.m.isNegative())
+    if (Divisor.isOneValue() || Divisor.isAllOnesValue()) {
+      // If d is +1/-1, we just multiply the numerator by +1/-1.
+      NumeratorFactor = Divisor.getSExtValue();
+      magics.m = 0;
+      magics.s = 0;
+      ShiftMask = 0;
+    } else if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
+      // If d > 0 and m < 0, add the numerator.
       NumeratorFactor = 1;
-    // If d < 0 and m > 0, subtract the numerator.
-    else if (Divisor.isNegative() && magics.m.isStrictlyPositive())
+    } else if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
+      // If d < 0 and m > 0, subtract the numerator.
      NumeratorFactor = -1;
+    }
 
     MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT));
     Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT));
     Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT));
+    ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT));
 
     return true;
   };
@@ -3555,19 +3563,21 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
   if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern))
     return SDValue();
 
-  SDValue MagicFactor, Factor, Shift;
+  SDValue MagicFactor, Factor, Shift, ShiftMask;
   if (VT.isVector()) {
     MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
     Factor = DAG.getBuildVector(VT, dl, Factors);
     Shift = DAG.getBuildVector(ShVT, dl, Shifts);
+    ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks);
   } else {
     MagicFactor = MagicFactors[0];
     Factor = Factors[0];
     Shift = Shifts[0];
+    ShiftMask = ShiftMasks[0];
   }
 
-  // Multiply the numerator (operand 0) by the magic value
-  // FIXME: We should support doing a MUL in a wider type
+  // Multiply the numerator (operand 0) by the magic value.
+  // FIXME: We should support doing a MUL in a wider type.
   SDValue Q;
   if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT)
                           : isOperationLegalOrCustom(ISD::MULHS, VT))
@@ -3578,7 +3588,7 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
         DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor);
     Q = SDValue(LoHi.getNode(), 1);
   } else
-    return SDValue(); // No mulhs or equivalent
+    return SDValue(); // No mulhs or equivalent.
   Created.push_back(Q.getNode());
 
   // (Optionally) Add/subtract the numerator using Factor.
@@ -3591,9 +3601,11 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
   Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift);
   Created.push_back(Q.getNode());
 
-  // Extract the sign bit and add it to the quotient
-  SDValue T =
-      DAG.getNode(ISD::SRL, dl, VT, Q, DAG.getConstant(EltBits - 1, dl, ShVT));
+  // Extract the sign bit, mask it and add it to the quotient.
+  SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT);
+  SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift);
   Created.push_back(T.getNode());
+  T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask);
+  Created.push_back(T.getNode());
   return DAG.getNode(ISD::ADD, dl, VT, Q, T);
 }
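The node chain itself, modeled as a scalar C++ sketch continuing the snippet above (illustrative names, not the actual DAG code): MULHS by MagicFactor, an optional add/subtract of the numerator scaled by Factor, an arithmetic shift by Shift, then the sign bit is extracted, ANDed with the new ShiftMask, and added to the quotient. For d = +1/-1 every stage except the Factor multiply is an identity, so the chain folds to n * (+/-1).

// High half of the widened signed product, i.e. ISD::MULHS.
static int32_t mulhs(int32_t a, int32_t b) {
  return (int32_t)(((int64_t)a * b) >> 32);
}

// Scalar model of the node chain BuildSDIV emits. The add of n * Factor is
// done in unsigned arithmetic so the sketch has no signed-overflow UB;
// n = INT32_MIN with d = -1 is undefined here, exactly as for a real sdiv.
static int32_t expandSDiv(int32_t n, SDivParams p) {
  int32_t q = mulhs(n, p.Magic);                                 // MULHS
  q = (int32_t)((uint32_t)q + (uint32_t)n * (uint32_t)p.Factor); // ADD/SUB
  q >>= p.Shift;                      // SRA (arithmetic shift assumed)
  int32_t t = (int32_t)((uint32_t)q >> 31);                      // SRL
  t &= p.ShiftMask;                   // AND: ShiftMask is 0 for d = +/-1
  return q + t;                                                  // ADD
}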

test/CodeGen/X86/combine-sdiv.ll

@@ -2378,124 +2378,101 @@ define <8 x i16> @combine_vec_sdiv_nonuniform5(<8 x i16> %x) {
 define <8 x i16> @combine_vec_sdiv_nonuniform6(<8 x i16> %x) {
 ; SSE-LABEL: combine_vec_sdiv_nonuniform6:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pextrw $5, %xmm0, %eax
-; SSE-NEXT:    movswl %ax, %ecx
-; SSE-NEXT:    imull $-32639, %ecx, %ecx # imm = 0x8081
-; SSE-NEXT:    shrl $16, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    movzwl %cx, %eax
-; SSE-NEXT:    sarw $7, %cx
-; SSE-NEXT:    shrl $15, %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    pextrw $2, %xmm0, %ecx
-; SSE-NEXT:    movswl %cx, %edx
-; SSE-NEXT:    imull $32703, %edx, %edx # imm = 0x7FBF
-; SSE-NEXT:    shrl $16, %edx
-; SSE-NEXT:    subl %ecx, %edx
-; SSE-NEXT:    movzwl %dx, %ecx
-; SSE-NEXT:    sarw $8, %dx
-; SSE-NEXT:    shrl $15, %ecx
-; SSE-NEXT:    addl %edx, %ecx
-; SSE-NEXT:    pextrw $1, %xmm0, %edx
-; SSE-NEXT:    movl %edx, %esi
-; SSE-NEXT:    sarw $15, %si
-; SSE-NEXT:    movzwl %si, %esi
-; SSE-NEXT:    shrl $7, %esi
-; SSE-NEXT:    addl %edx, %esi
-; SSE-NEXT:    sarw $9, %si
-; SSE-NEXT:    negl %esi
-; SSE-NEXT:    pextrw $0, %xmm0, %edx
-; SSE-NEXT:    xorl %edi, %edi
-; SSE-NEXT:    cmpl $32768, %edx # imm = 0x8000
-; SSE-NEXT:    sete %dil
-; SSE-NEXT:    movd %edi, %xmm1
-; SSE-NEXT:    pinsrw $1, %esi, %xmm1
-; SSE-NEXT:    pinsrw $2, %ecx, %xmm1
-; SSE-NEXT:    pextrw $3, %xmm0, %ecx
-; SSE-NEXT:    negl %ecx
-; SSE-NEXT:    pinsrw $3, %ecx, %xmm1
-; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
-; SSE-NEXT:    pinsrw $5, %eax, %xmm1
-; SSE-NEXT:    pextrw $6, %xmm0, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    sarw $15, %cx
-; SSE-NEXT:    movzwl %cx, %ecx
-; SSE-NEXT:    shrl $7, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    sarw $9, %cx
-; SSE-NEXT:    pinsrw $6, %ecx, %xmm1
-; SSE-NEXT:    pextrw $7, %xmm0, %eax
-; SSE-NEXT:    cwtl
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shll $14, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    movl %ecx, %eax
-; SSE-NEXT:    shrl $31, %eax
-; SSE-NEXT:    sarl $29, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    pinsrw $7, %ecx, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,1,1,1,0]
+; SSE-NEXT:    pmullw %xmm0, %xmm1
+; SSE-NEXT:    pmulhw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    paddw %xmm1, %xmm0
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psraw $8, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4,5],xmm1[6,7]
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    psraw $4, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0],xmm1[1,2,3,4],xmm2[5],xmm1[6],xmm2[7]
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    psraw $2, %xmm3
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3,4],xmm3[5],xmm2[6,7]
+; SSE-NEXT:    movdqa %xmm3, %xmm1
+; SSE-NEXT:    psraw $1, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5],xmm3[6],xmm1[7]
+; SSE-NEXT:    psrlw $15, %xmm0
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
+; SSE-NEXT:    paddw %xmm2, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: combine_vec_sdiv_nonuniform6:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrw $5, %xmm0, %eax
-; AVX-NEXT:    movswl %ax, %ecx
-; AVX-NEXT:    imull $-32639, %ecx, %ecx # imm = 0x8081
-; AVX-NEXT:    shrl $16, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    movzwl %cx, %eax
-; AVX-NEXT:    sarw $7, %cx
-; AVX-NEXT:    shrl $15, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    vpextrw $2, %xmm0, %ecx
-; AVX-NEXT:    movswl %cx, %edx
-; AVX-NEXT:    imull $32703, %edx, %edx # imm = 0x7FBF
-; AVX-NEXT:    shrl $16, %edx
-; AVX-NEXT:    subl %ecx, %edx
-; AVX-NEXT:    movzwl %dx, %ecx
-; AVX-NEXT:    sarw $8, %dx
-; AVX-NEXT:    shrl $15, %ecx
-; AVX-NEXT:    addl %edx, %ecx
-; AVX-NEXT:    vpextrw $1, %xmm0, %edx
-; AVX-NEXT:    movl %edx, %esi
-; AVX-NEXT:    sarw $15, %si
-; AVX-NEXT:    movzwl %si, %esi
-; AVX-NEXT:    shrl $7, %esi
-; AVX-NEXT:    addl %edx, %esi
-; AVX-NEXT:    sarw $9, %si
-; AVX-NEXT:    negl %esi
-; AVX-NEXT:    vpextrw $0, %xmm0, %edx
-; AVX-NEXT:    xorl %edi, %edi
-; AVX-NEXT:    cmpl $32768, %edx # imm = 0x8000
-; AVX-NEXT:    sete %dil
-; AVX-NEXT:    vmovd %edi, %xmm1
-; AVX-NEXT:    vpinsrw $1, %esi, %xmm1, %xmm1
-; AVX-NEXT:    vpinsrw $2, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $3, %xmm0, %ecx
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    vpinsrw $3, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
-; AVX-NEXT:    vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $6, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    sarw $15, %cx
-; AVX-NEXT:    movzwl %cx, %ecx
-; AVX-NEXT:    shrl $7, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    sarw $9, %cx
-; AVX-NEXT:    vpinsrw $6, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $7, %xmm0, %eax
-; AVX-NEXT:    cwtl
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    shll $14, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    movl %ecx, %eax
-; AVX-NEXT:    shrl $31, %eax
-; AVX-NEXT:    sarl $29, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    vpinsrw $7, %ecx, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: combine_vec_sdiv_nonuniform6:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vpmulhw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsraw $8, %xmm0, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4,5],xmm1[6,7]
+; AVX1-NEXT:    vpsraw $4, %xmm1, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4],xmm2[5],xmm1[6],xmm2[7]
+; AVX1-NEXT:    vpsraw $2, %xmm1, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4],xmm2[5],xmm1[6,7]
+; AVX1-NEXT:    vpsraw $1, %xmm1, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5],xmm1[6],xmm2[7]
+; AVX1-NEXT:    vpsrlw $15, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
+; AVX1-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: combine_vec_sdiv_nonuniform6:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpmulhw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm1
+; AVX2-NEXT:    vpsravd {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlw $15, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
+; AVX2-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: combine_vec_sdiv_nonuniform6:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm1
+; AVX512F-NEXT:    vpmulhw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpsrlw $15, %xmm0, %xmm1
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3,4],xmm1[5,6,7]
+; AVX512F-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX512F-NEXT:    vpsravd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: combine_vec_sdiv_nonuniform6:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm1
+; AVX512BW-NEXT:    vpmulhw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpsrlw $15, %xmm0, %xmm1
+; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3,4],xmm1[5,6,7]
+; AVX512BW-NEXT:    vpsravw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    retq
+;
+; XOP-LABEL: combine_vec_sdiv_nonuniform6:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpmulhw {{.*}}(%rip), %xmm0, %xmm1
+; XOP-NEXT:    vpmacsww %xmm1, {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vpsrlw $15, %xmm0, %xmm1
+; XOP-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3,4],xmm1[5,6,7]
+; XOP-NEXT:    vpshaw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
   %1 = sdiv <8 x i16> %x, <i16 -32768, i16 -512, i16 -511, i16 -1, i16 1, i16 255, i16 512, i16 32767>
   ret <8 x i16> %1
 }
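As a quick sanity check of the two sketches above (they assume each other's definitions), the divisors from this test vector can be compared against plain C++ division. This runs at 32-bit width, whereas the test itself is i16; the lane-by-lane parameters differ across widths, but the expansion algebra is the same.

#include <cstdio>

int main() {
  // Divisors from the test vector above, widened from i16 to i32.
  const int32_t divisors[] = {-32768, -512, -511, -1, 1, 255, 512, 32767};
  for (int32_t d : divisors) {
    SDivParams p = computeParams(d);
    for (int32_t n = -1000; n <= 1000; ++n)
      if (expandSDiv(n, p) != n / d)       // C++ '/' truncates like sdiv
        std::printf("mismatch: %d / %d\n", n, d);
  }
  return 0;                                // silence means all quotients match
}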