forked from OSchip/llvm-project
[X86][SSE] Fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
Reduce XMM->GPR traffic by performing bitops on the vectors, and using a single MOVMSK call. This requires us to use vectors of the same size and element width, but we can mix fp/int type equivalents with suitable bitcasting.
This commit is contained in:
parent
5184857c62
commit
3d8149c2a1
|
@ -41966,6 +41966,17 @@ static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
|
|||
}
|
||||
}
|
||||
|
||||
unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
|
||||
unsigned FPOpcode;
|
||||
switch (Opcode) {
|
||||
default: llvm_unreachable("Unexpected input node for FP logic conversion");
|
||||
case ISD::AND: FPOpcode = X86ISD::FAND; break;
|
||||
case ISD::OR: FPOpcode = X86ISD::FOR; break;
|
||||
case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
|
||||
}
|
||||
return FPOpcode;
|
||||
}
|
||||
|
||||
/// If both input operands of a logic op are being cast from floating point
|
||||
/// types, try to convert this into a floating point logic node to avoid
|
||||
/// unnecessary moves from SSE to integer registers.
|
||||
|
@ -41990,18 +42001,45 @@ static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
|
|||
(Subtarget.hasSSE2() && N00Type == MVT::f64)))
|
||||
return SDValue();
|
||||
|
||||
unsigned FPOpcode;
|
||||
switch (N->getOpcode()) {
|
||||
default: llvm_unreachable("Unexpected input node for FP logic conversion");
|
||||
case ISD::AND: FPOpcode = X86ISD::FAND; break;
|
||||
case ISD::OR: FPOpcode = X86ISD::FOR; break;
|
||||
case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
|
||||
}
|
||||
|
||||
unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
|
||||
SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
|
||||
return DAG.getBitcast(VT, FPLogic);
|
||||
}
|
||||
|
||||
// Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
|
||||
// to reduce XMM->GPR traffic.
|
||||
/// Fold BITOP(MOVMSK(X), MOVMSK(Y)) --> MOVMSK(BITOP(X, Y)) so the bitwise
/// op is performed on the vector side and only a single XMM->GPR transfer
/// (one MOVMSK) remains. Returns the replacement node, or an empty SDValue
/// if the fold does not apply.
static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
  unsigned BitOpc = N->getOpcode();
  assert((BitOpc == ISD::OR || BitOpc == ISD::AND || BitOpc == ISD::XOR) &&
         "Unexpected bit opcode");

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // The fold only pays off if both mask nodes disappear, so each operand
  // must be a MOVMSK with no other users.
  if (LHS.getOpcode() != X86ISD::MOVMSK || !LHS.hasOneUse() ||
      RHS.getOpcode() != X86ISD::MOVMSK || !RHS.hasOneUse())
    return SDValue();

  SDValue VecL = LHS.getOperand(0);
  SDValue VecR = RHS.getOperand(0);
  EVT VTL = VecL.getValueType();
  EVT VTR = VecR.getValueType();

  // The source vectors must agree in total width and element width so the
  // mask bits line up; an fp vs. int element-type mismatch is fine since we
  // can bitcast one side to the other.
  if (VTL.getSizeInBits() != VTR.getSizeInBits() ||
      VTL.getScalarSizeInBits() != VTR.getScalarSizeInBits())
    return SDValue();

  SDLoc DL(N);
  // When the vectors are floating point, use the FP form of the logic op.
  unsigned VecOpc =
      VTL.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(BitOpc) : BitOpc;
  SDValue BitOp =
      DAG.getNode(VecOpc, DL, VTL, VecL, DAG.getBitcast(VTL, VecR));
  return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BitOp);
}
|
||||
|
||||
/// If this is a zero/all-bits result that is bitwise-anded with a low bits
|
||||
/// mask. (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
|
||||
/// with a shift-right to eliminate loading the vector constant mask value.
|
||||
|
@ -42347,6 +42385,9 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
|
|||
if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
|
||||
return V;
|
||||
|
||||
if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
|
||||
return R;
|
||||
|
||||
if (DCI.isBeforeLegalizeOps())
|
||||
return SDValue();
|
||||
|
||||
|
@ -42694,6 +42735,9 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
|
|||
}
|
||||
}
|
||||
|
||||
if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
|
||||
return R;
|
||||
|
||||
if (DCI.isBeforeLegalizeOps())
|
||||
return SDValue();
|
||||
|
||||
|
@ -44707,6 +44751,9 @@ static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
|
|||
if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
|
||||
return Cmp;
|
||||
|
||||
if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
|
||||
return R;
|
||||
|
||||
if (DCI.isBeforeLegalizeOps())
|
||||
return SDValue();
|
||||
|
||||
|
|
|
@ -188,18 +188,16 @@ define i32 @and_movmskpd_movmskpd(<2 x double> %a0, <2 x i64> %a1) {
|
|||
; SSE: # %bb.0:
|
||||
; SSE-NEXT: xorpd %xmm2, %xmm2
|
||||
; SSE-NEXT: cmpeqpd %xmm0, %xmm2
|
||||
; SSE-NEXT: movmskpd %xmm2, %ecx
|
||||
; SSE-NEXT: movmskpd %xmm1, %eax
|
||||
; SSE-NEXT: andl %ecx, %eax
|
||||
; SSE-NEXT: andpd %xmm1, %xmm2
|
||||
; SSE-NEXT: movmskpd %xmm2, %eax
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: and_movmskpd_movmskpd:
|
||||
; AVX: # %bb.0:
|
||||
; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
|
||||
; AVX-NEXT: vcmpeqpd %xmm0, %xmm2, %xmm0
|
||||
; AVX-NEXT: vmovmskpd %xmm0, %ecx
|
||||
; AVX-NEXT: vmovmskpd %xmm1, %eax
|
||||
; AVX-NEXT: andl %ecx, %eax
|
||||
; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
|
||||
; AVX-NEXT: vmovmskpd %xmm0, %eax
|
||||
; AVX-NEXT: retq
|
||||
%1 = fcmp oeq <2 x double> zeroinitializer, %a0
|
||||
%2 = sext <2 x i1> %1 to <2 x i64>
|
||||
|
@ -217,18 +215,16 @@ define i32 @xor_movmskps_movmskps(<4 x float> %a0, <4 x i32> %a1) {
|
|||
; SSE: # %bb.0:
|
||||
; SSE-NEXT: xorps %xmm2, %xmm2
|
||||
; SSE-NEXT: cmpeqps %xmm0, %xmm2
|
||||
; SSE-NEXT: movmskps %xmm2, %ecx
|
||||
; SSE-NEXT: movmskps %xmm1, %eax
|
||||
; SSE-NEXT: xorl %ecx, %eax
|
||||
; SSE-NEXT: xorps %xmm1, %xmm2
|
||||
; SSE-NEXT: movmskps %xmm2, %eax
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: xor_movmskps_movmskps:
|
||||
; AVX: # %bb.0:
|
||||
; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
|
||||
; AVX-NEXT: vcmpeqps %xmm0, %xmm2, %xmm0
|
||||
; AVX-NEXT: vmovmskps %xmm0, %ecx
|
||||
; AVX-NEXT: vmovmskps %xmm1, %eax
|
||||
; AVX-NEXT: xorl %ecx, %eax
|
||||
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
|
||||
; AVX-NEXT: vmovmskps %xmm0, %eax
|
||||
; AVX-NEXT: retq
|
||||
%1 = fcmp oeq <4 x float> zeroinitializer, %a0
|
||||
%2 = sext <4 x i1> %1 to <4 x i32>
|
||||
|
@ -246,20 +242,18 @@ define i32 @or_pmovmskb_pmovmskb(<16 x i8> %a0, <8 x i16> %a1) {
|
|||
; SSE: # %bb.0:
|
||||
; SSE-NEXT: pxor %xmm2, %xmm2
|
||||
; SSE-NEXT: pcmpeqb %xmm0, %xmm2
|
||||
; SSE-NEXT: pmovmskb %xmm2, %ecx
|
||||
; SSE-NEXT: psraw $15, %xmm1
|
||||
; SSE-NEXT: por %xmm2, %xmm1
|
||||
; SSE-NEXT: pmovmskb %xmm1, %eax
|
||||
; SSE-NEXT: orl %ecx, %eax
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: or_pmovmskb_pmovmskb:
|
||||
; AVX: # %bb.0:
|
||||
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
|
||||
; AVX-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
|
||||
; AVX-NEXT: vpmovmskb %xmm0, %ecx
|
||||
; AVX-NEXT: vpsraw $15, %xmm1, %xmm0
|
||||
; AVX-NEXT: vpsraw $15, %xmm1, %xmm1
|
||||
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
|
||||
; AVX-NEXT: vpmovmskb %xmm0, %eax
|
||||
; AVX-NEXT: orl %ecx, %eax
|
||||
; AVX-NEXT: retq
|
||||
%1 = icmp eq <16 x i8> zeroinitializer, %a0
|
||||
%2 = sext <16 x i1> %1 to <16 x i8>
|
||||
|
|
Loading…
Reference in New Issue