diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 360ce8f1be0a..031edb615b87 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -1954,6 +1954,19 @@ SDValue DAGCombiner::visitSUB(SDNode *N) { DAG.getConstant(-N1C->getAPIntValue(), DL, VT)); } + // Right-shifting everything out but the sign bit followed by negation is the + // same as flipping arithmetic/logical shift type without the negation: + // -(X >>u 31) -> (X >>s 31) + // -(X >>s 31) -> (X >>u 31) + if (isNullConstantOrNullSplatConstant(N0) && + (N1->getOpcode() == ISD::SRA || N1->getOpcode() == ISD::SRL)) { + ConstantSDNode *ShiftAmt = isConstOrConstSplat(N1.getOperand(1)); + if (ShiftAmt && ShiftAmt->getZExtValue() == VT.getScalarSizeInBits() - 1) { + auto NewOpcode = N1->getOpcode() == ISD::SRA ? ISD::SRL : ISD::SRA; + return DAG.getNode(NewOpcode, DL, VT, N1.getOperand(0), N1.getOperand(1)); + } + } + // Canonicalize (sub -1, x) -> ~x, i.e. 
(xor x, -1) if (isAllOnesConstantOrAllOnesSplatConstant(N0)) return DAG.getNode(ISD::XOR, DL, VT, N1, N0); diff --git a/llvm/test/CodeGen/X86/negate-shift.ll b/llvm/test/CodeGen/X86/negate-shift.ll index fadbf02f0c24..54ffc8e71e07 100644 --- a/llvm/test/CodeGen/X86/negate-shift.ll +++ b/llvm/test/CodeGen/X86/negate-shift.ll @@ -4,8 +4,7 @@ define i32 @neg_lshr_signbit(i32 %x) { ; X64-LABEL: neg_lshr_signbit: ; X64: # BB#0: -; X64-NEXT: shrl $31, %edi -; X64-NEXT: negl %edi +; X64-NEXT: sarl $31, %edi ; X64-NEXT: movl %edi, %eax ; X64-NEXT: retq ; @@ -17,8 +16,7 @@ define i32 @neg_lshr_signbit(i32 %x) { define i64 @neg_ashr_signbit(i64 %x) { ; X64-LABEL: neg_ashr_signbit: ; X64: # BB#0: -; X64-NEXT: sarq $63, %rdi -; X64-NEXT: negq %rdi +; X64-NEXT: shrq $63, %rdi ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: retq ; @@ -30,10 +28,7 @@ define i64 @neg_ashr_signbit(i64 %x) { define <4 x i32> @neg_ashr_signbit_vec(<4 x i32> %x) { ; X64-LABEL: neg_ashr_signbit_vec: ; X64: # BB#0: -; X64-NEXT: psrad $31, %xmm0 -; X64-NEXT: pxor %xmm1, %xmm1 -; X64-NEXT: psubd %xmm0, %xmm1 -; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: psrld $31, %xmm0 ; X64-NEXT: retq ; %sh = ashr <4 x i32> %x, @@ -44,10 +39,7 @@ define <4 x i32> @neg_ashr_signbit_vec(<4 x i32> %x) { define <8 x i16> @neg_lshr_signbit_vec(<8 x i16> %x) { ; X64-LABEL: neg_lshr_signbit_vec: ; X64: # BB#0: -; X64-NEXT: psrlw $15, %xmm0 -; X64-NEXT: pxor %xmm1, %xmm1 -; X64-NEXT: psubw %xmm0, %xmm1 -; X64-NEXT: movdqa %xmm1, %xmm0 +; X64-NEXT: psraw $15, %xmm0 ; X64-NEXT: retq ; %sh = lshr <8 x i16> %x,