forked from OSchip/llvm-project
[InstCombine] add multi-use demanded bits fold for add with low-bit mask
I noticed an add example like the one from D91343, so here's a similar patch. The logic is based on existing code for the single-use demanded bits fold. But I only matched a constant instead of using compute known bits on the operands because that was the motivating pattern that I noticed. I think this will allow removing a special-case (but incomplete) dedicated fold within visitAnd(), but I need to untangle the existing code to be sure. https://rise4fun.com/Alive/V6fP Name: add with low mask Pre: (C1 & (-1 u>> countLeadingZeros(C2))) == 0 %a = add i8 %x, C1 %r = and i8 %a, C2 => %r = and i8 %x, C2 Differential Revision: https://reviews.llvm.org/D91415
This commit is contained in:
parent
91aa211ea1
commit
e56103d250
|
@ -826,6 +826,21 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
|
|||
// do simplifications that apply to *just* the one user if we know that
|
||||
// this instruction has a simpler value in that context.
|
||||
switch (I->getOpcode()) {
|
||||
case Instruction::Add: {
|
||||
// TODO: Allow undefs and/or non-splat vectors.
|
||||
const APInt *C;
|
||||
if (match(I->getOperand(1), m_APInt(C))) {
|
||||
// Right fill the demanded bits for this add to demand the most
|
||||
// significant demanded bit and all those below it.
|
||||
unsigned Ctlz = DemandedMask.countLeadingZeros();
|
||||
APInt LowMask(APInt::getLowBitsSet(BitWidth, BitWidth - Ctlz));
|
||||
// If we are adding zeros to every bit below the highest demanded bit,
|
||||
// just return the add's variable operand.
|
||||
if ((*C & LowMask).isNullValue())
|
||||
return I->getOperand(0);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case Instruction::And: {
|
||||
// If either the LHS or the RHS are Zero, the result is zero.
|
||||
computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
|
||||
|
|
|
@ -1049,11 +1049,13 @@ define <2 x i32> @lowmask_sext_in_reg_splat(<2 x i32> %x, <2 x i32>* %p) {
|
|||
ret <2 x i32> %and
|
||||
}
|
||||
|
||||
; Multi-use demanded bits - 'add' doesn't change 'and'
|
||||
|
||||
define i8 @lowmask_add(i8 %x) {
|
||||
; CHECK-LABEL: @lowmask_add(
|
||||
; CHECK-NEXT: [[A:%.*]] = add i8 [[X:%.*]], -64
|
||||
; CHECK-NEXT: call void @use8(i8 [[A]])
|
||||
; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], 32
|
||||
; CHECK-NEXT: [[R:%.*]] = and i8 [[X]], 32
|
||||
; CHECK-NEXT: ret i8 [[R]]
|
||||
;
|
||||
%a = add i8 %x, -64 ; 0xc0
|
||||
|
@ -1062,6 +1064,8 @@ define i8 @lowmask_add(i8 %x) {
|
|||
ret i8 %r
|
||||
}
|
||||
|
||||
; Negative test - mask overlaps low bit of add
|
||||
|
||||
define i8 @not_lowmask_add(i8 %x) {
|
||||
; CHECK-LABEL: @not_lowmask_add(
|
||||
; CHECK-NEXT: [[A:%.*]] = add i8 [[X:%.*]], -64
|
||||
|
@ -1075,6 +1079,8 @@ define i8 @not_lowmask_add(i8 %x) {
|
|||
ret i8 %r
|
||||
}
|
||||
|
||||
; Negative test - mask overlaps low bit of add
|
||||
|
||||
define i8 @not_lowmask_add2(i8 %x) {
|
||||
; CHECK-LABEL: @not_lowmask_add2(
|
||||
; CHECK-NEXT: [[A:%.*]] = add i8 [[X:%.*]], -96
|
||||
|
@ -1088,11 +1094,13 @@ define i8 @not_lowmask_add2(i8 %x) {
|
|||
ret i8 %r
|
||||
}
|
||||
|
||||
; Multi-use demanded bits - 'add' doesn't change 'and'
|
||||
|
||||
define <2 x i8> @lowmask_add_splat(<2 x i8> %x, <2 x i8>* %p) {
|
||||
; CHECK-LABEL: @lowmask_add_splat(
|
||||
; CHECK-NEXT: [[A:%.*]] = add <2 x i8> [[X:%.*]], <i8 -64, i8 -64>
|
||||
; CHECK-NEXT: store <2 x i8> [[A]], <2 x i8>* [[P:%.*]], align 2
|
||||
; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[A]], <i8 32, i8 32>
|
||||
; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[X]], <i8 32, i8 32>
|
||||
; CHECK-NEXT: ret <2 x i8> [[R]]
|
||||
;
|
||||
%a = add <2 x i8> %x, <i8 -64, i8 -64> ; 0xc0
|
||||
|
|
|
@ -35,7 +35,7 @@ define void @fp_iv_loop1(float* noalias nocapture %A, i32 %N) #0 {
|
|||
; AUTO_VEC-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP1]], 96
|
||||
; AUTO_VEC-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK_UNR_LCSSA:%.*]], label [[VECTOR_PH_NEW:%.*]]
|
||||
; AUTO_VEC: vector.ph.new:
|
||||
; AUTO_VEC-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[TMP3]], 1152921504606846972
|
||||
; AUTO_VEC-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[TMP3]], -4
|
||||
; AUTO_VEC-NEXT: br label [[VECTOR_BODY:%.*]]
|
||||
; AUTO_VEC: vector.body:
|
||||
; AUTO_VEC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH_NEW]] ], [ [[INDEX_NEXT_3:%.*]], [[VECTOR_BODY]] ]
|
||||
|
@ -306,7 +306,7 @@ define double @external_use_with_fast_math(double* %a, i64 %n) {
|
|||
; AUTO_VEC-NEXT: [[TMP5:%.*]] = icmp ult i64 [[TMP2]], 48
|
||||
; AUTO_VEC-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK_UNR_LCSSA:%.*]], label [[VECTOR_PH_NEW:%.*]]
|
||||
; AUTO_VEC: vector.ph.new:
|
||||
; AUTO_VEC-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[TMP4]], 2305843009213693948
|
||||
; AUTO_VEC-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[TMP4]], -4
|
||||
; AUTO_VEC-NEXT: br label [[VECTOR_BODY:%.*]]
|
||||
; AUTO_VEC: vector.body:
|
||||
; AUTO_VEC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH_NEW]] ], [ [[INDEX_NEXT_3:%.*]], [[VECTOR_BODY]] ]
|
||||
|
|
|
@ -25,7 +25,7 @@ define i32 @foo(i32* nocapture %A, i32* nocapture %B, i32 %n) {
|
|||
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
|
||||
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
|
||||
; CHECK: vector.ph:
|
||||
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], 8589934588
|
||||
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], -4
|
||||
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
|
||||
; CHECK: vector.body:
|
||||
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
|
||||
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function foo
|
||||
; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
|
||||
; RUN: opt < %s -loop-vectorize -disable-basic-aa -S -pass-remarks-analysis='loop-vectorize' 2>&1 | FileCheck %s -check-prefix=FORCED_OPTSIZE
|
||||
|
||||
|
@ -32,7 +32,7 @@ define i32 @foo(float* nocapture %a, float* nocapture %b, i32 %n) nounwind uwtab
|
|||
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]], [[DBG9]]
|
||||
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]], [[DBG9]]
|
||||
; CHECK: vector.ph:
|
||||
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], 8589934588, [[DBG9]]
|
||||
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], -4, [[DBG9]]
|
||||
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]], [[DBG9]]
|
||||
; CHECK: vector.body:
|
||||
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ], [[DBG9]]
|
||||
|
|
|
@ -38,7 +38,7 @@ define void @vdiv(double* %x, double* %y, double %a, i32 %N) #0 {
|
|||
; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], 12
|
||||
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK_UNR_LCSSA:%.*]], label [[VECTOR_PH_NEW:%.*]]
|
||||
; CHECK: vector.ph.new:
|
||||
; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[TMP2]], 9223372036854775804
|
||||
; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[TMP2]], -4
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = fdiv fast <4 x double> <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>, [[BROADCAST_SPLAT]]
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = fdiv fast <4 x double> <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>, [[BROADCAST_SPLAT]]
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = fdiv fast <4 x double> <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>, [[BROADCAST_SPLAT]]
|
||||
|
|
Loading…
Reference in New Issue