diff --git a/llvm/test/Transforms/InstCombine/and.ll b/llvm/test/Transforms/InstCombine/and.ll
index 0a3451c2e5a5..2ce631885bef 100644
--- a/llvm/test/Transforms/InstCombine/and.ll
+++ b/llvm/test/Transforms/InstCombine/and.ll
@@ -1400,3 +1400,62 @@ define <2 x i8> @flip_masked_bit_nonuniform(<2 x i8> %A) {
   %C = and <2 x i8> %B, <i8 16, i8 4>
   ret <2 x i8> %C
 }
+
+define i8 @ashr_bitwidth_mask(i8 %x, i8 %y) {
+; CHECK-LABEL: @ashr_bitwidth_mask(
+; CHECK-NEXT:    [[SIGN:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[NEG_OR_ZERO:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
+; CHECK-NEXT:    ret i8 [[NEG_OR_ZERO]]
+;
+  %sign = ashr i8 %x, 7
+  %neg_or_zero = and i8 %sign, %y
+  ret i8 %neg_or_zero
+}
+
+define <2 x i8> @ashr_bitwidth_mask_vec_commute(<2 x i8> %x, <2 x i8> %py) {
+; CHECK-LABEL: @ashr_bitwidth_mask_vec_commute(
+; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i8> [[PY:%.*]], <i8 42, i8 2>
+; CHECK-NEXT:    [[SIGN:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 7, i8 7>
+; CHECK-NEXT:    [[NEG_OR_ZERO:%.*]] = and <2 x i8> [[Y]], [[SIGN]]
+; CHECK-NEXT:    ret <2 x i8> [[NEG_OR_ZERO]]
+;
+  %y = mul <2 x i8> %py, <i8 42, i8 2> ; thwart complexity-based ordering
+  %sign = ashr <2 x i8> %x, <i8 7, i8 7>
+  %neg_or_zero = and <2 x i8> %y, %sign
+  ret <2 x i8> %neg_or_zero
+}
+
+define i8 @ashr_bitwidth_mask_use(i8 %x, i8 %y) {
+; CHECK-LABEL: @ashr_bitwidth_mask_use(
+; CHECK-NEXT:    [[SIGN:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    call void @use8(i8 [[SIGN]])
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sign = ashr i8 %x, 7
+  call void @use8(i8 %sign)
+  %r = and i8 %sign, %y
+  ret i8 %r
+}
+
+define i8 @ashr_not_bitwidth_mask(i8 %x, i8 %y) {
+; CHECK-LABEL: @ashr_not_bitwidth_mask(
+; CHECK-NEXT:    [[SIGN:%.*]] = ashr i8 [[X:%.*]], 6
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sign = ashr i8 %x, 6
+  %r = and i8 %sign, %y
+  ret i8 %r
+}
+
+define i8 @lshr_bitwidth_mask(i8 %x, i8 %y) {
+; CHECK-LABEL: @lshr_bitwidth_mask(
+; CHECK-NEXT:    [[SIGN:%.*]] = lshr i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sign = lshr i8 %x, 7
+  %r = and i8 %sign, %y
+  ret i8 %r
+}
diff --git a/llvm/test/Transforms/InstCombine/icmp.ll b/llvm/test/Transforms/InstCombine/icmp.ll
index 77f710b4a1e1..598d8b882bfb 100644
--- a/llvm/test/Transforms/InstCombine/icmp.ll
+++ b/llvm/test/Transforms/InstCombine/icmp.ll
@@ -676,8 +676,8 @@ define i1 @test37_extra_uses(i32 %x, i32 %y, i32 %z) {
 define i32 @neg_max_s32(i32 %x, i32 %y) {
 ; CHECK-LABEL: @neg_max_s32(
 ; CHECK-NEXT:    [[C:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[S_V:%.*]] = select i1 [[C]], i32 [[Y]], i32 [[X]]
-; CHECK-NEXT:    ret i32 [[S_V]]
+; CHECK-NEXT:    [[S_NEG:%.*]] = select i1 [[C]], i32 [[Y]], i32 [[X]]
+; CHECK-NEXT:    ret i32 [[S_NEG]]
 ;
   %nx = sub nsw i32 0, %x
   %ny = sub nsw i32 0, %y
@@ -690,8 +690,8 @@ define i32 @neg_max_s32(i32 %x, i32 %y) {
 define <4 x i32> @neg_max_v4s32(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: @neg_max_v4s32(
 ; CHECK-NEXT:    [[C:%.*]] = icmp sgt <4 x i32> [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[S_V:%.*]] = select <4 x i1> [[C]], <4 x i32> [[X]], <4 x i32> [[Y]]
-; CHECK-NEXT:    ret <4 x i32> [[S_V]]
+; CHECK-NEXT:    [[S_NEG:%.*]] = select <4 x i1> [[C]], <4 x i32> [[X]], <4 x i32> [[Y]]
+; CHECK-NEXT:    ret <4 x i32> [[S_NEG]]
 ;
   %nx = sub nsw <4 x i32> zeroinitializer, %x
   %ny = sub nsw <4 x i32> zeroinitializer, %y
diff --git a/llvm/test/Transforms/InstCombine/mul-inseltpoison.ll b/llvm/test/Transforms/InstCombine/mul-inseltpoison.ll
index 602772800d3a..4b7af9212ae9 100644
--- a/llvm/test/Transforms/InstCombine/mul-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/mul-inseltpoison.ll
@@ -985,8 +985,8 @@ define <2 x i32> @mulsub2_vec_nonuniform_undef(<2 x i32> %a0) {
 
 define i32 @muladd2(i32 %a0) {
 ; CHECK-LABEL: @muladd2(
-; CHECK-NEXT:    [[ADD_NEG_NEG:%.*]] = mul i32 [[A0:%.*]], -4
-; CHECK-NEXT:    [[MUL:%.*]] = add i32 [[ADD_NEG_NEG]], -64
+; CHECK-NEXT:    [[DOTNEG:%.*]] = mul i32 [[A0:%.*]], -4
+; CHECK-NEXT:    [[MUL:%.*]] = add i32 [[DOTNEG]], -64
 ; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %add = add i32 %a0, 16
@@ -996,8 +996,8 @@ define i32 @muladd2(i32 %a0) {
 
 define <2 x i32> @muladd2_vec(<2 x i32> %a0) {
 ; CHECK-LABEL: @muladd2_vec(
-; CHECK-NEXT:    [[ADD_NEG_NEG:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
-; CHECK-NEXT:    [[MUL:%.*]] = add <2 x i32> [[ADD_NEG_NEG]], <i32 -64, i32 -64>
+; CHECK-NEXT:    [[DOTNEG:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
+; CHECK-NEXT:    [[MUL:%.*]] = add <2 x i32> [[DOTNEG]], <i32 -64, i32 -64>
 ; CHECK-NEXT:    ret <2 x i32> [[MUL]]
 ;
   %add = add <2 x i32> %a0, <i32 16, i32 16>
diff --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll
index 15faf764a040..3901f5879150 100644
--- a/llvm/test/Transforms/InstCombine/mul.ll
+++ b/llvm/test/Transforms/InstCombine/mul.ll
@@ -1055,8 +1055,8 @@ define <2 x i32> @mulsub2_vec_nonuniform_undef(<2 x i32> %a0) {
 
 define i32 @muladd2(i32 %a0) {
 ; CHECK-LABEL: @muladd2(
-; CHECK-NEXT:    [[ADD_NEG_NEG:%.*]] = mul i32 [[A0:%.*]], -4
-; CHECK-NEXT:    [[MUL:%.*]] = add i32 [[ADD_NEG_NEG]], -64
+; CHECK-NEXT:    [[DOTNEG:%.*]] = mul i32 [[A0:%.*]], -4
+; CHECK-NEXT:    [[MUL:%.*]] = add i32 [[DOTNEG]], -64
 ; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %add = add i32 %a0, 16
@@ -1066,8 +1066,8 @@ define i32 @muladd2(i32 %a0) {
 
 define <2 x i32> @muladd2_vec(<2 x i32> %a0) {
 ; CHECK-LABEL: @muladd2_vec(
-; CHECK-NEXT:    [[ADD_NEG_NEG:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
-; CHECK-NEXT:    [[MUL:%.*]] = add <2 x i32> [[ADD_NEG_NEG]], <i32 -64, i32 -64>
+; CHECK-NEXT:    [[DOTNEG:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
+; CHECK-NEXT:    [[MUL:%.*]] = add <2 x i32> [[DOTNEG]], <i32 -64, i32 -64>
 ; CHECK-NEXT:    ret <2 x i32> [[MUL]]
 ;
   %add = add <2 x i32> %a0, <i32 16, i32 16>