[InstCombine] add tests for (i32 X s>> 31) & Y; NFC

Also regenerate some check lines so that the updated value names
more accurately reflect the current transforms.
Sanjay Patel 2021-10-08 09:34:15 -04:00
parent c060c634ef
commit a35673f4cf
4 changed files with 71 additions and 12 deletions
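
The new tests below are baseline coverage: their CHECK lines show that the sign-smearing ashr followed by and is currently left as-is. For orientation, here is the i32 form of the pattern from the title alongside an equivalent select-based form it could be compared against. This is only an illustrative sketch; the function names @signbit_smear_and and @signbit_smear_and_select are hypothetical, and this NFC commit adds tests, not a transform.

; Pattern named in the title: smear the sign bit of %x, then mask %y with it.
define i32 @signbit_smear_and(i32 %x, i32 %y) {
  %sign = ashr i32 %x, 31        ; all-ones if %x is negative, zero otherwise
  %r = and i32 %sign, %y         ; %y if %x is negative, zero otherwise
  ret i32 %r
}

; Equivalent select form of the same computation (illustration only).
define i32 @signbit_smear_and_select(i32 %x, i32 %y) {
  %isneg = icmp slt i32 %x, 0
  %r = select i1 %isneg, i32 %y, i32 0
  ret i32 %r
}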

@@ -1400,3 +1400,62 @@ define <2 x i8> @flip_masked_bit_nonuniform(<2 x i8> %A) {
%C = and <2 x i8> %B, <i8 16, i8 4>
ret <2 x i8> %C
}
define i8 @ashr_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @ashr_bitwidth_mask(
; CHECK-NEXT: [[SIGN:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT: [[NEG_OR_ZERO:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
; CHECK-NEXT: ret i8 [[NEG_OR_ZERO]]
;
%sign = ashr i8 %x, 7
%neg_or_zero = and i8 %sign, %y
ret i8 %neg_or_zero
}
define <2 x i8> @ashr_bitwidth_mask_vec_commute(<2 x i8> %x, <2 x i8> %py) {
; CHECK-LABEL: @ashr_bitwidth_mask_vec_commute(
; CHECK-NEXT: [[Y:%.*]] = mul <2 x i8> [[PY:%.*]], <i8 42, i8 2>
; CHECK-NEXT: [[SIGN:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 7, i8 7>
; CHECK-NEXT: [[NEG_OR_ZERO:%.*]] = and <2 x i8> [[Y]], [[SIGN]]
; CHECK-NEXT: ret <2 x i8> [[NEG_OR_ZERO]]
;
%y = mul <2 x i8> %py, <i8 42, i8 2> ; thwart complexity-based ordering
%sign = ashr <2 x i8> %x, <i8 7, i8 7>
%neg_or_zero = and <2 x i8> %y, %sign
ret <2 x i8> %neg_or_zero
}
define i8 @ashr_bitwidth_mask_use(i8 %x, i8 %y) {
; CHECK-LABEL: @ashr_bitwidth_mask_use(
; CHECK-NEXT: [[SIGN:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT: call void @use8(i8 [[SIGN]])
; CHECK-NEXT: [[R:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sign = ashr i8 %x, 7
call void @use8(i8 %sign)
%r = and i8 %sign, %y
ret i8 %r
}
define i8 @ashr_not_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @ashr_not_bitwidth_mask(
; CHECK-NEXT: [[SIGN:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT: [[R:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sign = ashr i8 %x, 6
%r = and i8 %sign, %y
ret i8 %r
}
define i8 @lshr_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @lshr_bitwidth_mask(
; CHECK-NEXT: [[SIGN:%.*]] = lshr i8 [[X:%.*]], 7
; CHECK-NEXT: [[R:%.*]] = and i8 [[SIGN]], [[Y:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sign = lshr i8 %x, 7
%r = and i8 %sign, %y
ret i8 %r
}
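
A quick check on why only the arithmetic shift by bitwidth-1 produces an all-ones-or-zero sign mask (the @ashr_not_bitwidth_mask and @lshr_bitwidth_mask cases above keep their and): lshr by 7 yields only 0 or 1, and ashr by 6 is not restricted to 0 or -1, so masking with either is not a sign-based select. A worked counterexample for the ashr-by-6 case, using the hypothetical name @ashr6_counterexample:

define i8 @ashr6_counterexample() {
  %sign = ashr i8 -128, 6   ; 0b1000_0000 s>> 6 = 0b1111_1110 = -2, not -1
  %r = and i8 %sign, 1      ; -2 & 1 = 0
  ret i8 %r                 ; a sign-based select (icmp slt i8 -128, 0 ? 1 : 0) would return 1
}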

@@ -676,8 +676,8 @@ define i1 @test37_extra_uses(i32 %x, i32 %y, i32 %z) {
define i32 @neg_max_s32(i32 %x, i32 %y) {
; CHECK-LABEL: @neg_max_s32(
; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[S_V:%.*]] = select i1 [[C]], i32 [[Y]], i32 [[X]]
-; CHECK-NEXT: ret i32 [[S_V]]
+; CHECK-NEXT: [[S_NEG:%.*]] = select i1 [[C]], i32 [[Y]], i32 [[X]]
+; CHECK-NEXT: ret i32 [[S_NEG]]
;
%nx = sub nsw i32 0, %x
%ny = sub nsw i32 0, %y
@@ -690,8 +690,8 @@ define i32 @neg_max_s32(i32 %x, i32 %y) {
define <4 x i32> @neg_max_v4s32(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @neg_max_v4s32(
; CHECK-NEXT: [[C:%.*]] = icmp sgt <4 x i32> [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[S_V:%.*]] = select <4 x i1> [[C]], <4 x i32> [[X]], <4 x i32> [[Y]]
-; CHECK-NEXT: ret <4 x i32> [[S_V]]
+; CHECK-NEXT: [[S_NEG:%.*]] = select <4 x i1> [[C]], <4 x i32> [[X]], <4 x i32> [[Y]]
+; CHECK-NEXT: ret <4 x i32> [[S_NEG]]
;
%nx = sub nsw <4 x i32> zeroinitializer, %x
%ny = sub nsw <4 x i32> zeroinitializer, %y

@@ -985,8 +985,8 @@ define <2 x i32> @mulsub2_vec_nonuniform_undef(<2 x i32> %a0) {
define i32 @muladd2(i32 %a0) {
; CHECK-LABEL: @muladd2(
-; CHECK-NEXT: [[ADD_NEG_NEG:%.*]] = mul i32 [[A0:%.*]], -4
-; CHECK-NEXT: [[MUL:%.*]] = add i32 [[ADD_NEG_NEG]], -64
+; CHECK-NEXT: [[DOTNEG:%.*]] = mul i32 [[A0:%.*]], -4
+; CHECK-NEXT: [[MUL:%.*]] = add i32 [[DOTNEG]], -64
; CHECK-NEXT: ret i32 [[MUL]]
;
%add = add i32 %a0, 16
@@ -996,8 +996,8 @@ define i32 @muladd2(i32 %a0) {
define <2 x i32> @muladd2_vec(<2 x i32> %a0) {
; CHECK-LABEL: @muladd2_vec(
-; CHECK-NEXT: [[ADD_NEG_NEG:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
-; CHECK-NEXT: [[MUL:%.*]] = add <2 x i32> [[ADD_NEG_NEG]], <i32 -64, i32 -64>
+; CHECK-NEXT: [[DOTNEG:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
+; CHECK-NEXT: [[MUL:%.*]] = add <2 x i32> [[DOTNEG]], <i32 -64, i32 -64>
; CHECK-NEXT: ret <2 x i32> [[MUL]]
;
%add = add <2 x i32> %a0, <i32 16, i32 16>

@@ -1055,8 +1055,8 @@ define <2 x i32> @mulsub2_vec_nonuniform_undef(<2 x i32> %a0) {
define i32 @muladd2(i32 %a0) {
; CHECK-LABEL: @muladd2(
-; CHECK-NEXT: [[ADD_NEG_NEG:%.*]] = mul i32 [[A0:%.*]], -4
-; CHECK-NEXT: [[MUL:%.*]] = add i32 [[ADD_NEG_NEG]], -64
+; CHECK-NEXT: [[DOTNEG:%.*]] = mul i32 [[A0:%.*]], -4
+; CHECK-NEXT: [[MUL:%.*]] = add i32 [[DOTNEG]], -64
; CHECK-NEXT: ret i32 [[MUL]]
;
%add = add i32 %a0, 16
@@ -1066,8 +1066,8 @@ define i32 @muladd2(i32 %a0) {
define <2 x i32> @muladd2_vec(<2 x i32> %a0) {
; CHECK-LABEL: @muladd2_vec(
-; CHECK-NEXT: [[ADD_NEG_NEG:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
-; CHECK-NEXT: [[MUL:%.*]] = add <2 x i32> [[ADD_NEG_NEG]], <i32 -64, i32 -64>
+; CHECK-NEXT: [[DOTNEG:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
+; CHECK-NEXT: [[MUL:%.*]] = add <2 x i32> [[DOTNEG]], <i32 -64, i32 -64>
; CHECK-NEXT: ret <2 x i32> [[MUL]]
;
%add = add <2 x i32> %a0, <i32 16, i32 16>