; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; PR53610 - sub(0,and(lshr(X,C1),1)) --> ashr(shl(X,(BW-1)-C1),BW-1)
; PR53610 - sub(C2,and(lshr(X,C1),1)) --> add(ashr(shl(X,(BW-1)-C1),BW-1),C2)

; Scalar: sub(0, and(lshr(X,3), 1)) folds to ashr(shl(X, 4), 7).
define i8 @neg_mask1_lshr(i8 %a0) {
; CHECK-LABEL: @neg_mask1_lshr(
; CHECK-NEXT:    [[TMP1:%.*]] = shl i8 [[A0:%.*]], 4
; CHECK-NEXT:    [[TMP2:%.*]] = ashr i8 [[TMP1]], 7
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %shift = lshr i8 %a0, 3
  %mask = and i8 %shift, 1
  %neg = sub i8 0, %mask
  ret i8 %neg
}
; Scalar with non-zero C2: sub(10, and(lshr(X,1), 1)) folds to add(ashr(shl(X, 6), 7), 10).
define i8 @sub_mask1_lshr(i8 %a0) {
; CHECK-LABEL: @sub_mask1_lshr(
; CHECK-NEXT:    [[TMP1:%.*]] = shl i8 [[A0:%.*]], 6
; CHECK-NEXT:    [[TMP2:%.*]] = ashr i8 [[TMP1]], 7
; CHECK-NEXT:    [[NEG:%.*]] = add nsw i8 [[TMP2]], 10
; CHECK-NEXT:    ret i8 [[NEG]]
;
  %shift = lshr i8 %a0, 1
  %mask = and i8 %shift, 1
  %neg = sub i8 10, %mask
  ret i8 %neg
}
; Vector with uniform (splat) shift amounts: fold applies lane-wise.
define <4 x i32> @neg_mask1_lshr_vector_uniform(<4 x i32> %a0) {
; CHECK-LABEL: @neg_mask1_lshr_vector_uniform(
; CHECK-NEXT:    [[TMP1:%.*]] = shl <4 x i32> [[A0:%.*]], <i32 28, i32 28, i32 28, i32 28>
; CHECK-NEXT:    [[TMP2:%.*]] = ashr <4 x i32> [[TMP1]], <i32 31, i32 31, i32 31, i32 31>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %shift = lshr <4 x i32> %a0, <i32 3, i32 3, i32 3, i32 3>
  %mask = and <4 x i32> %shift, <i32 1, i32 1, i32 1, i32 1>
  %neg = sub <4 x i32> zeroinitializer, %mask
  ret <4 x i32> %neg
}
; Vector with non-uniform constant shift amounts: each lane gets its own shl amount.
define <4 x i32> @neg_mask1_lshr_vector_nonuniform(<4 x i32> %a0) {
; CHECK-LABEL: @neg_mask1_lshr_vector_nonuniform(
; CHECK-NEXT:    [[TMP1:%.*]] = shl <4 x i32> [[A0:%.*]], <i32 28, i32 27, i32 26, i32 25>
; CHECK-NEXT:    [[TMP2:%.*]] = ashr <4 x i32> [[TMP1]], <i32 31, i32 31, i32 31, i32 31>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %shift = lshr <4 x i32> %a0, <i32 3, i32 4, i32 5, i32 6>
  %mask = and <4 x i32> %shift, <i32 1, i32 1, i32 1, i32 1>
  %neg = sub <4 x i32> zeroinitializer, %mask
  ret <4 x i32> %neg
}
; Vector, non-uniform shifts and non-uniform C2: fold produces add of the splat-sign value.
define <4 x i32> @sub_mask1_lshr_vector_nonuniform(<4 x i32> %a0) {
; CHECK-LABEL: @sub_mask1_lshr_vector_nonuniform(
; CHECK-NEXT:    [[TMP1:%.*]] = shl <4 x i32> [[A0:%.*]], <i32 28, i32 27, i32 26, i32 25>
; CHECK-NEXT:    [[TMP2:%.*]] = ashr <4 x i32> [[TMP1]], <i32 31, i32 31, i32 31, i32 31>
; CHECK-NEXT:    [[NEG:%.*]] = add nsw <4 x i32> [[TMP2]], <i32 5, i32 0, i32 -1, i32 65556>
; CHECK-NEXT:    ret <4 x i32> [[NEG]]
;
  %shift = lshr <4 x i32> %a0, <i32 3, i32 4, i32 5, i32 6>
  %mask = and <4 x i32> %shift, <i32 1, i32 1, i32 1, i32 1>
  %neg = sub <4 x i32> <i32 5, i32 0, i32 -1, i32 65556>, %mask
  ret <4 x i32> %neg
}
; The fold looks through a trunc between the lshr and the and.
define i8 @sub_mask1_trunc_lshr(i64 %a0) {
; CHECK-LABEL: @sub_mask1_trunc_lshr(
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[A0:%.*]], 48
; CHECK-NEXT:    [[TMP2:%.*]] = ashr i64 [[TMP1]], 63
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP2]] to i8
; CHECK-NEXT:    [[NEG:%.*]] = add i8 [[TMP3]], 10
; CHECK-NEXT:    ret i8 [[NEG]]
;
  %shift = lshr i64 %a0, 15
  %trunc = trunc i64 %shift to i8
  %mask = and i8 %trunc, 1
  %neg = sub i8 10, %mask
  ret i8 %neg
}
; The fold also looks through a sext of the masked bit; the add is narrowed to i8.
define i32 @sub_sext_mask1_trunc_lshr(i64 %a0) {
; CHECK-LABEL: @sub_sext_mask1_trunc_lshr(
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[A0:%.*]], 48
; CHECK-NEXT:    [[TMP2:%.*]] = ashr i64 [[TMP1]], 63
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP2]] to i8
; CHECK-NEXT:    [[NARROW:%.*]] = add i8 [[TMP3]], 10
; CHECK-NEXT:    [[NEG:%.*]] = zext i8 [[NARROW]] to i32
; CHECK-NEXT:    ret i32 [[NEG]]
;
  %shift = lshr i64 %a0, 15
  %trunc = trunc i64 %shift to i8
  %mask = and i8 %trunc, 1
  %sext = sext i8 %mask to i32
  %neg = sub i32 10, %sext
  ret i32 %neg
}
; trunc-to-i1 + zext acts as the mask; the whole chain is narrowed to i32.
define i32 @sub_zext_trunc_lshr(i64 %a0) {
; CHECK-LABEL: @sub_zext_trunc_lshr(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[A0:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], 16
; CHECK-NEXT:    [[TMP3:%.*]] = ashr i32 [[TMP2]], 31
; CHECK-NEXT:    [[NEG:%.*]] = add nsw i32 [[TMP3]], 10
; CHECK-NEXT:    ret i32 [[NEG]]
;
  %shift = lshr i64 %a0, 15
  %trunc = trunc i64 %shift to i1
  %sext = zext i1 %trunc to i32
  %neg = sub i32 10, %sext
  ret i32 %neg
}

; Negative Test - wrong mask
; Mask is 2, not 1, so the sign-splat fold must not fire; IR stays as-is.
define i8 @neg_mask2_lshr(i8 %a0) {
; CHECK-LABEL: @neg_mask2_lshr(
; CHECK-NEXT:    [[SHIFT:%.*]] = lshr i8 [[A0:%.*]], 3
; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[SHIFT]], 2
; CHECK-NEXT:    [[NEG:%.*]] = sub nsw i8 0, [[MASK]]
; CHECK-NEXT:    ret i8 [[NEG]]
;
  %shift = lshr i8 %a0, 3
  %mask = and i8 %shift, 2
  %neg = sub i8 0, %mask
  ret i8 %neg
}

; Negative Test - bad shift amount
; Shift amount equals the bit width, so the lshr is poison and the whole chain folds away.
define i8 @neg_mask2_lshr_outofbounds(i8 %a0) {
; CHECK-LABEL: @neg_mask2_lshr_outofbounds(
; CHECK-NEXT:    ret i8 poison
;
  %shift = lshr i8 %a0, 8
  %mask = and i8 %shift, 2
  %neg = sub i8 0, %mask
  ret i8 %neg
}

; Negative Test - non-constant shift amount
; Variable shift amount: the fold needs a constant C1, so nothing happens.
define <2 x i32> @neg_mask1_lshr_vector_var(<2 x i32> %a0, <2 x i32> %a1) {
; CHECK-LABEL: @neg_mask1_lshr_vector_var(
; CHECK-NEXT:    [[SHIFT:%.*]] = lshr <2 x i32> [[A0:%.*]], [[A1:%.*]]
; CHECK-NEXT:    [[MASK:%.*]] = and <2 x i32> [[SHIFT]], <i32 1, i32 1>
; CHECK-NEXT:    [[NEG:%.*]] = sub nsw <2 x i32> zeroinitializer, [[MASK]]
; CHECK-NEXT:    ret <2 x i32> [[NEG]]
;
  %shift = lshr <2 x i32> %a0, %a1
  %mask = and <2 x i32> %shift, <i32 1, i32 1>
  %neg = sub <2 x i32> zeroinitializer, %mask
  ret <2 x i32> %neg
}

; Extra Use - mask
; Extra use of the mask blocks the fold (it would duplicate work).
define i8 @neg_mask1_lshr_extrause_mask(i8 %a0) {
; CHECK-LABEL: @neg_mask1_lshr_extrause_mask(
; CHECK-NEXT:    [[SHIFT:%.*]] = lshr i8 [[A0:%.*]], 3
; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[SHIFT]], 1
; CHECK-NEXT:    [[NEG:%.*]] = sub nsw i8 0, [[MASK]]
; CHECK-NEXT:    call void @usei8(i8 [[MASK]])
; CHECK-NEXT:    ret i8 [[NEG]]
;
  %shift = lshr i8 %a0, 3
  %mask = and i8 %shift, 1
  %neg = sub i8 0, %mask
  call void @usei8(i8 %mask)
  ret i8 %neg
}

; Extra Use - shift
; Extra use of the shift result blocks the fold as well.
define <2 x i32> @neg_mask1_lshr_extrause_lshr(<2 x i32> %a0) {
; CHECK-LABEL: @neg_mask1_lshr_extrause_lshr(
; CHECK-NEXT:    [[SHIFT:%.*]] = lshr <2 x i32> [[A0:%.*]], <i32 3, i32 3>
; CHECK-NEXT:    [[MASK:%.*]] = and <2 x i32> [[SHIFT]], <i32 1, i32 1>
; CHECK-NEXT:    [[NEG:%.*]] = sub nsw <2 x i32> zeroinitializer, [[MASK]]
; CHECK-NEXT:    call void @usev2i32(<2 x i32> [[SHIFT]])
; CHECK-NEXT:    ret <2 x i32> [[NEG]]
;
  %shift = lshr <2 x i32> %a0, <i32 3, i32 3>
  %mask = and <2 x i32> %shift, <i32 1, i32 1>
  %neg = sub <2 x i32> zeroinitializer, %mask
  call void @usev2i32(<2 x i32> %shift)
  ret <2 x i32> %neg
}
; External sinks used to create extra uses in the tests above.
declare void @usei8(i8)
declare void @usev2i32(<2 x i32>)