forked from OSchip/llvm-project
[InstCombine] remove casts from splat-a-bit pattern
Alive2 proof: https://alive2.llvm.org/ce/z/_AivbM

This case seems clear since we can reduce the instruction count and avoid an intermediate type change, but we might want to use mask-and-compare for other sequences. Currently, we can generate more instructions on some related patterns by trying to use bit-hacks instead of mask+cmp, so something is not behaving as expected.
This commit is contained in:
parent
b78c85a44a
commit
3a126134d3
|
@ -1591,6 +1591,18 @@ Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
|
|||
return BinaryOperator::CreateAShr(A, NewShAmt);
|
||||
}
|
||||
|
||||
// Splatting a bit of constant-index across a value:
|
||||
// sext (ashr (trunc iN X to iM), M-1) to iN --> ashr (shl X, N-M), N-1
|
||||
// TODO: If the dest type is different, use a cast (adjust use check).
|
||||
if (match(Src, m_OneUse(m_AShr(m_Trunc(m_Value(X)),
|
||||
m_SpecificInt(SrcBitSize - 1)))) &&
|
||||
X->getType() == DestTy) {
|
||||
Constant *ShlAmtC = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
|
||||
Constant *AshrAmtC = ConstantInt::get(DestTy, DestBitSize - 1);
|
||||
Value *Shl = Builder.CreateShl(X, ShlAmtC);
|
||||
return BinaryOperator::CreateAShr(Shl, AshrAmtC);
|
||||
}
|
||||
|
||||
if (match(Src, m_VScale(DL))) {
|
||||
if (CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
|
||||
unsigned MaxVScale = CI.getFunction()
|
||||
|
|
|
@ -323,9 +323,8 @@ define i10 @test19(i10 %i) {
|
|||
|
||||
define i32 @smear_set_bit(i32 %x) {
|
||||
; CHECK-LABEL: @smear_set_bit(
|
||||
; CHECK-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i8
|
||||
; CHECK-NEXT: [[A:%.*]] = ashr i8 [[T]], 7
|
||||
; CHECK-NEXT: [[S:%.*]] = sext i8 [[A]] to i32
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X:%.*]], 24
|
||||
; CHECK-NEXT: [[S:%.*]] = ashr i32 [[TMP1]], 31
|
||||
; CHECK-NEXT: ret i32 [[S]]
|
||||
;
|
||||
%t = trunc i32 %x to i8
|
||||
|
@ -334,12 +333,14 @@ define i32 @smear_set_bit(i32 %x) {
|
|||
ret i32 %s
|
||||
}
|
||||
|
||||
; extra use of trunc is ok because we still shorten the use chain
|
||||
|
||||
define <2 x i32> @smear_set_bit_vec_use1(<2 x i32> %x) {
|
||||
; CHECK-LABEL: @smear_set_bit_vec_use1(
|
||||
; CHECK-NEXT: [[T:%.*]] = trunc <2 x i32> [[X:%.*]] to <2 x i5>
|
||||
; CHECK-NEXT: call void @use_vec(<2 x i5> [[T]])
|
||||
; CHECK-NEXT: [[A:%.*]] = ashr <2 x i5> [[T]], <i5 4, i5 4>
|
||||
; CHECK-NEXT: [[S:%.*]] = sext <2 x i5> [[A]] to <2 x i32>
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i32> [[X]], <i32 27, i32 27>
|
||||
; CHECK-NEXT: [[S:%.*]] = ashr <2 x i32> [[TMP1]], <i32 31, i32 31>
|
||||
; CHECK-NEXT: ret <2 x i32> [[S]]
|
||||
;
|
||||
%t = trunc <2 x i32> %x to <2 x i5>
|
||||
|
@ -349,6 +350,8 @@ define <2 x i32> @smear_set_bit_vec_use1(<2 x i32> %x) {
|
|||
ret <2 x i32> %s
|
||||
}
|
||||
|
||||
; negative test - extra use
|
||||
|
||||
define i32 @smear_set_bit_use2(i32 %x) {
|
||||
; CHECK-LABEL: @smear_set_bit_use2(
|
||||
; CHECK-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i8
|
||||
|
@ -364,6 +367,8 @@ define i32 @smear_set_bit_use2(i32 %x) {
|
|||
ret i32 %s
|
||||
}
|
||||
|
||||
; negative test - must shift all the way across
|
||||
|
||||
define i32 @smear_set_bit_wrong_shift_amount(i32 %x) {
|
||||
; CHECK-LABEL: @smear_set_bit_wrong_shift_amount(
|
||||
; CHECK-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i8
|
||||
|
@ -377,6 +382,8 @@ define i32 @smear_set_bit_wrong_shift_amount(i32 %x) {
|
|||
ret i32 %s
|
||||
}
|
||||
|
||||
; TODO: this could be mask+compare+sext or shifts+trunc
|
||||
|
||||
define i16 @smear_set_bit_different_dest_type(i32 %x) {
|
||||
; CHECK-LABEL: @smear_set_bit_different_dest_type(
|
||||
; CHECK-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i8
|
||||
|
|
Loading…
Reference in New Issue