forked from OSchip/llvm-project
Revert "[InstCombine] "Bypass" NUW trunc of lshr if we are going to sext the result (PR49543)"
I forgot about the case where we sign-extend to width smaller than the original.
This reverts commit 41b71f718b.
This commit is contained in:
parent
d87b9b81cc
commit
1e68d338c1
|
@ -1498,36 +1498,6 @@ Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
|
|||
unsigned DestBitSize = DestTy->getScalarSizeInBits();
|
||||
unsigned XBitSize = X->getType()->getScalarSizeInBits();
|
||||
|
||||
// Iff we are chopping off all the zero bits that were just shifted-in,
|
||||
// instead perform the arithmetic shift, and bypass trunc by sign-extending
|
||||
// it directly. Either one of the lshr and trunc can have extra uses, we can
|
||||
// fix them up, but only one of them, else we increase instruction count.
|
||||
if (match(X,
|
||||
m_LShr(m_Value(), m_SpecificInt_ICMP(
|
||||
ICmpInst::Predicate::ICMP_EQ,
|
||||
APInt(XBitSize, XBitSize - SrcBitSize)))) &&
|
||||
(Src->hasOneUse() || X->hasOneUser())) {
|
||||
auto *LShr = cast<Instruction>(X);
|
||||
auto *AShr =
|
||||
BinaryOperator::CreateAShr(LShr->getOperand(0), LShr->getOperand(1),
|
||||
LShr->getName() + ".signed", LShr);
|
||||
if (!LShr->hasOneUse()) {
|
||||
auto *Mask =
|
||||
ConstantExpr::getLShr(Constant::getAllOnesValue(AShr->getType()),
|
||||
cast<Constant>(LShr->getOperand(1)));
|
||||
auto *NewLShr =
|
||||
BinaryOperator::CreateAnd(AShr, Mask, LShr->getName(), LShr);
|
||||
replaceInstUsesWith(*LShr, NewLShr);
|
||||
}
|
||||
if (!Src->hasOneUse()) {
|
||||
auto *OldTrunc = cast<Instruction>(Src);
|
||||
auto *NewTrunc = CastInst::Create(Instruction::Trunc, AShr, SrcTy,
|
||||
OldTrunc->getName(), OldTrunc);
|
||||
replaceInstUsesWith(*OldTrunc, NewTrunc);
|
||||
}
|
||||
return CastInst::Create(Instruction::SExt, AShr, DestTy);
|
||||
}
|
||||
|
||||
// Iff X had more sign bits than the number of bits that were chopped off
|
||||
// by the truncation, we can directly sign-extend the X.
|
||||
unsigned XNumSignBits = ComputeNumSignBits(X, 0, &CI);
|
||||
|
|
|
@ -12,8 +12,9 @@ declare void @usevec4(<2 x i4>)
|
|||
|
||||
define i16 @t0(i8 %x) {
|
||||
; CHECK-LABEL: @t0(
|
||||
; CHECK-NEXT: [[A_SIGNED:%.*]] = ashr i8 [[X:%.*]], 4
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i8 [[A_SIGNED]] to i16
|
||||
; CHECK-NEXT: [[A:%.*]] = lshr i8 [[X:%.*]], 4
|
||||
; CHECK-NEXT: [[B:%.*]] = trunc i8 [[A]] to i4
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i4 [[B]] to i16
|
||||
; CHECK-NEXT: ret i16 [[C]]
|
||||
;
|
||||
%a = lshr i8 %x, 4
|
||||
|
@ -24,8 +25,9 @@ define i16 @t0(i8 %x) {
|
|||
|
||||
define i16 @t1(i8 %x) {
|
||||
; CHECK-LABEL: @t1(
|
||||
; CHECK-NEXT: [[A_SIGNED:%.*]] = ashr i8 [[X:%.*]], 5
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i8 [[A_SIGNED]] to i16
|
||||
; CHECK-NEXT: [[A:%.*]] = lshr i8 [[X:%.*]], 5
|
||||
; CHECK-NEXT: [[B:%.*]] = trunc i8 [[A]] to i3
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i3 [[B]] to i16
|
||||
; CHECK-NEXT: ret i16 [[C]]
|
||||
;
|
||||
%a = lshr i8 %x, 5
|
||||
|
@ -36,8 +38,9 @@ define i16 @t1(i8 %x) {
|
|||
|
||||
define i16 @t2(i7 %x) {
|
||||
; CHECK-LABEL: @t2(
|
||||
; CHECK-NEXT: [[A_SIGNED:%.*]] = ashr i7 [[X:%.*]], 3
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i7 [[A_SIGNED]] to i16
|
||||
; CHECK-NEXT: [[A:%.*]] = lshr i7 [[X:%.*]], 3
|
||||
; CHECK-NEXT: [[B:%.*]] = trunc i7 [[A]] to i4
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i4 [[B]] to i16
|
||||
; CHECK-NEXT: ret i16 [[C]]
|
||||
;
|
||||
%a = lshr i7 %x, 3
|
||||
|
@ -61,8 +64,9 @@ define i16 @n3(i8 %x) {
|
|||
|
||||
define <2 x i16> @t4_vec_splat(<2 x i8> %x) {
|
||||
; CHECK-LABEL: @t4_vec_splat(
|
||||
; CHECK-NEXT: [[A_SIGNED:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 4, i8 4>
|
||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i8> [[A_SIGNED]] to <2 x i16>
|
||||
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 4>
|
||||
; CHECK-NEXT: [[B:%.*]] = trunc <2 x i8> [[A]] to <2 x i4>
|
||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i4> [[B]] to <2 x i16>
|
||||
; CHECK-NEXT: ret <2 x i16> [[C]]
|
||||
;
|
||||
%a = lshr <2 x i8> %x, <i8 4, i8 4>
|
||||
|
@ -73,8 +77,9 @@ define <2 x i16> @t4_vec_splat(<2 x i8> %x) {
|
|||
|
||||
define <2 x i16> @t5_vec_undef(<2 x i8> %x) {
|
||||
; CHECK-LABEL: @t5_vec_undef(
|
||||
; CHECK-NEXT: [[A_SIGNED:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 4, i8 undef>
|
||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i8> [[A_SIGNED]] to <2 x i16>
|
||||
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 undef>
|
||||
; CHECK-NEXT: [[B:%.*]] = trunc <2 x i8> [[A]] to <2 x i4>
|
||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i4> [[B]] to <2 x i16>
|
||||
; CHECK-NEXT: ret <2 x i16> [[C]]
|
||||
;
|
||||
%a = lshr <2 x i8> %x, <i8 4, i8 undef>
|
||||
|
@ -85,10 +90,10 @@ define <2 x i16> @t5_vec_undef(<2 x i8> %x) {
|
|||
|
||||
define i16 @t6_extrause0(i8 %x) {
|
||||
; CHECK-LABEL: @t6_extrause0(
|
||||
; CHECK-NEXT: [[A_SIGNED:%.*]] = ashr i8 [[X:%.*]], 4
|
||||
; CHECK-NEXT: [[B1:%.*]] = trunc i8 [[A_SIGNED]] to i4
|
||||
; CHECK-NEXT: call void @use4(i4 [[B1]])
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i8 [[A_SIGNED]] to i16
|
||||
; CHECK-NEXT: [[A:%.*]] = lshr i8 [[X:%.*]], 4
|
||||
; CHECK-NEXT: [[B:%.*]] = trunc i8 [[A]] to i4
|
||||
; CHECK-NEXT: call void @use4(i4 [[B]])
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i4 [[B]] to i16
|
||||
; CHECK-NEXT: ret i16 [[C]]
|
||||
;
|
||||
%a = lshr i8 %x, 4
|
||||
|
@ -99,10 +104,10 @@ define i16 @t6_extrause0(i8 %x) {
|
|||
}
|
||||
define <2 x i16> @t7_extrause0_vec_undef(<2 x i8> %x) {
|
||||
; CHECK-LABEL: @t7_extrause0_vec_undef(
|
||||
; CHECK-NEXT: [[A_SIGNED:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 4, i8 undef>
|
||||
; CHECK-NEXT: [[B1:%.*]] = trunc <2 x i8> [[A_SIGNED]] to <2 x i4>
|
||||
; CHECK-NEXT: call void @usevec4(<2 x i4> [[B1]])
|
||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i8> [[A_SIGNED]] to <2 x i16>
|
||||
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 undef>
|
||||
; CHECK-NEXT: [[B:%.*]] = trunc <2 x i8> [[A]] to <2 x i4>
|
||||
; CHECK-NEXT: call void @usevec4(<2 x i4> [[B]])
|
||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i4> [[B]] to <2 x i16>
|
||||
; CHECK-NEXT: ret <2 x i16> [[C]]
|
||||
;
|
||||
%a = lshr <2 x i8> %x, <i8 4, i8 undef>
|
||||
|
@ -113,10 +118,10 @@ define <2 x i16> @t7_extrause0_vec_undef(<2 x i8> %x) {
|
|||
}
|
||||
define i16 @t8_extrause1(i8 %x) {
|
||||
; CHECK-LABEL: @t8_extrause1(
|
||||
; CHECK-NEXT: [[A_SIGNED:%.*]] = ashr i8 [[X:%.*]], 4
|
||||
; CHECK-NEXT: [[A1:%.*]] = and i8 [[A_SIGNED]], 15
|
||||
; CHECK-NEXT: call void @use8(i8 [[A1]])
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i8 [[A_SIGNED]] to i16
|
||||
; CHECK-NEXT: [[A:%.*]] = lshr i8 [[X:%.*]], 4
|
||||
; CHECK-NEXT: call void @use8(i8 [[A]])
|
||||
; CHECK-NEXT: [[B:%.*]] = trunc i8 [[A]] to i4
|
||||
; CHECK-NEXT: [[C:%.*]] = sext i4 [[B]] to i16
|
||||
; CHECK-NEXT: ret i16 [[C]]
|
||||
;
|
||||
%a = lshr i8 %x, 4 ; has extra use, but we can deal with that
|
||||
|
@ -127,10 +132,10 @@ define i16 @t8_extrause1(i8 %x) {
|
|||
}
|
||||
define <2 x i16> @t9_extrause1_vec_undef(<2 x i8> %x) {
|
||||
; CHECK-LABEL: @t9_extrause1_vec_undef(
|
||||
; CHECK-NEXT: [[A_SIGNED:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 4, i8 undef>
|
||||
; CHECK-NEXT: [[A1:%.*]] = and <2 x i8> [[A_SIGNED]], <i8 15, i8 undef>
|
||||
; CHECK-NEXT: call void @usevec8(<2 x i8> [[A1]])
|
||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i8> [[A_SIGNED]] to <2 x i16>
|
||||
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 undef>
|
||||
; CHECK-NEXT: call void @usevec8(<2 x i8> [[A]])
|
||||
; CHECK-NEXT: [[B:%.*]] = trunc <2 x i8> [[A]] to <2 x i4>
|
||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i4> [[B]] to <2 x i16>
|
||||
; CHECK-NEXT: ret <2 x i16> [[C]]
|
||||
;
|
||||
%a = lshr <2 x i8> %x, <i8 4, i8 undef>
|
||||
|
|
Loading…
Reference in New Issue