[InstCombine] convert mul by negative-pow2 to negate and shift

This is an unusual canonicalization because we create an extra instruction,
but it's likely better for analysis and codegen (similar reasoning to D133399).

InstCombine::Negator may create this kind of multiply from a negate and shift,
but the two transforms should not fight each other because this fold negates
in the narrow source type.

I don't know how to create a fully general proof for this kind of transform in
Alive2, but here's an example with bitwidths similar to one of the regression
tests:
https://alive2.llvm.org/ce/z/J3jTjR
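
For concreteness, the i8 -> i32 case from the regression tests below can be
written as a src/tgt pair (a hand-written sketch in the spirit of the Alive2
example; the @src/@tgt names are illustrative, not the contents of that link):

define i32 @src(i8 %x) {
  %zx = zext i8 %x to i32
  %r = mul i32 %zx, -16777216   ; -16777216 == -1 << 24
  ret i32 %r
}

define i32 @tgt(i8 %x) {
  %neg = sub i8 0, %x           ; negate in the narrow type
  %zneg = zext i8 %neg to i32
  %r = shl i32 %zneg, 24
  ret i32 %r
}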

Differential Revision: https://reviews.llvm.org/D133667
Sanjay Patel 2022-10-02 12:22:25 -04:00
parent 4490cfbaf4
commit 2e87333bfe
3 changed files with 49 additions and 18 deletions


@@ -158,7 +158,8 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-  unsigned BitWidth = I.getType()->getScalarSizeInBits();
+  Type *Ty = I.getType();
+  unsigned BitWidth = Ty->getScalarSizeInBits();
 
   // X * -1 == 0 - X
   if (match(Op1, m_AllOnes())) {
@@ -212,6 +213,25 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
     if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ true, Op0, *this))
       return BinaryOperator::CreateMul(
           NegOp0, ConstantExpr::getNeg(cast<Constant>(Op1)), I.getName());
+
+    // Try to convert multiply of extended operand to narrow negate and shift
+    // for better analysis.
+    // This is valid if the shift amount (trailing zeros in the multiplier
+    // constant) clears more high bits than the bitwidth difference between
+    // source and destination types:
+    // ({z/s}ext X) * (-1<<C) --> (zext (-X)) << C
+    const APInt *NegPow2C;
+    Value *X;
+    if (match(Op0, m_ZExtOrSExt(m_Value(X))) &&
+        match(Op1, m_APIntAllowUndef(NegPow2C))) {
+      unsigned SrcWidth = X->getType()->getScalarSizeInBits();
+      unsigned ShiftAmt = NegPow2C->countTrailingZeros();
+      if (ShiftAmt >= BitWidth - SrcWidth) {
+        Value *N = Builder.CreateNeg(X, X->getName() + ".neg");
+        Value *Z = Builder.CreateZExt(N, Ty, N->getName() + ".z");
+        return BinaryOperator::CreateShl(Z, ConstantInt::get(Ty, ShiftAmt));
+      }
+    }
   }
 
   if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
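
An aside on why the width check above suffices: zext (-X) and -(zext X) agree
in the low SrcWidth bits and can differ only above them, so a left shift of at
least BitWidth - SrcWidth pushes every disagreeing bit out of the value. A
brute-force confirmation for SrcWidth = 8 and BitWidth = 16 (a standalone
sketch using plain integers rather than APInt; not part of this commit):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned BitWidth = 16, SrcWidth = 8;
  // Every shift amount permitted by the guard ShiftAmt >= BitWidth - SrcWidth.
  for (unsigned ShiftAmt = BitWidth - SrcWidth; ShiftAmt < BitWidth; ++ShiftAmt) {
    uint16_t MulC = (uint16_t)(0u - (1u << ShiftAmt)); // -1 << ShiftAmt
    for (unsigned X = 0; X < 256; ++X) {
      uint16_t ZExt = (uint16_t)X;                  // zext i8 X to i16
      uint16_t SExt = (uint16_t)(int16_t)(int8_t)X; // sext i8 X to i16
      uint8_t Neg = (uint8_t)(0u - X);              // negate in i8
      uint16_t Repl = (uint16_t)((uint32_t)Neg << ShiftAmt);
      assert((uint16_t)((uint32_t)ZExt * MulC) == Repl); // zext form of the fold
      assert((uint16_t)((uint32_t)SExt * MulC) == Repl); // sext form of the fold
    }
  }
  return 0;
}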
@@ -320,7 +340,6 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
   // 2) X * Y --> X & Y, iff X, Y can be only {0,1}.
   // Note: We could use known bits to generalize this and related patterns with
   //       shifts/truncs
-  Type *Ty = I.getType();
   if (Ty->isIntOrIntVectorTy(1) ||
       (match(Op0, m_And(m_Value(), m_One())) &&
        match(Op1, m_And(m_Value(), m_One()))))


@@ -2254,18 +2254,16 @@ define { i64, i64 } @PR57576(i64 noundef %x, i64 noundef %y, i64 noundef %z, i64 noundef %w) {
 ; CHECK-LABEL: @PR57576(
 ; CHECK-NEXT:    [[ZX:%.*]] = zext i64 [[X:%.*]] to i128
 ; CHECK-NEXT:    [[ZY:%.*]] = zext i64 [[Y:%.*]] to i128
-; CHECK-NEXT:    [[ZW:%.*]] = zext i64 [[W:%.*]] to i128
 ; CHECK-NEXT:    [[ZZ:%.*]] = zext i64 [[Z:%.*]] to i128
 ; CHECK-NEXT:    [[SHY:%.*]] = shl nuw i128 [[ZY]], 64
 ; CHECK-NEXT:    [[XY:%.*]] = or i128 [[SHY]], [[ZX]]
-; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i128 [[ZW]], 64
-; CHECK-NEXT:    [[TMP2:%.*]] = or i128 [[TMP1]], [[ZZ]]
-; CHECK-NEXT:    [[ADD:%.*]] = sub i128 [[XY]], [[TMP2]]
-; CHECK-NEXT:    [[T:%.*]] = trunc i128 [[ADD]] to i64
-; CHECK-NEXT:    [[H:%.*]] = lshr i128 [[ADD]], 64
-; CHECK-NEXT:    [[T2:%.*]] = trunc i128 [[H]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub i128 [[XY]], [[ZZ]]
+; CHECK-NEXT:    [[T:%.*]] = trunc i128 [[SUB]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i128 [[SUB]], 64
+; CHECK-NEXT:    [[DOTTR:%.*]] = trunc i128 [[TMP1]] to i64
+; CHECK-NEXT:    [[DOTNARROW:%.*]] = sub i64 [[DOTTR]], [[W:%.*]]
 ; CHECK-NEXT:    [[R1:%.*]] = insertvalue { i64, i64 } poison, i64 [[T]], 0
-; CHECK-NEXT:    [[R2:%.*]] = insertvalue { i64, i64 } [[R1]], i64 [[T2]], 1
+; CHECK-NEXT:    [[R2:%.*]] = insertvalue { i64, i64 } [[R1]], i64 [[DOTNARROW]], 1
 ; CHECK-NEXT:    ret { i64, i64 } [[R2]]
 ;
   %zx = zext i64 %x to i128


@@ -1484,8 +1484,9 @@ define i32 @mulnot_extrause(i32 %a0) {
 
 define i32 @zext_negpow2(i8 %x) {
 ; CHECK-LABEL: @zext_negpow2(
-; CHECK-NEXT:    [[ZX:%.*]] = zext i8 [[X:%.*]] to i32
-; CHECK-NEXT:    [[R:%.*]] = mul i32 [[ZX]], -16777216
+; CHECK-NEXT:    [[X_NEG:%.*]] = sub i8 0, [[X:%.*]]
+; CHECK-NEXT:    [[X_NEG_Z:%.*]] = zext i8 [[X_NEG]] to i32
+; CHECK-NEXT:    [[R:%.*]] = shl nuw i32 [[X_NEG_Z]], 24
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %zx = zext i8 %x to i32
@@ -1493,10 +1494,13 @@ define i32 @zext_negpow2(i8 %x) {
   %r = mul i32 %zx, -16777216
   ret i32 %r
 }
+
+; splat constant
 define <2 x i14> @zext_negpow2_vec(<2 x i5> %x) {
 ; CHECK-LABEL: @zext_negpow2_vec(
-; CHECK-NEXT:    [[ZX:%.*]] = zext <2 x i5> [[X:%.*]] to <2 x i14>
-; CHECK-NEXT:    [[R:%.*]] = mul <2 x i14> [[ZX]], <i14 -2048, i14 -2048>
+; CHECK-NEXT:    [[X_NEG:%.*]] = sub <2 x i5> zeroinitializer, [[X:%.*]]
+; CHECK-NEXT:    [[X_NEG_Z:%.*]] = zext <2 x i5> [[X_NEG]] to <2 x i14>
+; CHECK-NEXT:    [[R:%.*]] = shl <2 x i14> [[X_NEG_Z]], <i14 11, i14 11>
 ; CHECK-NEXT:    ret <2 x i14> [[R]]
 ;
   %zx = zext <2 x i5> %x to <2 x i14>
@@ -1504,6 +1508,8 @@ define <2 x i14> @zext_negpow2_vec(<2 x i5> %x) {
   %r = mul <2 x i14> %zx, <i14 -2048, i14 -2048>
   ret <2 x i14> %r
 }
+
+; negative test - mul must be big enough to cover bitwidth diff
 define i32 @zext_negpow2_too_small(i8 %x) {
 ; CHECK-LABEL: @zext_negpow2_too_small(
 ; CHECK-NEXT:    [[ZX:%.*]] = zext i8 [[X:%.*]] to i32
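
A hand-worked example of why this case must be rejected (the multiplier below
is chosen for illustration and is not necessarily the constant in this test):
with zext i8 -> i32, BitWidth - SrcWidth is 24, so a multiplier of -128
(ShiftAmt = 7) is too small:

; for %x = 2:
;   (zext i8 2 to i32) * -128       -->  -256 (0xFFFFFF00)
;   (zext i8 (0 - 2) to i32) << 7   -->  254 << 7 = 32512 (0x00007F00)
; the results differ, so the narrow-negate rewrite would be wrong here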
@@ -1517,8 +1523,9 @@ define i32 @zext_negpow2_too_small(i8 %x) {
 define i16 @sext_negpow2(i9 %x) {
 ; CHECK-LABEL: @sext_negpow2(
-; CHECK-NEXT:    [[SX:%.*]] = sext i9 [[X:%.*]] to i16
-; CHECK-NEXT:    [[R:%.*]] = mul i16 [[SX]], -1024
+; CHECK-NEXT:    [[X_NEG:%.*]] = sub i9 0, [[X:%.*]]
+; CHECK-NEXT:    [[X_NEG_Z:%.*]] = zext i9 [[X_NEG]] to i16
+; CHECK-NEXT:    [[R:%.*]] = shl i16 [[X_NEG_Z]], 10
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %sx = sext i9 %x to i16
@@ -1526,10 +1533,13 @@ define i16 @sext_negpow2(i9 %x) {
   %r = mul i16 %sx, -1024
   ret i16 %r
 }
+
+; splat constant with poison element(s)
 define <2 x i16> @sext_negpow2_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @sext_negpow2_vec(
-; CHECK-NEXT:    [[SX:%.*]] = sext <2 x i8> [[X:%.*]] to <2 x i16>
-; CHECK-NEXT:    [[R:%.*]] = mul <2 x i16> [[SX]], <i16 -256, i16 poison>
+; CHECK-NEXT:    [[X_NEG:%.*]] = sub <2 x i8> zeroinitializer, [[X:%.*]]
+; CHECK-NEXT:    [[X_NEG_Z:%.*]] = zext <2 x i8> [[X_NEG]] to <2 x i16>
+; CHECK-NEXT:    [[R:%.*]] = shl nuw <2 x i16> [[X_NEG_Z]], <i16 8, i16 8>
 ; CHECK-NEXT:    ret <2 x i16> [[R]]
 ;
   %sx = sext <2 x i8> %x to <2 x i16>
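
Note the splat-with-poison case just above: m_APIntAllowUndef is what lets the
multiplier <i16 -256, i16 poison> match as the splat constant -256, and the
rewritten shl is then emitted with the full splat <i16 8, i16 8>.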
@@ -1537,6 +1547,8 @@ define <2 x i16> @sext_negpow2_vec(<2 x i8> %x) {
   %r = mul <2 x i16> %sx, <i16 -256, i16 poison>
   ret <2 x i16> %r
 }
+
+; negative test - mul must be big enough to cover bitwidth diff
 define <2 x i16> @sext_negpow2_too_small_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @sext_negpow2_too_small_vec(
 ; CHECK-NEXT:    [[SX:%.*]] = sext <2 x i8> [[X:%.*]] to <2 x i16>
@@ -1548,6 +1560,8 @@ define <2 x i16> @sext_negpow2_too_small_vec(<2 x i8> %x) {
   ret <2 x i16> %r
 }
+
+; negative test - too many uses
 define i32 @zext_negpow2_use(i8 %x) {
 ; CHECK-LABEL: @zext_negpow2_use(
 ; CHECK-NEXT:    [[ZX:%.*]] = zext i8 [[X:%.*]] to i32