[InstCombine] Ensure shifts are in range for (X << C1) / C2 -> X fold.

We can get here before out-of-range shift amounts have been handled — limit the shift amount to BW-2 for sdiv and BW-1 for udiv.

Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=38078
This commit is contained in:
Simon Pilgrim 2021-09-25 12:57:33 +01:00
parent 0b61f43b60
commit 5a14edd8ed
2 changed files with 66 additions and 3 deletions

View File

@ -788,11 +788,12 @@ Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
}
if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
*C1 != C1->getBitWidth() - 1) ||
(!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
C1->ult(C1->getBitWidth() - 1)) ||
(!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))) &&
C1->ult(C1->getBitWidth()))) {
APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
APInt C1Shifted = APInt::getOneBitSet(
C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));
C1->getBitWidth(), static_cast<unsigned>(C1->getZExtValue()));
// (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {

View File

@ -1796,6 +1796,68 @@ define void @ashr_out_of_range_1(i177* %A) {
ret void
}
; OSS Fuzz #38078
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=38078
define void @ossfuzz_38078(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @ossfuzz_38078(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I2:%.*]] = sub i32 0, [[ARG1:%.*]]
; CHECK-NEXT: [[I5:%.*]] = icmp eq i32 [[I2]], [[ARG:%.*]]
; CHECK-NEXT: call void @llvm.assume(i1 [[I5]])
; CHECK-NEXT: store volatile i32 undef, i32* undef, align 4
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: BB:
; CHECK-NEXT: unreachable
;
bb:
%i = or i32 0, -1
%B24 = urem i32 %i, -2147483648
%B21 = or i32 %i, %i
%i2 = add nsw i32 %arg, %arg1
%B7 = or i32 %i, %i2
%B8 = and i32 %i, %i2
%B12 = sdiv i32 %i2, %B7
%B3 = add i32 %i2, %B24
%B5 = and i32 %i, %B3
%B18 = and i32 %i, %B8
%i3 = xor i32 %i2, %B3
%C1 = icmp ne i32 %B8, %B5
%i4 = lshr i32 %B5, %i3
%B29 = shl nuw i32 %B8, %i3
%B2 = lshr i32 %B12, %i2
%B16 = add i32 %B2, %i3
%B = sdiv i32 %B29, %B5
%B15 = sub i32 %i2, %B5
%B22 = or i32 %B21, %B29
%B23 = mul i32 %B15, %B
%C2 = icmp sge i1 %C1, false
%C7 = icmp sle i32 %i3, %B16
%B20 = xor i32 %B21, %B22
%G1 = getelementptr i32, i32* undef, i32 %B22
%B1 = sub i32 %B, undef
%B26 = ashr i32 %B29, undef
%B4 = add i32 undef, %B5
%B27 = srem i32 %B12, %B21
%i5 = icmp eq i32 %B20, %B18
%C11 = icmp ugt i32 %i4, %B4
call void @llvm.assume(i1 %i5)
store volatile i32 %B4, i32* %G1, align 4
%B11 = or i32 undef, %B23
br label %BB
BB:
store i1 %C7, i1* undef, align 1
store i32 %B11, i32* undef, align 4
store i1 %C11, i1* undef, align 1
store i32 %B1, i32* undef, align 4
store i32 %B27, i32* undef, align 4
%C = icmp ne i32 %B26, undef
%B17 = or i1 %C, %C2
store i1 %B17, i1* undef, align 1
unreachable
}
declare void @llvm.assume(i1 noundef)
define i8 @lshr_mask_demand(i8 %x) {
; CHECK-LABEL: @lshr_mask_demand(
; CHECK-NEXT: [[S:%.*]] = lshr i8 63, [[X:%.*]]