From 77e07cc0109b5f6b7feb7dc72eda01f656b164af Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper@gmail.com>
Date: Wed, 24 May 2017 17:05:28 +0000
Subject: [PATCH] [InstSimplify] Simplify uadd/sadd/umul/smul with overflow
 intrinsics when the Zero or Undef is on the LHS.

Summary: This code was migrated from InstCombine a few years ago. InstCombine
had nearby code that would move Constants to the RHS for these, but
InstSimplify doesn't have such code on this path.

Reviewers: spatel, majnemer, davide

Reviewed By: spatel

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D33473

llvm-svn: 303774
---
 llvm/lib/Analysis/InstructionSimplify.cpp |  8 ++--
 llvm/test/Transforms/InstSimplify/call.ll | 48 +++++++++++++++++++++++
 2 files changed, 53 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 9572d81e471b..f10e2378fdbb 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -4440,19 +4440,21 @@ static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
   case Intrinsic::uadd_with_overflow:
   case Intrinsic::sadd_with_overflow: {
     // X + undef -> undef
-    if (isa<UndefValue>(RHS))
+    if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
       return UndefValue::get(ReturnType);
 
     return nullptr;
   }
   case Intrinsic::umul_with_overflow:
   case Intrinsic::smul_with_overflow: {
+    // 0 * X -> { 0, false }
     // X * 0 -> { 0, false }
-    if (match(RHS, m_Zero()))
+    if (match(LHS, m_Zero()) || match(RHS, m_Zero()))
       return Constant::getNullValue(ReturnType);
 
+    // undef * X -> { 0, false }
     // X * undef -> { 0, false }
-    if (match(RHS, m_Undef()))
+    if (match(LHS, m_Undef()) || match(RHS, m_Undef()))
       return Constant::getNullValue(ReturnType);
 
     return nullptr;
diff --git a/llvm/test/Transforms/InstSimplify/call.ll b/llvm/test/Transforms/InstSimplify/call.ll
index 5d68fbea436c..68daac65ee6b 100644
--- a/llvm/test/Transforms/InstSimplify/call.ll
+++ b/llvm/test/Transforms/InstSimplify/call.ll
@@ -35,6 +35,14 @@ define {i8, i1} @test_uadd3(i8 %v) {
   ret {i8, i1} %result
 }
 
+define {i8, i1} @test_uadd4(i8 %v) {
+; CHECK-LABEL: @test_uadd4(
+; CHECK-NEXT:    ret { i8, i1 } undef
+;
+  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 undef, i8 %v)
+  ret {i8, i1} %result
+}
+
 define i1 @test_sadd1() {
 ; CHECK-LABEL: @test_sadd1(
 ; CHECK-NEXT:    ret i1 true
@@ -61,6 +69,14 @@ define {i8, i1} @test_sadd3(i8 %v) {
   ret {i8, i1} %result
 }
 
+define {i8, i1} @test_sadd4(i8 %v) {
+; CHECK-LABEL: @test_sadd4(
+; CHECK-NEXT:    ret { i8, i1 } undef
+;
+  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 undef, i8 %v)
+  ret {i8, i1} %result
+}
+
 define {i8, i1} @test_usub1(i8 %V) {
 ; CHECK-LABEL: @test_usub1(
 ; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
@@ -125,6 +141,22 @@ define {i8, i1} @test_umul2(i8 %V) {
   ret {i8, i1} %x
 }
 
+define {i8, i1} @test_umul3(i8 %V) {
+; CHECK-LABEL: @test_umul3(
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
+;
+  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 0, i8 %V)
+  ret {i8, i1} %x
+}
+
+define {i8, i1} @test_umul4(i8 %V) {
+; CHECK-LABEL: @test_umul4(
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
+;
+  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 %V)
+  ret {i8, i1} %x
+}
+
 define {i8, i1} @test_smul1(i8 %V) {
 ; CHECK-LABEL: @test_smul1(
 ; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
@@ -141,6 +173,22 @@ define {i8, i1} @test_smul2(i8 %V) {
   ret {i8, i1} %x
 }
 
+define {i8, i1} @test_smul3(i8 %V) {
+; CHECK-LABEL: @test_smul3(
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
+;
+  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 0, i8 %V)
+  ret {i8, i1} %x
+}
+
+define {i8, i1} @test_smul4(i8 %V) {
+; CHECK-LABEL: @test_smul4(
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
+;
+  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 undef, i8 %V)
+  ret {i8, i1} %x
+}
+
 declare i256 @llvm.cttz.i256(i256 %src, i1 %is_zero_undef)
 
 define i256 @test_cttz() {