From 83914ee96fc2d828e1cfb8913f5d156d39150e2c Mon Sep 17 00:00:00 2001
From: Serge Pavlov
Date: Thu, 5 May 2022 12:02:42 +0700
Subject: [PATCH] [InstCombine] Remove side effect of replaced constrained
 intrinsics

If a constrained intrinsic call was replaced by some value, it was not
removed in some cases. The dangling instruction resulted in useless
instructions executed at runtime. This happened because constrained
intrinsics usually have a side effect, which is used to model the
interaction with the floating-point environment. In some cases this is the
correct behavior, but often the side effect is actually absent or can be
ignored. This change adds specific treatment of constrained intrinsics so
that their side effect can be removed if it is actually absent.

Differential Revision: https://reviews.llvm.org/D118426
---
 .../llvm/Analysis/InstructionSimplify.h   |   4 +
 .../InstCombine/InstCombineCalls.cpp      |  10 ++
 .../Transforms/InstCombine/constrained.ll | 125 ++++++++++++++++++
 3 files changed, 139 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/constrained.ll

diff --git a/llvm/include/llvm/Analysis/InstructionSimplify.h b/llvm/include/llvm/Analysis/InstructionSimplify.h
index 8f6ed3a6a192..612a73551f72 100644
--- a/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -299,6 +299,10 @@ Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                      FastMathFlags FMF, const SimplifyQuery &Q);
 
 /// Given a callsite, fold the result or return null.
+///
+/// \note A call with a declared side effect may be simplified into a value
+/// without one. This happens if the simplification code deduces that the
+/// side effect is actually absent.
 Value *SimplifyCall(CallBase *Call, const SimplifyQuery &Q);
 
 /// Given an operand for a Freeze, see if we can fold the result.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index c96919caee2b..6b2ab24b4b74 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1237,6 +1237,16 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
     return NewCall;
   }
 
+  // Unused constrained FP intrinsic calls may have a declared side effect
+  // which is actually absent. If SimplifyCall returns a replacement for such
+  // a call, assume the side effect is absent and the call may be removed.
+  if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
+    if (SimplifyCall(&CI, SQ.getWithInstruction(&CI))) {
+      eraseInstFromFunction(CI);
+      return nullptr;
+    }
+  }
+
   Intrinsic::ID IID = II->getIntrinsicID();
   switch (IID) {
   case Intrinsic::objectsize:
diff --git a/llvm/test/Transforms/InstCombine/constrained.ll b/llvm/test/Transforms/InstCombine/constrained.ll
new file mode 100644
index 000000000000..b5ef71e6edfb
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/constrained.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -instcombine %s | FileCheck %s
+
+; Treatment of operation with unused result.
+
+; If operation does not raise exceptions, it may be removed even in strict mode.
+define float @f_unused_precise() #0 {
+; CHECK-LABEL: @f_unused_precise(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret float 1.000000e+00
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fadd.f32(float 1.0, float 1.0, metadata !"round.upward", metadata !"fpexcept.strict") #0
+  ret float 1.0
+}
+
+; If operation raises exceptions, it cannot be removed in strict mode.
+define float @f_unused_strict() #0 {
+; CHECK-LABEL: @f_unused_strict(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RESULT:%.*]] = call float @llvm.experimental.constrained.fdiv.f32(float 1.000000e+00, float 3.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0:[0-9]+]]
+; CHECK-NEXT:    ret float 1.000000e+00
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fdiv.f32(float 1.0, float 3.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float 1.0
+}
+
+; If operation raises exceptions, it can be removed in non-strict mode.
+define float @f_unused_ignore() #0 {
+; CHECK-LABEL: @f_unused_ignore(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret float 1.000000e+00
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fdiv.f32(float 1.0, float 3.0, metadata !"round.towardzero", metadata !"fpexcept.ignore") #0
+  ret float 1.0
+}
+
+; If operation raises exceptions, it can be removed in non-strict mode even if rounding mode is dynamic.
+define float @f_unused_dynamic_ignore() #0 {
+; CHECK-LABEL: @f_unused_dynamic_ignore(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret float 1.000000e+00
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fdiv.f32(float 1.0, float 3.0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+  ret float 1.0
+}
+
+; If operation raises exceptions, it can be removed in "maytrap" mode.
+define float @f_unused_maytrap() #0 {
+; CHECK-LABEL: @f_unused_maytrap(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret float 1.000000e+00
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fdiv.f32(float 1.0, float 3.0, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
+  ret float 1.0
+}
+
+; Constant evaluation.
+
+; If operation does not raise exceptions, it may be folded even in strict mode.
+define float @f_eval_precise() #0 {
+; CHECK-LABEL: @f_eval_precise(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret float 2.000000e+00
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fadd.f32(float 1.0, float 1.0, metadata !"round.upward", metadata !"fpexcept.strict") #0
+  ret float %result
+}
+
+; If operation raises exceptions, it cannot be folded in strict mode.
+define float @f_eval_strict() #0 {
+; CHECK-LABEL: @f_eval_strict(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RESULT:%.*]] = call float @llvm.experimental.constrained.fdiv.f32(float 1.000000e+00, float 3.000000e+00, metadata !"round.upward", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    ret float [[RESULT]]
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fdiv.f32(float 1.0, float 3.0, metadata !"round.upward", metadata !"fpexcept.strict") #0
+  ret float %result
+}
+
+; If operation raises exceptions, it can be folded in non-strict mode.
+define float @f_eval_ignore() #0 {
+; CHECK-LABEL: @f_eval_ignore(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret float 0x3FD5555540000000
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fdiv.f32(float 1.0, float 3.0, metadata !"round.downward", metadata !"fpexcept.ignore") #0
+  ret float %result
+}
+
+; If result is imprecise, it cannot be folded if rounding mode is dynamic.
+define float @f_eval_dynamic_ignore() #0 {
+; CHECK-LABEL: @f_eval_dynamic_ignore(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RESULT:%.*]] = call float @llvm.experimental.constrained.fdiv.f32(float 1.000000e+00, float 3.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK-NEXT:    ret float [[RESULT]]
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fdiv.f32(float 1.0, float 3.0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+  ret float %result
+}
+
+; If result is imprecise and rounding mode is not dynamic, operation can be folded in "maytrap" mode.
+define float @f_eval_maytrap() #0 {
+; CHECK-LABEL: @f_eval_maytrap(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret float 0x3FD5555560000000
+;
+entry:
+  %result = call float @llvm.experimental.constrained.fdiv.f32(float 1.0, float 3.0, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
+  ret float %result
+}
+
+
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
+
+attributes #0 = { strictfp }
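
The rule that the "unused result" tests above encode can be restated as a
short C++ sketch. This sketch is not part of the patch: the
ConstrainedFPIntrinsic accessor used below comes from
llvm/IR/IntrinsicInst.h, while MayRaiseException is a hypothetical parameter
standing in for whatever constant evaluation of the operands determines
(for example, the fdiv by 3.0 in the tests is inexact and may raise
FE_INEXACT).

// Minimal sketch, assuming LLVM of roughly this commit's vintage; not the
// patch's code. An unused constrained call's declared side effect is only
// observable when the call may actually raise an FP exception and the
// exception behavior is "fpexcept.strict"; with "fpexcept.ignore" or
// "fpexcept.maytrap" the status flags may be discarded.
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

static bool unusedCallSideEffectIsAbsent(const ConstrainedFPIntrinsic &Call,
                                         bool MayRaiseException) {
  // getExceptionBehavior() reflects the "fpexcept.*" metadata operand.
  auto EB = Call.getExceptionBehavior();
  if (!EB || *EB != fp::ebStrict)
    return true;              // "ignore"/"maytrap": flags may be dropped
  return !MayRaiseException;  // "strict": removable only if nothing is raised
}

Note that removal of an unused call does not depend on the rounding mode;
rounding only matters for the constant-evaluation tests, where an imprecise
result cannot be folded under "round.dynamic".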