//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}
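
// Worked sketch (illustrative, not from the original source): on a
// little-endian target, folding <2 x i16> <i16 1, i16 2> into a 32-bit APInt
// visits element 1 first, so Result accumulates as (2 << 16) | 1 = 0x00020001,
// exactly the value a little-endian bitcast to i32 produces.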

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to a vector
      // of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        Type *SrcIVTy =
            VectorType::get(IntegerType::get(C->getContext(), FPWidth),
                            NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(C->getType()->getVectorElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }
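
  // Worked sketch (illustrative): on a little-endian target the branch above
  // folds
  //   bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
  // to <2 x i64> <i64 0x0000000100000000, i64 0x0000000300000002>, i.e. each
  // i64 is assembled from two adjacent i32 pieces.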

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case: the constant is the global itself.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}
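
// Usage sketch (illustrative): for the constant expression
//   i32* getelementptr ([5 x i32], [5 x i32]* @a, i32 0, i32 3)
// IsConstantOffsetFromGlobal sets GV = @a and Offset = 12, since the GEP
// advances three i32 elements (3 * 4 bytes) past the start of @a.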

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()) == 0);
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}
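
// Behavior sketch (illustrative): loading i64 through a bitcast of a constant
// of type { [0 x i32], i64 } takes the struct branch above: the zero-length
// leading field is skipped, the i64 field has the same bit width as the
// requested type, and its value is returned directly.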

namespace {

/// Recursive helper to read bits out of a global. C is the constant being
/// copied out of. ByteOffset is an offset into C. CurPtr is the pointer to
/// copy results into, and BytesLeft is the number of bytes left in the CurPtr
/// buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (auto *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}
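
// Byte-level sketch (illustrative): reading four bytes at offset 0 from the
// constant i32 0x01020304 fills CurPtr with {0x04, 0x03, 0x02, 0x01} on a
// little-endian target and {0x01, 0x02, 0x03, 0x04} on a big-endian one.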

Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(C->getContext(),
                                     DL.getTypeAllocSizeInBits(LoadTy));
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL))
      return FoldBitCast(Res, LoadTy, DL);
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType());

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset + BytesLoaded <= 0)
    return UndefValue::get(IntType);

  // If we're past the end of this constant, the result is also undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
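
// Reinterpretation sketch (illustrative): loading a float from a constant
// global holding i32 1065353216 (0x3f800000) takes the !IntType path above:
// the load is retried as i32, the raw bytes produce 0x3f800000, and bitcasting
// that result back to float yields 1.0f.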

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
             ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading the constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace the load with an immediate integer if the result is an integer
    // or fp value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append the NUL terminator at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}
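
// String-load sketch (illustrative): loading i32 through a constant pointer to
// the string "abc" (whose implicit NUL makes StrLen + 1 == 4 bytes) folds, on
// a little-endian target, to i32 0x00636261 ('a' = 0x61 in the low byte).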

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using the provided DataLayout.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0.Zero |= Known1.Zero;
    Known0.One &= Known1.One;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}
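
// KnownBits sketch (illustrative): if Op0 is a constant expression known to
// have its low 32 bits zero (e.g. a shl by 32) and Op1 is the mask
// 0x00000000ffffffff, then Known0.Zero | Known1.Zero covers every bit, so the
// merge above proves the whole 'and' is the constant 0.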

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
  Type *IntPtrScalarTy = IntPtrTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntPtrTy
                          : IntPtrTy->getScalarType();
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
    C = Folded;

  return C;
}
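
// Canonicalization sketch (illustrative): on a target with 64-bit pointers, a
// GEP with index list (i32 0, i16 4) is rewritten by the loop above so both
// indices are i64 (each value sign-extended), keeping later offset arithmetic
// in a single width.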

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = Ptr->stripPointerCasts();
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized())
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
            Res = FoldedRes;
          return Res;
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }
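
  // Nested-GEP sketch (illustrative): for
  //   getelementptr(i8, getelementptr([10 x i8], [10 x i8]* @buf, i64 0, i64 4), i64 2)
  // the loop above folds both levels into a single byte offset of 6 from @buf
  // before the indices are re-formed below.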

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }
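
  // Offsetof sketch (illustrative): a GEP whose base is a null pointer, such
  // as getelementptr(%S, %S* null, i32 0, i32 2), reaches the branch above and
  // folds to 'inttoptr' of the accumulated byte offset, i.e. the classic
  // constant-folded offsetof pattern mentioned earlier.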

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
        Ty = ATy->getElementType();
      } else {
        // We've reached some non-indexable type.
        break;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);
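
  // Worked example (illustrative): for @g = global [4 x i32] and a byte offset
  // of 24, the loop divides by the 16-byte pointee size first (index 1, 8
  // bytes remaining) and then by the 4-byte element size (index 2), so an
  // over-indexed "gep ... i64 0, i64 6" is re-derived as "gep ... i64 1, i64 2".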

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      ImmutableCallSite CS(cast<CallInst>(InstOrCE));
      if (canConstantFoldCallTo(CS, F))
        return ConstantFoldCall(CS, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &NewU : C->operands()) {
    auto *NewC = cast<Constant>(&NewU);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
      auto It = FoldedOps.find(NewC);
      if (It == FoldedOps.end()) {
        if (auto *FoldedC =
                ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
          FoldedOps.insert({NewC, FoldedC});
          NewC = FoldedC;
        } else {
          FoldedOps.insert({NewC, NewC});
        }
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef, then skip it. Note that while we
      // could skip the value if it is equal to the phi node itself, we choose
      // not to because that would break the rule that constant folding only
      // applies if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
        C = FoldedC;
      // If the incoming value is a different constant from the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }
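
  // e.g. (illustrative IR):
  //   %p = phi i32 [ 7, %a ], [ 7, %b ], [ undef, %c ]
  // folds to i32 7: undef incoming values are skipped and the remaining
  // constants all agree.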

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
      Op = FoldedOp;

    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date; the DataLayout is available
  // here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
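
// Illustrative: with the normalization above,
//   icmp eq (inttoptr (i64 0 to i8*)), null
// is rewritten as a comparison of pointer-sized integers and folds to true,
// a decision that needs DataLayout and so cannot be made by
// ConstantExpr::getCompare alone.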

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}
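
// Illustrative (hypothetical): with 32-bit pointers, folding
//   ptrtoint (inttoptr (i64 C to i8*)) to i64
// masks the input down to the pointer width first, producing
//   and i64 C, 4294967295
// rather than leaving the cast pair in place.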

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}
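
// e.g. (illustrative): with @g = constant [2 x i32] [i32 10, i32 20], walking
// the indices of "getelementptr ([2 x i32], [2 x i32]* @g, i64 0, i64 1)"
// reaches the element i32 20, so a load through that GEP folds to 20.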

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(ImmutableCallSite CS, const Function *F) {
  if (CS.isNoBuiltin() || CS.isStrictFP())
    return false;
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::round:
  case Intrinsic::masked_load:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::bitreverse:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
  case Intrinsic::is_constant:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName())
    return false;
  StringRef Name = F->getName();

  // In these cases, the length check is required. We don't want to return
  // true for a name like "cos\0blah", which strcmp would consider equal to
  // "cos" but which has length 8.
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2" || Name == "acosf" || Name == "asinf" ||
           Name == "atanf" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "cos" || Name == "cosh" ||
           Name == "ceilf" || Name == "cosf" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "floor" || Name == "fmod" ||
           Name == "fabsf" || Name == "floorf" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "log10" || Name == "logf" ||
           Name == "log10f";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sinhf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf";
  case '_':

    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2]
    // so we may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}
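
// Usage sketch (illustrative): ConstantFoldFP(sin, 0.5, FloatTy) runs the
// host's sin on 0.5 and re-wraps the result. An evaluation that sets errno or
// raises an FP exception, e.g. ConstantFoldFP(sqrt, -1.0, Ty), is detected by
// llvm_fenv_testexcept() and yields nullptr instead of a bogus constant.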

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}
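
// e.g. (illustrative): folding x86_sse2_cvttsd2si on 3.9 rounds toward zero
// and yields i32 3, while an unrepresentable value such as 1e30 makes
// convertToInteger report a non-OK status, so the call is left unfolded rather
// than guessing the target's out-of-range result.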

double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool isManifestConstant(const Constant *c) {
  if (isa<ConstantData>(c)) {
    return true;
  } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
    for (const Value *subc : c->operand_values()) {
      if (!isManifestConstant(cast<Constant>(subc)))
        return false;
    }
    return true;
  }
  return false;
}
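
// e.g. a ConstantInt, or a ConstantExpr built only from ConstantInts, is
// manifest; the address of a GlobalValue is not, since it is unknown until
// link time.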

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI,
                                 ImmutableCallSite CS) {
  if (Operands.size() == 1) {
    if (IntrinsicID == Intrinsic::is_constant) {
      // We know we have a "Constant" argument. But we only want to return
      // true for manifest constants, not those that depend on constants with
      // unknowable values, e.g. GlobalValue or BlockAddress.
      if (isManifestConstant(Operands[0]))
        return ConstantInt::getTrue(Ty->getContext());
      return nullptr;
    }
    if (isa<UndefValue>(Operands[0])) {
      // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
      // ctpop() is between 0 and the bit width, so pick 0 for undef.
      if (IntrinsicID == Intrinsic::cos ||
          IntrinsicID == Intrinsic::ctpop)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::bswap ||
          IntrinsicID == Intrinsic::bitreverse ||
          IntrinsicID == Intrinsic::launder_invariant_group ||
          IntrinsicID == Intrinsic::strip_invariant_group)
        return Operands[0];
    }

    if (isa<ConstantPointerNull>(Operands[0])) {
      // launder(null) == null == strip(null) iff in addrspace 0
      if (IntrinsicID == Intrinsic::launder_invariant_group ||
          IntrinsicID == Intrinsic::strip_invariant_group) {
        // If the instruction is not yet put in a basic block (e.g. when
        // cloning a function during inlining), the CS caller may not be
        // available. So check CS's BB first before querying CS.getCaller.
        const Function *Caller = CS.getParent() ? CS.getCaller() : nullptr;
        if (Caller &&
            !NullPointerIsDefined(
                Caller, Operands[0]->getType()->getPointerAddressSpace())) {
          return Operands[0];
        }
        return nullptr;
      }
    }

    if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (IntrinsicID == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
      }

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;

      if (IntrinsicID == Intrinsic::round) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::floor) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::ceil) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::trunc) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::rint) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::nearbyint) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), V);
      }
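
      // e.g. llvm.floor.f64(2.7) folds to 2.0 and llvm.trunc.f64(-2.7) folds
      // to -2.0 purely via APFloat::roundToIntegral, with no host libm call
      // involved.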
|
|
|
|
|
2010-09-28 05:29:20 +08:00
|
|
|
/// We only fold functions with finite arguments. Folding NaN and inf is
|
|
|
|
/// likely to be aborted with an exception anyway, and some host libms
|
|
|
|
/// have known errors raising exceptions.
|
|
|
|
if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
|
2014-04-15 12:59:12 +08:00
|
|
|
return nullptr;
|
2010-09-28 05:29:20 +08:00
|
|
|
|
2007-09-07 02:13:44 +08:00
|
|
|
/// Currently APFloat versions of these functions do not exist, so we use
|
|
|
|
/// the host native double versions. Float versions are not called
|
|
|
|
/// directly but for all these it is true (float)(f((double)arg)) ==
|
|
|
|
/// f(arg). Long double not supported yet.
|
2014-03-05 08:01:58 +08:00
|
|
|
double V = getValueAsDouble(Op);
|
2013-02-07 06:43:31 +08:00
|
|
|
|
2014-03-06 03:41:48 +08:00
|
|
|
switch (IntrinsicID) {
|
2013-02-07 06:43:31 +08:00
|
|
|
default: break;
|
|
|
|
case Intrinsic::fabs:
|
|
|
|
return ConstantFoldFP(fabs, V, Ty);
|
|
|
|
case Intrinsic::log2:
|
2015-05-07 08:05:26 +08:00
|
|
|
return ConstantFoldFP(Log2, V, Ty);
|
2013-02-07 06:43:31 +08:00
|
|
|
case Intrinsic::log:
|
|
|
|
return ConstantFoldFP(log, V, Ty);
|
|
|
|
case Intrinsic::log10:
|
|
|
|
return ConstantFoldFP(log10, V, Ty);
|
|
|
|
case Intrinsic::exp:
|
|
|
|
return ConstantFoldFP(exp, V, Ty);
|
|
|
|
case Intrinsic::exp2:
|
|
|
|
return ConstantFoldFP(exp2, V, Ty);
|
2015-07-08 11:55:47 +08:00
|
|
|
case Intrinsic::sin:
|
|
|
|
return ConstantFoldFP(sin, V, Ty);
|
|
|
|
case Intrinsic::cos:
|
|
|
|
return ConstantFoldFP(cos, V, Ty);
|
2017-01-21 08:59:57 +08:00
|
|
|
case Intrinsic::sqrt:
|
|
|
|
return ConstantFoldFP(sqrt, V, Ty);
|
2013-02-07 06:43:31 +08:00
|
|
|
}
|
|
|
|
|
2014-03-06 03:41:48 +08:00
|
|
|
if (!TLI)
|
2014-04-15 12:59:12 +08:00
|
|
|
return nullptr;
|
2014-03-06 03:41:48 +08:00
|
|
|
|
2017-05-13 06:11:20 +08:00
|
|
|
char NameKeyChar = Name[0];
|
|
|
|
if (Name[0] == '_' && Name.size() > 2 && Name[1] == '_')
|
|
|
|
NameKeyChar = Name[2];
|
|
|
|
|
|
|
|
switch (NameKeyChar) {
|
2007-08-08 14:55:43 +08:00
|
|
|
case 'a':
|
[Analysis] Add LibFunc_ prefix to enums in TargetLibraryInfo. (NFC)
Summary:
The LibFunc::Func enum holds enumerators named for libc functions.
Unfortunately, there are real situations, including libc implementations, where
function names are actually macros (musl uses "#define fopen64 fopen", for
example; any other transitively visible macro would have similar effects).
Strictly speaking, a conforming C++ Standard Library should provide any such
macros as functions instead (via <cstdio>). However, there are some "library"
functions which are not part of the standard, and thus not subject to this
rule (fopen64, for example). So, in order to be both portable and consistent,
the enum should not use the bare function names.
The old enum naming used a namespace LibFunc and an enum Func, with bare
enumerators. This patch changes LibFunc to be an enum with enumerators prefixed
with "LibFFunc_". (Unfortunately, a scoped enum is not sufficient to override
macros.)
There are additional changes required in clang.
Reviewers: rsmith
Subscribers: mehdi_amini, mzolotukhin, nemanjai, llvm-commits
Differential Revision: https://reviews.llvm.org/D28476
llvm-svn: 292848
2017-01-24 07:16:46 +08:00
|
|
|
if ((Name == "acos" && TLI->has(LibFunc_acos)) ||
|
2017-05-13 06:11:20 +08:00
|
|
|
(Name == "acosf" && TLI->has(LibFunc_acosf)) ||
|
|
|
|
(Name == "__acos_finite" && TLI->has(LibFunc_acos_finite)) ||
|
|
|
|
(Name == "__acosf_finite" && TLI->has(LibFunc_acosf_finite)))
|
2009-11-06 12:27:31 +08:00
|
|
|
return ConstantFoldFP(acos, V, Ty);
|
[Analysis] Add LibFunc_ prefix to enums in TargetLibraryInfo. (NFC)
Summary:
The LibFunc::Func enum holds enumerators named for libc functions.
Unfortunately, there are real situations, including libc implementations, where
function names are actually macros (musl uses "#define fopen64 fopen", for
example; any other transitively visible macro would have similar effects).
Strictly speaking, a conforming C++ Standard Library should provide any such
macros as functions instead (via <cstdio>). However, there are some "library"
functions which are not part of the standard, and thus not subject to this
rule (fopen64, for example). So, in order to be both portable and consistent,
the enum should not use the bare function names.
The old enum naming used a namespace LibFunc and an enum Func, with bare
enumerators. This patch changes LibFunc to be an enum with enumerators prefixed
with "LibFFunc_". (Unfortunately, a scoped enum is not sufficient to override
macros.)
There are additional changes required in clang.
Reviewers: rsmith
Subscribers: mehdi_amini, mzolotukhin, nemanjai, llvm-commits
Differential Revision: https://reviews.llvm.org/D28476
llvm-svn: 292848
2017-01-24 07:16:46 +08:00
|
|
|
else if ((Name == "asin" && TLI->has(LibFunc_asin)) ||
|
2017-05-13 06:11:20 +08:00
|
|
|
(Name == "asinf" && TLI->has(LibFunc_asinf)) ||
|
|
|
|
(Name == "__asin_finite" && TLI->has(LibFunc_asin_finite)) ||
|
|
|
|
(Name == "__asinf_finite" && TLI->has(LibFunc_asinf_finite)))
|
2009-11-06 12:27:31 +08:00
|
|
|
return ConstantFoldFP(asin, V, Ty);
|
[Analysis] Add LibFunc_ prefix to enums in TargetLibraryInfo. (NFC)
Summary:
The LibFunc::Func enum holds enumerators named for libc functions.
Unfortunately, there are real situations, including libc implementations, where
function names are actually macros (musl uses "#define fopen64 fopen", for
example; any other transitively visible macro would have similar effects).
Strictly speaking, a conforming C++ Standard Library should provide any such
macros as functions instead (via <cstdio>). However, there are some "library"
functions which are not part of the standard, and thus not subject to this
rule (fopen64, for example). So, in order to be both portable and consistent,
the enum should not use the bare function names.
The old enum naming used a namespace LibFunc and an enum Func, with bare
enumerators. This patch changes LibFunc to be an enum with enumerators prefixed
with "LibFFunc_". (Unfortunately, a scoped enum is not sufficient to override
macros.)
There are additional changes required in clang.
Reviewers: rsmith
Subscribers: mehdi_amini, mzolotukhin, nemanjai, llvm-commits
Differential Revision: https://reviews.llvm.org/D28476
llvm-svn: 292848
2017-01-24 07:16:46 +08:00
|
|
|
else if ((Name == "atan" && TLI->has(LibFunc_atan)) ||
|
|
|
|
(Name == "atanf" && TLI->has(LibFunc_atanf)))
|
2009-11-06 12:27:31 +08:00
|
|
|
return ConstantFoldFP(atan, V, Ty);
|
2007-08-08 14:55:43 +08:00
|
|
|
break;
|
|
|
|
case 'c':
|
[Analysis] Add LibFunc_ prefix to enums in TargetLibraryInfo. (NFC)
Summary:
The LibFunc::Func enum holds enumerators named for libc functions.
Unfortunately, there are real situations, including libc implementations, where
function names are actually macros (musl uses "#define fopen64 fopen", for
example; any other transitively visible macro would have similar effects).
Strictly speaking, a conforming C++ Standard Library should provide any such
macros as functions instead (via <cstdio>). However, there are some "library"
functions which are not part of the standard, and thus not subject to this
rule (fopen64, for example). So, in order to be both portable and consistent,
the enum should not use the bare function names.
The old enum naming used a namespace LibFunc and an enum Func, with bare
enumerators. This patch changes LibFunc to be an enum with enumerators prefixed
with "LibFFunc_". (Unfortunately, a scoped enum is not sufficient to override
macros.)
There are additional changes required in clang.
Reviewers: rsmith
Subscribers: mehdi_amini, mzolotukhin, nemanjai, llvm-commits
Differential Revision: https://reviews.llvm.org/D28476
llvm-svn: 292848
2017-01-24 07:16:46 +08:00
|
|
|
if ((Name == "ceil" && TLI->has(LibFunc_ceil)) ||
|
|
|
|
(Name == "ceilf" && TLI->has(LibFunc_ceilf)))
|
2009-11-06 12:27:31 +08:00
|
|
|
return ConstantFoldFP(ceil, V, Ty);
|
[Analysis] Add LibFunc_ prefix to enums in TargetLibraryInfo. (NFC)
Summary:
The LibFunc::Func enum holds enumerators named for libc functions.
Unfortunately, there are real situations, including libc implementations, where
function names are actually macros (musl uses "#define fopen64 fopen", for
example; any other transitively visible macro would have similar effects).
Strictly speaking, a conforming C++ Standard Library should provide any such
macros as functions instead (via <cstdio>). However, there are some "library"
functions which are not part of the standard, and thus not subject to this
rule (fopen64, for example). So, in order to be both portable and consistent,
the enum should not use the bare function names.
The old enum naming used a namespace LibFunc and an enum Func, with bare
enumerators. This patch changes LibFunc to be an enum with enumerators prefixed
with "LibFFunc_". (Unfortunately, a scoped enum is not sufficient to override
macros.)
There are additional changes required in clang.
Reviewers: rsmith
Subscribers: mehdi_amini, mzolotukhin, nemanjai, llvm-commits
Differential Revision: https://reviews.llvm.org/D28476
llvm-svn: 292848
2017-01-24 07:16:46 +08:00
|
|
|
else if ((Name == "cos" && TLI->has(LibFunc_cos)) ||
|
|
|
|
(Name == "cosf" && TLI->has(LibFunc_cosf)))
|
2009-11-06 12:27:31 +08:00
|
|
|
return ConstantFoldFP(cos, V, Ty);
|
[Analysis] Add LibFunc_ prefix to enums in TargetLibraryInfo. (NFC)
Summary:
The LibFunc::Func enum holds enumerators named for libc functions.
Unfortunately, there are real situations, including libc implementations, where
function names are actually macros (musl uses "#define fopen64 fopen", for
example; any other transitively visible macro would have similar effects).
Strictly speaking, a conforming C++ Standard Library should provide any such
macros as functions instead (via <cstdio>). However, there are some "library"
functions which are not part of the standard, and thus not subject to this
rule (fopen64, for example). So, in order to be both portable and consistent,
the enum should not use the bare function names.
The old enum naming used a namespace LibFunc and an enum Func, with bare
enumerators. This patch changes LibFunc to be an enum with enumerators prefixed
with "LibFFunc_". (Unfortunately, a scoped enum is not sufficient to override
macros.)
There are additional changes required in clang.
Reviewers: rsmith
Subscribers: mehdi_amini, mzolotukhin, nemanjai, llvm-commits
Differential Revision: https://reviews.llvm.org/D28476
llvm-svn: 292848
2017-01-24 07:16:46 +08:00
|
|
|
else if ((Name == "cosh" && TLI->has(LibFunc_cosh)) ||
|
2017-05-13 06:11:20 +08:00
|
|
|
(Name == "coshf" && TLI->has(LibFunc_coshf)) ||
|
|
|
|
(Name == "__cosh_finite" && TLI->has(LibFunc_cosh_finite)) ||
|
|
|
|
(Name == "__coshf_finite" && TLI->has(LibFunc_coshf_finite)))
|
2009-11-06 12:27:31 +08:00
|
|
|
return ConstantFoldFP(cosh, V, Ty);
|
2007-08-08 14:55:43 +08:00
|
|
|
break;
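        // Illustrative fold for the cases above (values not from the
        // source): with LibFunc_cos available, "call double @cos(double
        // 0.0)" folds to 1.0. ConstantFoldFP evaluates the host libm
        // function and gives up if the evaluation raised an FP exception.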
      case 'e':
        if ((Name == "exp" && TLI->has(LibFunc_exp)) ||
            (Name == "expf" && TLI->has(LibFunc_expf)) ||
            (Name == "__exp_finite" && TLI->has(LibFunc_exp_finite)) ||
            (Name == "__expf_finite" && TLI->has(LibFunc_expf_finite)))
          return ConstantFoldFP(exp, V, Ty);
        if ((Name == "exp2" && TLI->has(LibFunc_exp2)) ||
            (Name == "exp2f" && TLI->has(LibFunc_exp2f)) ||
            (Name == "__exp2_finite" && TLI->has(LibFunc_exp2_finite)) ||
            (Name == "__exp2f_finite" && TLI->has(LibFunc_exp2f_finite)))
          // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
          // C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        break;
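        // Illustrative: exp2(3.0) is evaluated as pow(2.0, 3.0) == 8.0; the
        // substitution is exact, since pow(2, x) and exp2(x) agree for all x.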
      case 'f':
        if ((Name == "fabs" && TLI->has(LibFunc_fabs)) ||
            (Name == "fabsf" && TLI->has(LibFunc_fabsf)))
          return ConstantFoldFP(fabs, V, Ty);
        else if ((Name == "floor" && TLI->has(LibFunc_floor)) ||
                 (Name == "floorf" && TLI->has(LibFunc_floorf)))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if ((Name == "log" && V > 0 && TLI->has(LibFunc_log)) ||
            (Name == "logf" && V > 0 && TLI->has(LibFunc_logf)) ||
            (Name == "__log_finite" && V > 0 &&
             TLI->has(LibFunc_log_finite)) ||
            (Name == "__logf_finite" && V > 0 &&
             TLI->has(LibFunc_logf_finite)))
          return ConstantFoldFP(log, V, Ty);
        else if ((Name == "log10" && V > 0 && TLI->has(LibFunc_log10)) ||
                 (Name == "log10f" && V > 0 && TLI->has(LibFunc_log10f)) ||
                 (Name == "__log10_finite" && V > 0 &&
                  TLI->has(LibFunc_log10_finite)) ||
                 (Name == "__log10f_finite" && V > 0 &&
                  TLI->has(LibFunc_log10f_finite)))
          return ConstantFoldFP(log10, V, Ty);
        break;
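        // Note the V > 0 guards above: log of zero or of a negative value
        // would set errno or yield -inf/NaN in the host library, so those
        // calls stay unfolded; log(1.0) folds to 0.0, log(-1.0) does not.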
      case 'r':
        if ((Name == "round" && TLI->has(LibFunc_round)) ||
            (Name == "roundf" && TLI->has(LibFunc_roundf)))
          return ConstantFoldFP(round, V, Ty);
        break;
      case 's':
        if ((Name == "sin" && TLI->has(LibFunc_sin)) ||
            (Name == "sinf" && TLI->has(LibFunc_sinf)))
          return ConstantFoldFP(sin, V, Ty);
        else if ((Name == "sinh" && TLI->has(LibFunc_sinh)) ||
                 (Name == "sinhf" && TLI->has(LibFunc_sinhf)) ||
                 (Name == "__sinh_finite" && TLI->has(LibFunc_sinh_finite)) ||
                 (Name == "__sinhf_finite" && TLI->has(LibFunc_sinhf_finite)))
          return ConstantFoldFP(sinh, V, Ty);
        else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc_sqrt)) ||
                 (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc_sqrtf)))
          return ConstantFoldFP(sqrt, V, Ty);
        break;
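        // As with log, sqrt is guarded (V >= 0) so that e.g. sqrt(-1.0),
        // which would yield NaN and set errno, stays unfolded, while
        // sqrt(4.0) folds to 2.0.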
      case 't':
        if ((Name == "tan" && TLI->has(LibFunc_tan)) ||
            (Name == "tanf" && TLI->has(LibFunc_tanf)))
          return ConstantFoldFP(tan, V, Ty);
        else if ((Name == "tanh" && TLI->has(LibFunc_tanh)) ||
                 (Name == "tanhf" && TLI->has(LibFunc_tanhf)))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }

      return nullptr;
    }
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (IntrinsicID) {
      case Intrinsic::bswap:
        return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::bitreverse:
        return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf(), Op->getValue());

        bool lost = false;
        APFloat::opStatus status = Val.convert(
            Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(Ty->getContext(), Val);
      }
      default:
        return nullptr;
      }
    }
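    // Illustrative folds for the integer cases above:
    //   llvm.bswap.i32(0x12345678)  ->  0x78563412
    //   llvm.ctpop.i8(0x2D)         ->  4    (0b00101101 has four set bits)
    //   llvm.bitreverse.i8(0x01)    ->  0x80
    // convert_from_fp16 widens an IEEE half to Ty; every half value is
    // exactly representable in float or double, hence the "always precise"
    // assertion.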
    // Support ConstantVector in case we have an Undef in the top.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      auto *Op = cast<Constant>(Operands[0]);
      switch (IntrinsicID) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/false, Ty,
                                             /*IsSigned*/true);
        break;
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/true, Ty,
                                             /*IsSigned*/true);
        break;
      }
    }

    return nullptr;
  }
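  // For the SSE conversions above, the cvt* forms round to nearest (ties to
  // even) while the cvtt* forms truncate toward zero; e.g. for a scalar lane
  // holding 2.7f, cvtss2si folds to 3 but cvttss2si folds to 2.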
  if (Operands.size() == 2) {
    if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;
      double Op1V = getValueAsDouble(Op1);

      if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return nullptr;

        double Op2V = getValueAsDouble(Op2);
        if (IntrinsicID == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        if (IntrinsicID == Intrinsic::copysign) {
          APFloat V1 = Op1->getValueAPF();
          const APFloat &V2 = Op2->getValueAPF();
          V1.copySign(V2);
          return ConstantFP::get(Ty->getContext(), V1);
        }

        if (IntrinsicID == Intrinsic::minnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
        }

        if (IntrinsicID == Intrinsic::maxnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
        }

        if (IntrinsicID == Intrinsic::minimum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
        }

        if (IntrinsicID == Intrinsic::maximum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
        }
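        // Semantics recap (illustrative values): minnum/maxnum follow IEEE
        // minNum/maxNum and return the non-NaN operand, so minnum(1.0, NaN)
        // folds to 1.0, whereas minimum/maximum propagate NaN, so
        // minimum(1.0, NaN) folds to NaN. copysign(1.0, -2.0) folds to -1.0.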
        if (!TLI)
          return nullptr;
        if ((Name == "pow" && TLI->has(LibFunc_pow)) ||
            (Name == "powf" && TLI->has(LibFunc_powf)) ||
            (Name == "__pow_finite" && TLI->has(LibFunc_pow_finite)) ||
            (Name == "__powf_finite" && TLI->has(LibFunc_powf_finite)))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if ((Name == "fmod" && TLI->has(LibFunc_fmod)) ||
            (Name == "fmodf" && TLI->has(LibFunc_fmodf)))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if ((Name == "atan2" && TLI->has(LibFunc_atan2)) ||
            (Name == "atan2f" && TLI->has(LibFunc_atan2f)) ||
            (Name == "__atan2_finite" && TLI->has(LibFunc_atan2_finite)) ||
            (Name == "__atan2f_finite" && TLI->has(LibFunc_atan2f_finite)))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                         (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                         (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                                          (int)Op2C->getZExtValue())));
      }
      return nullptr;
    }
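    // Illustrative: llvm.powi.f64(2.0, i32 10) folds to 1024.0 via host
    // std::pow; note that the half case above reuses the float-precision
    // computation of the float case.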
    if (Operands[0]->getType()->isIntegerTy() &&
        Operands[1]->getType()->isIntegerTy()) {
      const APInt *C0, *C1;
      if (!getConstIntOrUndef(Operands[0], C0) ||
          !getConstIntOrUndef(Operands[1], C1))
        return nullptr;

      switch (IntrinsicID) {
      default: break;
      case Intrinsic::smul_with_overflow:
      case Intrinsic::umul_with_overflow:
        // Even if both operands are undef, we cannot fold muls to undef
        // in the general case. For example, on i2 there are no inputs
        // that would produce { i2 -1, i1 true } as the result.
        if (!C0 || !C1)
          return Constant::getNullValue(Ty);
        LLVM_FALLTHROUGH;
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::usub_with_overflow: {
        if (!C0 || !C1)
          return UndefValue::get(Ty);

        APInt Res;
        bool Overflow;
        switch (IntrinsicID) {
        default: llvm_unreachable("Invalid case");
        case Intrinsic::sadd_with_overflow:
          Res = C0->sadd_ov(*C1, Overflow);
          break;
        case Intrinsic::uadd_with_overflow:
          Res = C0->uadd_ov(*C1, Overflow);
          break;
        case Intrinsic::ssub_with_overflow:
          Res = C0->ssub_ov(*C1, Overflow);
          break;
        case Intrinsic::usub_with_overflow:
          Res = C0->usub_ov(*C1, Overflow);
          break;
        case Intrinsic::smul_with_overflow:
          Res = C0->smul_ov(*C1, Overflow);
          break;
        case Intrinsic::umul_with_overflow:
          Res = C0->umul_ov(*C1, Overflow);
          break;
        }
        Constant *Ops[] = {
          ConstantInt::get(Ty->getContext(), Res),
          ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
        };
        return ConstantStruct::get(cast<StructType>(Ty), Ops);
      }
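      // Illustrative: llvm.sadd.with.overflow.i8(100, 100) folds to
      // { i8 -56, i1 true } (100 + 100 == 200 wraps to -56 and overflows),
      // and llvm.uadd.with.overflow.i8(200, 100) to { i8 44, i1 true }.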
      case Intrinsic::uadd_sat:
      case Intrinsic::sadd_sat:
        if (!C0 && !C1)
          return UndefValue::get(Ty);
        if (!C0 || !C1)
          return Constant::getAllOnesValue(Ty);
        if (IntrinsicID == Intrinsic::uadd_sat)
          return ConstantInt::get(Ty, C0->uadd_sat(*C1));
        else
          return ConstantInt::get(Ty, C0->sadd_sat(*C1));
      case Intrinsic::usub_sat:
      case Intrinsic::ssub_sat:
        if (!C0 && !C1)
          return UndefValue::get(Ty);
        if (!C0 || !C1)
          return Constant::getNullValue(Ty);
        if (IntrinsicID == Intrinsic::usub_sat)
          return ConstantInt::get(Ty, C0->usub_sat(*C1));
        else
          return ConstantInt::get(Ty, C0->ssub_sat(*C1));
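      // Illustrative: llvm.uadd.sat.i8(200, 100) clamps to 255 and
      // llvm.usub.sat.i8(10, 20) clamps to 0. With exactly one undef
      // operand, a suitable choice of the undef value can always reach
      // all-ones (for the adds) or zero (for the subs), hence the folds
      // above.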
      case Intrinsic::cttz:
      case Intrinsic::ctlz:
        assert(C1 && "Must be constant int");

        // cttz(0, 1) and ctlz(0, 1) are undef.
        if (C1->isOneValue() && (!C0 || C0->isNullValue()))
          return UndefValue::get(Ty);
        if (!C0)
          return Constant::getNullValue(Ty);
        if (IntrinsicID == Intrinsic::cttz)
          return ConstantInt::get(Ty, C0->countTrailingZeros());
        else
          return ConstantInt::get(Ty, C0->countLeadingZeros());
      }

      return nullptr;
    }
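    // Illustrative: llvm.cttz.i32(8, i1 false) folds to 3 and
    // llvm.ctlz.i32(8, i1 false) to 28; with the is_zero_undef flag set, a
    // zero (or undef) first operand folds to undef, per the comment above.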
    // Support ConstantVector in case we have an Undef in the top.
    if ((isa<ConstantVector>(Operands[0]) ||
         isa<ConstantDataVector>(Operands[0])) &&
        // Check for default rounding mode.
        // FIXME: Support other rounding modes?
        isa<ConstantInt>(Operands[1]) &&
        cast<ConstantInt>(Operands[1])->getValue() == 4) {
      auto *Op = cast<Constant>(Operands[0]);
      switch (IntrinsicID) {
      default: break;
      case Intrinsic::x86_avx512_vcvtss2si32:
      case Intrinsic::x86_avx512_vcvtss2si64:
      case Intrinsic::x86_avx512_vcvtsd2si32:
      case Intrinsic::x86_avx512_vcvtsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/false, Ty,
                                             /*IsSigned*/true);
        break;
      case Intrinsic::x86_avx512_vcvtss2usi32:
      case Intrinsic::x86_avx512_vcvtss2usi64:
      case Intrinsic::x86_avx512_vcvtsd2usi32:
      case Intrinsic::x86_avx512_vcvtsd2usi64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/false, Ty,
                                             /*IsSigned*/false);
        break;
      case Intrinsic::x86_avx512_cvttss2si:
      case Intrinsic::x86_avx512_cvttss2si64:
      case Intrinsic::x86_avx512_cvttsd2si:
      case Intrinsic::x86_avx512_cvttsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/true, Ty,
                                             /*IsSigned*/true);
        break;
      case Intrinsic::x86_avx512_cvttss2usi:
      case Intrinsic::x86_avx512_cvttss2usi64:
      case Intrinsic::x86_avx512_cvttsd2usi:
      case Intrinsic::x86_avx512_cvttsd2usi64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/true, Ty,
                                             /*IsSigned*/false);
        break;
      }
    }
    return nullptr;
  }
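  // In the AVX-512 block above, the rounding operand value 4 corresponds to
  // _MM_FROUND_CUR_DIRECTION in the x86 encoding, i.e. "use the current
  // rounding mode", which folding assumes is the default nearest-even;
  // explicit static rounding modes are conservatively not folded.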
  if (Operands.size() != 3)
    return nullptr;

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
                                                   Op3->getValueAPF(),
                                                   APFloat::rmNearestTiesToEven);
          if (s != APFloat::opInvalidOp)
            return ConstantFP::get(Ty->getContext(), V);

          return nullptr;
        }
        }
      }
    }
  }
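  // Illustrative: APFloat::fusedMultiplyAdd rounds once, so
  // llvm.fma.f64(0.1, 10.0, -1.0) folds to roughly 5.55e-17 rather than the
  // 0.0 that a separate multiply-then-add would give; only an invalid
  // operation (e.g. inf * 0) blocks the fold.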
  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to oversized inverse shift below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  return nullptr;
}
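// Illustrative: llvm.fshl.i8(0xAB, 0xCD, 4) treats its operands as the
// concatenation 0xABCD, shifts left by 4 (mod 8), and keeps the high byte:
// (0xAB << 4) | (0xCD >> 4) == 0xB0 | 0x0C == 0xBC. fshr keeps the low byte
// of the right-shifted concatenation instead.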
Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
                                 VectorType *VTy, ArrayRef<Constant *> Operands,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 ImmutableCallSite CS) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

  if (IntrinsicID == Intrinsic::masked_load) {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != VTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
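  // Illustrative: a masked load of <2 x i32> with mask <i1 1, i1 0> takes
  // lane 0 from the constant memory contents and lane 1 from the passthru
  // operand; any lane that cannot be resolved abandons the whole fold.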
  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // These intrinsics use a scalar type for their second argument.
      if (J == 1 &&
          (IntrinsicID == Intrinsic::cttz || IntrinsicID == Intrinsic::ctlz ||
           IntrinsicID == Intrinsic::powi)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, CS);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}
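// Illustrative: folding llvm.ctpop.v2i32(<i32 3, i32 255>) gathers the lane
// values 3 and 255, folds each through ConstantFoldScalarCall, and rebuilds
// the vector as <i32 2, i32 8>.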
} // end anonymous namespace
Constant *
llvm::ConstantFoldCall(ImmutableCallSite CS, Function *F,
                       ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (CS.isNoBuiltin() || CS.isStrictFP())
    return nullptr;
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, CS);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                CS);
}
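// Minimal usage sketch (hypothetical caller, not part of this file): a pass
// holding a CallInst *CI whose arguments are all Constants can collect them
// with cast<Constant>(CI->getArgOperand(I)) into a SmallVector Args and try
//   if (Constant *C = ConstantFoldCall(CI, CI->getCalledFunction(), Args, TLI))
//     CI->replaceAllUsesWith(C);
// nobuiltin and strict-FP call sites are rejected up front, as above.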
bool llvm::isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (CS.isNoBuiltin() || CS.isStrictFP())
    return false;
  Function *F = CS.getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (CS.getNumArgOperands() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(CS.getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
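        // "No-op" here means the evaluation cannot set errno or raise an FP
        // exception: log and friends are safe for NaN or any strictly
        // positive argument (e.g. log(2.5)), but not for zero or negative
        // inputs.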
      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
        break;
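        // These bounds bracket where exp/exp2 can neither overflow nor
        // underflow: exp(709.0) is about 8.2e307 and still fits in a
        // double, while exp(710.0) would overflow to +inf and set errno.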
|
|
|
|
|
[Analysis] Add LibFunc_ prefix to enums in TargetLibraryInfo. (NFC)
Summary:
The LibFunc::Func enum holds enumerators named for libc functions.
Unfortunately, there are real situations, including libc implementations, where
function names are actually macros (musl uses "#define fopen64 fopen", for
example; any other transitively visible macro would have similar effects).
Strictly speaking, a conforming C++ Standard Library should provide any such
macros as functions instead (via <cstdio>). However, there are some "library"
functions which are not part of the standard, and thus not subject to this
rule (fopen64, for example). So, in order to be both portable and consistent,
the enum should not use the bare function names.
The old enum naming used a namespace LibFunc and an enum Func, with bare
enumerators. This patch changes LibFunc to be an enum with enumerators prefixed
with "LibFFunc_". (Unfortunately, a scoped enum is not sufficient to override
macros.)
There are additional changes required in clang.
Reviewers: rsmith
Subscribers: mehdi_amini, mzolotukhin, nemanjai, llvm-commits
Differential Revision: https://reviews.llvm.org/D28476
llvm-svn: 292848
2017-01-24 07:16:46 +08:00
|
|
|
case LibFunc_sinl:
|
|
|
|
case LibFunc_sin:
|
|
|
|
case LibFunc_sinf:
|
|
|
|
case LibFunc_cosl:
|
|
|
|
case LibFunc_cos:
|
|
|
|
case LibFunc_cosf:
|
2016-11-03 04:48:11 +08:00
|
|
|
return !Op.isInfinity();
|
|
|
|
|
2017-01-24 07:16:46 +08:00
      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
2016-11-03 04:48:11 +08:00
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          double OpV = getValueAsDouble(OpC);
          return ConstantFoldFP(tan, OpV, Ty) != nullptr;
        }
        break;
      }
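      // tan has poles rather than a simple interval domain, so the case
      // above probes the host tan() through ConstantFoldFP, which returns
      // null when the host call sets errno or raises a floating-point
      // exception.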
2017-01-24 07:16:46 +08:00
      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
2016-11-03 04:48:11 +08:00
        return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
                   APFloat::cmpLessThan &&
               Op.compare(APFloat(Op.getSemantics(), "1")) !=
                   APFloat::cmpGreaterThan;
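        // asin/acos are only defined on [-1, 1]; building the bounds with
        // APFloat(Op.getSemantics(), ...) makes one comparison work for
        // every float width, and a NaN operand compares unordered, so it
        // is (correctly) let through as a quiet NaN.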
2017-01-24 07:16:46 +08:00
      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
2016-11-03 04:48:11 +08:00
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
        break;
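        // For large |x|, sinh and cosh behave like exp(|x|)/2, which stays
        // below DBL_MAX until |x| is roughly 710.5 and below FLT_MAX until
        // roughly 89.4; hence the conservative cutoffs of 710 and 89 above.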
2017-01-24 07:16:46 +08:00
      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
2016-11-03 04:48:11 +08:00
        return Op.isNaN() || Op.isZero() || !Op.isNegative();
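        // Per IEEE-754, sqrt only misbehaves on negative non-zero inputs:
        // sqrt(-0.0) is -0.0, sqrt(NaN) is a quiet NaN, and sqrt(+inf) is
        // +inf, so everything else folds without a domain error.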
      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (CS.getNumArgOperands() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(CS.getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(CS.getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
2017-01-24 07:16:46 +08:00
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
2016-11-03 04:48:11 +08:00
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType()) {
            double Op0V = getValueAsDouble(Op0C);
            double Op1V = getValueAsDouble(Op1C);
            return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
          }
        }
        break;
      }
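      // Like tan, pow has no convenient closed-form test (it can set errno
      // for 0 raised to a negative power, for overflow, and for a negative
      // base with a non-integer exponent), so the case above probes the
      // host pow() via ConstantFoldBinaryFP and folds only on quiet success.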
2017-01-24 07:16:46 +08:00
      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
2016-11-03 04:48:11 +08:00
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());
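        // fmod is exact and never overflows; C only allows a domain error
        // when the first operand is infinite or the second is zero, and
        // NaN operands simply propagate.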
      default:
        break;
      }
    }
  }

  return false;
}
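
// A minimal usage sketch, not part of the upstream file: a pass such as
// dead-code elimination can consult this predicate before deleting an
// unused libm call. The helper name is hypothetical; it assumes the
// isMathLibCallNoop declaration from llvm/Analysis/ConstantFolding.h and
// llvm/IR/CallSite.h for the CallSite wrapper.
static bool canDropDeadMathCall(CallInst *CI, const TargetLibraryInfo &TLI) {
  // A call whose result is still used cannot be dropped outright.
  if (!CI->use_empty())
    return false;
  // True means folding the call would neither set errno nor raise a
  // floating-point trap, so an unused call is a no-op.
  return isMathLibCallNoop(CallSite(CI), &TLI);
}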