//===-- ConstantFolding.cpp - Fold instructions into constants -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FEnv.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <cerrno>
#include <cmath>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// DataLayout.  This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
                             const DataLayout &TD) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy())
    return Constant::getAllOnesValue(DestTy);

  // Handle a vector->integer cast.
  if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
    VectorType *VTy = dyn_cast<VectorType>(C->getType());
    if (VTy == 0)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned NumSrcElts = VTy->getNumElements();
    Type *SrcEltTy = VTy->getElementType();

    // If the vector is a vector of floating point, convert it to vector of int
    // to simplify things.
    if (SrcEltTy->isFloatingPointTy()) {
      unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
      Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
      // Ask IR to do the conversion now that #elts line up.
      C = ConstantExpr::getBitCast(C, SrcIVTy);
    }

    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
    if (CDV == 0)
      return ConstantExpr::getBitCast(C, DestTy);

    // Now that we know that the input value is a vector of integers, just shift
    // and insert them into our result.
    unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
    APInt Result(IT->getBitWidth(), 0);
    for (unsigned i = 0; i != NumSrcElts; ++i) {
      Result <<= BitShift;
      if (TD.isLittleEndian())
        Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
      else
        Result |= CDV->getElementAsInteger(i);
    }

    return ConstantInt::get(IT, Result);
  }

  // The code below only handles casts to vectors currently.
  VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
  if (DestVTy == 0)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing.  For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First thing is first.  We only want to think about integer here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, TD);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer, if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same.  Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = TD.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
    if (!Src)  // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}
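
// Worked example for FoldBitCast (illustrative): on a little-endian target,
// folding "bitcast (<2 x i64> <i64 1, i64 2> to i128)" produces the i128 whose
// low 64 bits are 1 and whose high 64 bits are 2, because the vector-to-integer
// loop ORs the last element in first and shifts it up past the earlier ones.
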
/// IsConstantOffsetFromGlobal - If this constant is actually a constant offset
/// from a global, return the global and the constant.  Because of
/// constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                       APInt &Offset, const DataLayout &TD) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    Offset.clearAllBits();
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) {
    // If the base isn't a global+constant, we aren't either.
    if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
      return false;

    // Otherwise, add any offset that our operands provide.
    return GEP->accumulateConstantOffset(TD, Offset);
  }

  return false;
}
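
// Example for IsConstantOffsetFromGlobal (illustrative; @g is a hypothetical
// global): "getelementptr ([10 x i8]* @g, i32 0, i32 4)" yields GV = @g and
// Offset = 4, while a bare GlobalValue yields an offset of zero.
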
/// ReadDataFromGlobal - Recursive helper to read bits out of global.  C is the
/// constant being copied out of.  ByteOffset is an offset into C.  CurPtr is
/// the pointer to copy results into and BytesLeft is the number of bytes left
/// in the CurPtr buffer.  TD is the target data.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                               unsigned char *CurPtr, unsigned BytesLeft,
                               const DataLayout &TD) {
  assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!TD.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    if (CFP->getType()->isFloatTy()){
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    if (CFP->getType()->isHalfTy()){
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    return false;
  }

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = TD.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (1) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, TD))
        return false;

      ++Index;

      // Check to see if we read from the last struct element, if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset-CurEltOffset-ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset-CurEltOffset-ByteOffset;
      BytesLeft -= NextEltOffset-CurEltOffset-ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = cast<SequentialType>(C->getType())->getElementType();
    uint64_t EltSize = TD.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = cast<VectorType>(C->getType())->getNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, TD))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, TD);
  }

  // Otherwise, unknown initializer type.
  return false;
}
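
// Example for ReadDataFromGlobal (illustrative): copying an i32 with value
// 0x01020304 at ByteOffset 0 on a little-endian target writes the bytes
// 0x04, 0x03, 0x02, 0x01 into CurPtr; a big-endian target writes them in the
// opposite order.
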
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
                                                 const DataLayout &TD) {
  Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
  IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a float/double load, we can try folding it as an int32/64 load
    // and then bitcast the result.  This can be useful for union cases.  Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16PtrTy(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32PtrTy(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64PtrTy(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = IntegerType::get(C->getContext(),
                               TD.getTypeAllocSizeInBits(LoadTy));
      MapTy = PointerType::getUnqual(MapTy);
    } else
      return 0;

    C = FoldBitCast(C, MapTy, TD);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD))
      return FoldBitCast(Res, LoadTy, TD);
    return 0;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0) return 0;

  GlobalValue *GVal;
  APInt Offset(TD.getPointerSizeInBits(), 0);
  if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
    return 0;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return 0;

  // If we're loading off the beginning of the global, some bytes may be valid,
  // but we don't try to handle this.
  if (Offset.isNegative()) return 0;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset.getZExtValue() >=
      TD.getTypeAllocSize(GV->getInitializer()->getType()))
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
                          BytesLoaded, TD))
    return 0;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (TD.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded-1-i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
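
// Example for FoldReinterpretLoadFromConstPtr (illustrative): an i16 load from
// a constant global whose initializer holds the bytes {0x01, 0x02} folds to
// 0x0201 on a little-endian target and 0x0102 on a big-endian one; loads wider
// than 32 bytes are rejected above.
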
/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable.  If this is not determinable,
/// return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
                                             const DataLayout *TD) {
  // First, try the easy cases:
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  // If the loaded value isn't a constant expr, we can't handle it.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return 0;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
      if (GV->isConstant() && GV->hasDefinitiveInitializer())
        if (Constant *V =
            ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
  }

  // Instead of loading the constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) {
    unsigned StrLen = Str.size();
    Type *Ty = cast<PointerType>(CE->getType())->getElementType();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (TD->isLittleEndian()) {
        for (signed i = StrLen-1; i >= 0; i--) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned i = 0; i < StrLen; i++) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NULL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV =
        dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      Type *ResTy = cast<PointerType>(C->getType())->getElementType();
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(ResTy);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(ResTy);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (TD)
    return FoldReinterpretLoadFromConstPtr(CE, *TD);
  return 0;
}
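
// Example for ConstantFoldLoadFromConstPtr (illustrative): an i32 load from a
// pointer that getConstantStringInfo recognizes as the constant C string "abc"
// takes the string fast path above and folds to 0x00636261 on a little-endian
// target (three characters plus the nul fill exactly four bytes).
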
static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
  if (LI->isVolatile()) return 0;

  if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, TD);

  return 0;
}

/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together.  If target data info is available, it is provided as DL,
/// otherwise DL is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                           Constant *Op1, const DataLayout *DL){
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And && DL) {
    unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()->getScalarType());
    APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
    APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
    ComputeMaskedBits(Op0, KnownZero0, KnownOne0, DL);
    ComputeMaskedBits(Op1, KnownZero1, KnownOne1, DL);
    if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    APInt KnownZero = KnownZero0 | KnownZero1;
    APInt KnownOne = KnownOne0 & KnownOne1;
    if ((KnownZero | KnownOne).isAllOnesValue()) {
      return ConstantInt::get(Op0->getType(), KnownOne);
    }
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant.  This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub && DL) {
    GlobalValue *GV1, *GV2;
    unsigned PtrSize = DL->getPointerSizeInBits();
    unsigned OpSize = DL->getTypeSizeInBits(Op0->getType());
    APInt Offs1(PtrSize, 0), Offs2(PtrSize, 0);

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *DL) &&
          GV1 == GV2) {
        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return 0;
}
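
// Example for SymbolicallyEvaluateBinop (illustrative; @g is a hypothetical
// global): with DataLayout available, subtracting "ptrtoint (&@g plus offset
// 3)" from "ptrtoint (&@g plus offset 8)" folds to the constant 5, since both
// operands are constant offsets from the same global.
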
/// CastGEPIndices - If array indices are not pointer-sized integers,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
                                Type *ResultTy, const DataLayout *TD,
                                const TargetLibraryInfo *TLI) {
  if (!TD) return 0;
  Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(Ops[0]->getType(),
                                                        Ops.slice(1, i-1)))) &&
        Ops[i]->getType() != IntPtrTy) {
      Any = true;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      IntPtrTy,
                                                                      true),
                                              Ops[i], IntPtrTy));
    } else
      NewIdxs.push_back(Ops[i]);
  }
  if (!Any) return 0;

  Constant *C =
    ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
      C = Folded;
  return C;
}
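
// Example for CastGEPIndices (illustrative): with 64-bit pointers, an i32
// array index inside a constant getelementptr expression is rewritten here as
// a sign-extended i64 index, which is the canonical form the assertion in
// SymbolicallyEvaluateGEP below expects.
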
/// Strip the pointer casts, but preserve the address space information.
static Constant* StripPtrCastKeepAS(Constant* Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = NewPtrTy->getElementType()->getPointerTo(
      OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getBitCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
                                         Type *ResultTy, const DataLayout *TD,
                                         const TargetLibraryInfo *TLI) {
  Constant *Ptr = Ops[0];
  if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
      !Ptr->getType()->isPointerTy())
    return 0;

  Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 &&
          cast<PointerType>(ResultTy)->getElementType()->isIntegerTy(8)) {
        ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((CE == 0 || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResultTy);
          if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
            Res = ConstantFoldConstantExpression(ResCE, TD, TLI);
          return Res;
        }
      }
      return 0;
    }

  unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
  APInt Offset =
    APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
                                         makeArrayRef((Value *const*)
                                                        Ops.data() + 1,
                                                      Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
    SmallVector<Value *, 4> NestedOps(GEP->op_begin()+1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
      if (!isa<ConstantInt>(NestedOps[i])) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    Offset += APInt(BitWidth,
                    TD->getIndexedOffset(Ptr->getType(), NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->getOpcode() == Instruction::IntToPtr)
      if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
  if (Ptr->isNullValue() || BasePtr != 0) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr);
    return ConstantExpr::getIntToPtr(C, ResultTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
  SmallVector<Constant*, 32> NewIdxs;
  do {
    if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
      if (ATy->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        // Only handle pointers to sized types, not pointers to functions.
        if (!ATy->getElementType()->isSized())
          return 0;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
      IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
      if (ElemSize == 0)
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down), to compute the index at this level.
        APInt NewIdx = Offset.udiv(ElemSize);
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
      Ty = ATy->getElementType();
    } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *TD->getStructLayout(STy);
      if (Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    } else {
      // We've reached some non-indexable type.
      break;
    }
  } while (Ty != cast<PointerType>(ResultTy)->getElementType());

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return 0;

  // Create a GEP.
  Constant *C =
    ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
  assert(cast<PointerType>(C->getType())->getElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != cast<PointerType>(ResultTy)->getElementType())
    C = FoldBitCast(C, ResultTy, *TD);

  return C;
}
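
// Example for SymbolicallyEvaluateGEP (illustrative; @g is a hypothetical
// "[4 x i32]" global and pointers are 64 bits wide): the over-indexed
// expression "getelementptr ([4 x i32]* @g, i32 0, i32 6)" has a byte offset
// of 24 and is re-formed as "getelementptr (@g, i64 1, i64 2)".
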
//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

/// ConstantFoldInstruction - Try to constant fold the specified instruction.
/// If successful, the constant result is returned, if not, null is returned.
/// Note that this fails if not all of the operands are constant.  Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I,
                                        const DataLayout *TD,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = 0;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = PN->getIncomingValue(i);
      // If the incoming value is undef then skip it.  Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      Constant *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return 0;
      // Fold the PHI's operands.
      if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
        C = ConstantFoldConstantExpression(NewC, TD, TLI);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return 0;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperands.
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
    Constant *Op = dyn_cast<Constant>(*i);
    if (!Op)
      return 0;  // All operands not constant!

    // Fold the Instruction's operands.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
      Op = ConstantFoldConstantExpression(NewCE, TD, TLI);

    Ops.push_back(Op);
  }

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);

  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, TD);

  if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I))
    return ConstantExpr::getInsertValue(
                                cast<Constant>(IVI->getAggregateOperand()),
                                cast<Constant>(IVI->getInsertedValueOperand()),
                                IVI->getIndices());

  if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I))
    return ConstantExpr::getExtractValue(
                                    cast<Constant>(EVI->getAggregateOperand()),
                                    EVI->getIndices());

  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI);
}

static Constant *
ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout *TD,
                                   const TargetLibraryInfo *TLI,
                                   SmallPtrSet<ConstantExpr *, 4> &FoldedOps) {
  SmallVector<Constant *, 8> Ops;
  for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e;
       ++i) {
    Constant *NewC = cast<Constant>(*i);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) {
      if (FoldedOps.insert(NewCE))
        NewC = ConstantFoldConstantExpressionImpl(NewCE, TD, TLI, FoldedOps);
    }
    Ops.push_back(NewC);
  }

  if (CE->isCompare())
    return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);
  return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI);
}

/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified DataLayout.  If successful, the constant result is
/// returned, if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
                                               const DataLayout *TD,
                                               const TargetLibraryInfo *TLI) {
  SmallPtrSet<ConstantExpr *, 4> FoldedOps;
  return ConstantFoldConstantExpressionImpl(CE, TD, TLI, FoldedOps);
}

/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified opcode and operands.  If successful, the constant result is
/// returned, if not, null is returned.  Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout *TD,
                                         const TargetLibraryInfo *TLI) {
  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode)) {
    if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
      if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
        return C;

    return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
  }

  switch (Opcode) {
  default: return 0;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (Function *F = dyn_cast<Function>(Ops.back()))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
    return 0;
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair.  This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD && CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        if (TD->getPointerSizeInBits() < InWidth) {
          Constant *Mask =
            ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
                                                  TD->getPointerSizeInBits()));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size.  This requires knowing the width of a
    // pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
      if (TD &&
          TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() &&
          CE->getOpcode() == Instruction::PtrToInt)
        return FoldBitCast(CE->getOperand(0), DestTy, *TD);

    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::BitCast:
    if (TD)
      return FoldBitCast(Ops[0], DestTy, *TD);
    return ConstantExpr::getBitCast(Ops[0], DestTy);
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  case Instruction::GetElementPtr:
    if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI))
      return C;
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
  }
}
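
// A minimal sketch of driving this entry point directly; Ctx below is a
// hypothetical, caller-owned LLVMContext and is not defined in this file.
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Constant *Args[] = { ConstantInt::get(I32, 2), ConstantInt::get(I32, 3) };
//   // Binary operators take the "easy binop" path above; ConstantExpr::get
//   // then folds the add of two ConstantInts down to i32 5.
//   Constant *Sum = ConstantFoldInstOperands(Instruction::Add, I32, Args,
//                                            /*TD=*/0, /*TLI=*/0);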

/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands.  If it fails, it
/// returns a constant expression of the specified operands.
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout *TD,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // ConstantExpr::getCompare cannot do this, because it doesn't have TD
  // around to know if bit truncation is happening.
  if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (TD && Ops1->isNullValue()) {
      Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt &&
          CE0->getType() == IntPtrTy) {
        Constant *C = CE0->getOperand(0);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
      }
    }

    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (TD && CE0->getOpcode() == CE1->getOpcode()) {
        Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());

        if (CE0->getOpcode() == Instruction::IntToPtr) {
          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if ((CE0->getOpcode() == Instruction::PtrToInt &&
             CE0->getType() == IntPtrTy &&
             CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
          return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
                                                 CE1->getOperand(0), TD, TLI);
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
                                        TD, TLI);
      Constant *RHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
                                        TD, TLI);
      unsigned OpC =
        Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      Constant *Ops[] = { LHS, RHS };
      return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
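
// Caller-side sketch, illustrative only (P, TD and TLI are hypothetical):
// compare a constant pointer expression against null; if none of the
// pointer-cast folds above apply, the result is simply the ConstantExpr form
// of the compare.
//
//   Constant *Null = Constant::getNullValue(P->getType());
//   Constant *R = ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, P, Null,
//                                                 TD, TLI);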

/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by
/// the constant expression, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return 0;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (C == 0) return 0;
  }
  return C;
}
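
// Sketch of the intended use (the global and indices are hypothetical): for
//   @arr = constant [3 x i32] [i32 10, i32 20, i32 30]
// a load of "getelementptr ([3 x i32]* @arr, i64 0, i64 2)" walks the
// initializer with getAggregateElement and yields i32 30; a non-zero first
// index would step over the object, so the routine gives up and returns null.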

/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
/// indices (with an *implied* zero pointer index that is not in the list),
/// return the constant value being addressed by a virtual load, or null if
/// something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                                  ArrayRef<Constant*> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    C = C->getAggregateElement(Indices[i]);
    if (C == 0) return 0;
  }
  return C;
}
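
// Illustrative only: the walk is the same as in the GEP-constantexpr variant
// above, but the caller passes the indices directly and omits the leading
// zero pointer index.  Ctx and Init below are hypothetical:
//
//   Constant *Idx[] = { ConstantInt::get(Type::getInt64Ty(Ctx), 2) };
//   Constant *Elt = ConstantFoldLoadThroughGEPIndices(Init, Idx); // element #2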

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.
bool
llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::sqrt:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
    return true;
  default:
    return false;
  case 0: break;
  }

  if (!F->hasName()) return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required.  We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal to
  // "cos", but has length 8.
  switch (Name[0]) {
  default: return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2";
  case 'c':
    return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
  case 'e':
    return Name == "exp" || Name == "exp2";
  case 'f':
    return Name == "fabs" || Name == "fmod" || Name == "floor";
  case 'l':
    return Name == "log" || Name == "log10";
  case 'p':
    return Name == "pow";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh";
  }
}
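
// Example of the guard in practice, illustrative only (CI is a hypothetical
// CallInst and Args a hypothetical constant-argument list): only when this
// predicate says yes is it worth materializing the arguments for
// ConstantFoldCall.
//
//   if (Function *Callee = CI->getCalledFunction())
//     if (canConstantFoldCallTo(Callee))
//       Constant *R = ConstantFoldCall(Callee, Args, TLI);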

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}
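
// Behaviour sketch (host-dependent, illustrative only): a call such as
// ConstantFoldFP(log, -1.0, Ty) raises a floating-point exception flag in the
// host libm, so the guard above bails out and returns null rather than baking
// a host-specific NaN into the IR.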

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                      double V, double W, Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}
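
// Illustrative use from the two-operand path further down (values are
// hypothetical): ConstantFoldBinaryFP(pow, 2.0, 10.0, DoubleTy) evaluates
// pow(2.0, 10.0) in the host libm and wraps the result as a ConstantFP, or
// returns null if the call raised a floating-point exception.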

/// ConstantFoldConvertToInt - Attempt an SSE floating point to integer
/// conversion of a constant floating point. If roundTowardZero is false, the
/// default IEEE rounding is used (toward nearest, ties to even). This matches
/// the behavior of the non-truncating SSE instructions in the default rounding
/// mode. The desired integer type Ty is used to select how many bits are
/// available for the result. Returns null if the conversion cannot be
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
static Constant *ConstantFoldConvertToInt(const APFloat &Val,
                                          bool roundTowardZero, Type *Ty) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
                                              : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
                                                  /*isSigned=*/true, mode,
                                                  &isExact);
  if (status != APFloat::opOK && status != APFloat::opInexact)
    return 0;
  return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}
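
// Illustrative only: with roundTowardZero=false (the cvtss2si family) a value
// of 2.5 converts with ties-to-even to 2, while roundTowardZero=true (the
// cvttss2si family) truncates -1.7 to -1; out-of-range and NaN inputs make
// convertToInteger report an error status, so the helper returns null.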

/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (!F->hasName()) return 0;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();
  if (Operands.size() == 1) {
    if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(F->getContext(), Val.bitcastToAPInt());
      }
      if (!TLI)
        return 0;

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;

      /// We only fold functions with finite arguments. Folding NaN and inf is
      /// likely to be aborted with an exception anyway, and some host libms
      /// have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return 0;

      /// Currently APFloat versions of these functions do not exist, so we use
      /// the host native double versions.  Float versions are not called
      /// directly but for all these it is true (float)(f((double)arg)) ==
      /// f(arg).  Long double not supported yet.
      double V;
      if (Ty->isFloatTy())
        V = Op->getValueAPF().convertToFloat();
      else if (Ty->isDoubleTy())
        V = Op->getValueAPF().convertToDouble();
      else {
        bool unused;
        APFloat APF = Op->getValueAPF();
        APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
        V = APF.convertToDouble();
      }

      switch (F->getIntrinsicID()) {
      default: break;
      case Intrinsic::fabs:
        return ConstantFoldFP(fabs, V, Ty);
#if HAVE_LOG2
      case Intrinsic::log2:
        return ConstantFoldFP(log2, V, Ty);
#endif
#if HAVE_LOG
      case Intrinsic::log:
        return ConstantFoldFP(log, V, Ty);
#endif
#if HAVE_LOG10
      case Intrinsic::log10:
        return ConstantFoldFP(log10, V, Ty);
#endif
#if HAVE_EXP
      case Intrinsic::exp:
        return ConstantFoldFP(exp, V, Ty);
#endif
#if HAVE_EXP2
      case Intrinsic::exp2:
        return ConstantFoldFP(exp2, V, Ty);
#endif
      case Intrinsic::floor:
        return ConstantFoldFP(floor, V, Ty);
      }

      switch (Name[0]) {
      case 'a':
        if (Name == "acos" && TLI->has(LibFunc::acos))
          return ConstantFoldFP(acos, V, Ty);
        else if (Name == "asin" && TLI->has(LibFunc::asin))
          return ConstantFoldFP(asin, V, Ty);
        else if (Name == "atan" && TLI->has(LibFunc::atan))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if (Name == "ceil" && TLI->has(LibFunc::ceil))
          return ConstantFoldFP(ceil, V, Ty);
        else if (Name == "cos" && TLI->has(LibFunc::cos))
          return ConstantFoldFP(cos, V, Ty);
        else if (Name == "cosh" && TLI->has(LibFunc::cosh))
          return ConstantFoldFP(cosh, V, Ty);
        else if (Name == "cosf" && TLI->has(LibFunc::cosf))
          return ConstantFoldFP(cos, V, Ty);
        break;
      case 'e':
        if (Name == "exp" && TLI->has(LibFunc::exp))
          return ConstantFoldFP(exp, V, Ty);

        if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
          // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
          // C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        }
        break;
      case 'f':
        if (Name == "fabs" && TLI->has(LibFunc::fabs))
          return ConstantFoldFP(fabs, V, Ty);
        else if (Name == "floor" && TLI->has(LibFunc::floor))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
          return ConstantFoldFP(log, V, Ty);
        else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
          return ConstantFoldFP(log10, V, Ty);
        else if (F->getIntrinsicID() == Intrinsic::sqrt &&
                 (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) {
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty);
          else // Undefined
            return Constant::getNullValue(Ty);
        }
        break;
      case 's':
        if (Name == "sin" && TLI->has(LibFunc::sin))
          return ConstantFoldFP(sin, V, Ty);
        else if (Name == "sinh" && TLI->has(LibFunc::sinh))
          return ConstantFoldFP(sinh, V, Ty);
        else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sinf" && TLI->has(LibFunc::sinf))
          return ConstantFoldFP(sin, V, Ty);
        break;
      case 't':
        if (Name == "tan" && TLI->has(LibFunc::tan))
          return ConstantFoldFP(tan, V, Ty);
        else if (Name == "tanh" && TLI->has(LibFunc::tanh))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }
      return 0;
    }

    if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (F->getIntrinsicID()) {
      case Intrinsic::bswap:
        return ConstantInt::get(F->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf, Op->getValue());

        bool lost = false;
        APFloat::opStatus status =
          Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(F->getContext(), Val);
      }
      default:
        return 0;
      }
    }

    // Support ConstantVector in case we have an Undef in the top.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      Constant *Op = cast<Constant>(Operands[0]);
      switch (F->getIntrinsicID()) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/false, Ty);
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/true, Ty);
      }
    }

    if (isa<UndefValue>(Operands[0])) {
      if (F->getIntrinsicID() == Intrinsic::bswap)
        return Operands[0];
      return 0;
    }

    return 0;
  }

  if (Operands.size() == 2) {
    if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;
      double Op1V;
      if (Ty->isFloatTy())
        Op1V = Op1->getValueAPF().convertToFloat();
      else if (Ty->isDoubleTy())
        Op1V = Op1->getValueAPF().convertToDouble();
      else {
        bool unused;
        APFloat APF = Op1->getValueAPF();
        APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
        Op1V = APF.convertToDouble();
      }

      if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return 0;

        double Op2V;
        if (Ty->isFloatTy())
          Op2V = Op2->getValueAPF().convertToFloat();
        else if (Ty->isDoubleTy())
          Op2V = Op2->getValueAPF().convertToDouble();
        else {
          bool unused;
          APFloat APF = Op2->getValueAPF();
          APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
                      &unused);
          Op2V = APF.convertToDouble();
        }

        if (F->getIntrinsicID() == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        if (!TLI)
          return 0;
        if (Name == "pow" && TLI->has(LibFunc::pow))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if (Name == "fmod" && TLI->has(LibFunc::fmod))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if (Name == "atan2" && TLI->has(LibFunc::atan2))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isHalfTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                                  (int)Op2C->getZExtValue())));
      }
      return 0;
    }

    if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (F->getIntrinsicID()) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (F->getIntrinsicID()) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(F->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(F->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(F->getReturnType()), Ops);
        }
        case Intrinsic::cttz:
          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
        case Intrinsic::ctlz:
          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
        }
      }

      return 0;
    }
    return 0;
  }
  return 0;
}
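
// End-to-end sketch of the call-folding path (values are hypothetical and for
// illustration only): for a call "i32 @llvm.ctpop.i32(i32 255)" the operand
// list passed in is { i32 255 }, the single-ConstantInt branch above selects
// Intrinsic::ctpop, and the fold produces "i32 8".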
|