Fix the bootstrap of CompilerRT with host compilers that don't support
emulating 128-bit arithmetic on 32-bit x86 targets. This should get the
bootstrap back for GCC 4.6 at least. Suggestions on better ways to do the
detection here are welcome...

llvm-svn: 165863
This commit is contained in:
parent f402f78eb7
commit eea8a48218
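Note on the fix: the crux is a preprocessor subtlety. Once HAVE_INT128_T is
unconditionally defined to either 0 or 1, "#ifdef HAVE_INT128_T" always
succeeds, because #ifdef only checks that the macro is defined, so the
__int128 paths would still be compiled on hosts that lack the type.
"#if HAVE_INT128_T" tests the macro's value instead. A minimal illustrative
sketch, not code from the commit:

    // With the macro always defined, only #if distinguishes the two cases.
    #define HAVE_INT128_T 0   // host compiler lacks __int128

    #ifdef HAVE_INT128_T      // taken: the macro is defined (to 0)
    // __int128 code here would break the GCC 4.6 / 32-bit x86 bootstrap
    #endif

    #if HAVE_INT128_T         // not taken: the macro's value is 0
    // __int128 code here is correctly compiled out
    #endif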
@@ -36,7 +36,7 @@ Diag &Diag::operator<<(const Value &V) {
 /// Hexadecimal printing for numbers too large for fprintf to handle directly.
 static void PrintHex(UIntMax Val) {
-#ifdef HAVE_INT128_T
+#if HAVE_INT128_T
   fprintf(stderr, "0x%08x%08x%08x%08x",
           (unsigned int)(Val >> 96),
           (unsigned int)(Val >> 64),
           (unsigned int)(Val >> 32),
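PrintHex exists because fprintf has no conversion specifier for __int128, so
the value is emitted as four 32-bit chunks. When HAVE_INT128_T is 0, UIntMax
is only 64 bits wide and a shift like Val >> 96 would exceed the type's
width, so the body must be excluded at preprocessing time, not merely skipped
at runtime. A hypothetical 64-bit-only fallback, for illustration only (not
the code behind this conditional):

    #include <cstdio>

    // Hypothetical fallback: print a 64-bit value as two 32-bit chunks,
    // mirroring how PrintHex splits a 128-bit value into four.
    static void PrintHex64(unsigned long long Val) {
      std::fprintf(stderr, "0x%08x%08x",
                   (unsigned int)(Val >> 32),
                   (unsigned int)(Val & 0xffffffffu));
    }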
@@ -27,7 +27,7 @@ SIntMax Value::getSIntValue() const {
   }
   if (getType().getIntegerBitWidth() == 64)
     return *reinterpret_cast<s64*>(Val);
-#ifdef HAVE_INT128_T
+#if HAVE_INT128_T
   if (getType().getIntegerBitWidth() == 128)
     return *reinterpret_cast<s128*>(Val);
 #endif
@@ -40,7 +40,7 @@ UIntMax Value::getUIntValue() const {
   return Val;
   if (getType().getIntegerBitWidth() == 64)
     return *reinterpret_cast<u64*>(Val);
-#ifdef HAVE_INT128_T
+#if HAVE_INT128_T
   if (getType().getIntegerBitWidth() == 128)
     return *reinterpret_cast<u128*>(Val);
 #endif
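getSIntValue() and getUIntValue() share the same shape: Val holds an opaque
bit-pattern whose width comes from the type descriptor, and each branch
reinterprets it at the matching width. The 128-bit branches cannot stay in
the translation unit when HAVE_INT128_T is 0, because the s128/u128 typedefs
themselves do not exist then; even statically dead code must still
type-check. A standalone analogue under assumed names (loadUInt, Storage,
and BitWidth are hypothetical, not identifiers from this commit):

    #include <cstdint>
    #include <cstring>

    // Standalone analogue: read back an integer stored as a raw bit-pattern
    // at the width recorded next to it. std::memcpy stands in for the
    // reinterpret_cast above to keep the sketch strictly portable.
    static uint64_t loadUInt(const void *Storage, unsigned BitWidth) {
      if (BitWidth == 64) {
        uint64_t V;
        std::memcpy(&V, Storage, sizeof V);
        return V;
      }
    #if defined(__SIZEOF_INT128__)
      if (BitWidth == 128) {
        unsigned __int128 V;
        std::memcpy(&V, Storage, sizeof V);
        return (uint64_t)V;  // truncated: the sketch has no 128-bit return
      }
    #endif
      return 0;  // other widths omitted from the sketch
    }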
@@ -23,15 +23,19 @@
 #include "sanitizer_common/sanitizer_common.h"
 
 // FIXME: Move this out to a config header.
+#if defined(__clang__) || _LP64
 typedef __int128 s128;
 typedef unsigned __int128 u128;
+#define HAVE_INT128_T 1
+#else
+#define HAVE_INT128_T 0
+#endif
 
 namespace __ubsan {
 
 /// \brief Largest integer types we support.
-#ifdef HAVE_INT128_T
+#if HAVE_INT128_T
 typedef s128 SIntMax;
 typedef u128 UIntMax;
 #else
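The commit message explicitly invites better detection schemes. One
alternative (an assumption here, not what this commit does) is to key off
__SIZEOF_INT128__, which newer GCC and Clang predefine precisely on targets
where __int128 is usable, rather than inferring support from compiler
identity and the _LP64 ABI macro:

    // Sketch of alternative detection via __SIZEOF_INT128__.
    #if defined(__SIZEOF_INT128__)
    typedef __int128 s128;
    typedef unsigned __int128 u128;
    #define HAVE_INT128_T 1
    #else
    #define HAVE_INT128_T 0
    #endif

This ties the decision to the one signal that tracks the feature itself, so
new compilers and targets are handled without editing the condition.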