[C++2a] Add __builtin_bit_cast, used to implement std::bit_cast
This commit adds a new builtin, __builtin_bit_cast(T, v), which performs a bit_cast from a value v to a type T. This expression can be evaluated at compile time under specific circumstances.

The compile-time evaluation currently doesn't support bit-fields, but I'm planning on fixing this in a follow-up (some of the logic for figuring this out is in CodeGen). I'm also planning follow-ups for supporting some more esoteric types that the constexpr evaluator supports, as well as extending __builtin_memcpy constexpr evaluation to use the same infrastructure.

rdar://44987528

Differential revision: https://reviews.llvm.org/D62825

llvm-svn: 364954
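For context, this builtin is the compiler hook a library std::bit_cast can be built on. A minimal sketch of such a wrapper, modeled on the helper used in this patch's own tests (not a shipped libc++ implementation):

// Minimal sketch of a std::bit_cast-style wrapper over the new builtin.
template <class To, class From>
constexpr To bit_cast(const From &from) {
  static_assert(sizeof(To) == sizeof(From));
  return __builtin_bit_cast(To, from);
}

// Because the builtin can be constant-evaluated, round trips work in a
// static_assert (assuming 32-bit int/unsigned, as the tests below do):
static_assert(bit_cast<int>(bit_cast<unsigned>(-1)) == -1);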
parent 5613874947 · commit eee944e7f9

Changed paths: clang/include/clang-c, clang/include/clang, clang/lib/AST, clang/lib/CodeGen, clang/lib/Edit, clang/lib/Lex, clang/lib/Parse, clang/lib/Sema, clang/lib/Serialization, clang/lib/StaticAnalyzer/Core, clang/test/CodeGenCXX, clang/test/SemaCXX, clang/tools/libclang, llvm
@@ -2546,7 +2546,11 @@ enum CXCursorKind {
   */
  CXCursor_OMPTargetTeamsDistributeSimdDirective = 279,

  CXCursor_LastStmt = CXCursor_OMPTargetTeamsDistributeSimdDirective,

  /** C++2a std::bit_cast expression.
   */
  CXCursor_BuiltinBitCastExpr = 280,

  CXCursor_LastStmt = CXCursor_BuiltinBitCastExpr,

  /**
   * Cursor that represents the translation unit itself.
@@ -4716,6 +4716,35 @@ public:
  }
};

/// Represents a C++2a __builtin_bit_cast(T, v) expression. Used to implement
/// std::bit_cast. These can sometimes be evaluated as part of a constant
/// expression, but otherwise CodeGen to a simple memcpy in general.
class BuiltinBitCastExpr final
    : public ExplicitCastExpr,
      private llvm::TrailingObjects<BuiltinBitCastExpr, CXXBaseSpecifier *> {
  friend class ASTStmtReader;
  friend class CastExpr;
  friend class TrailingObjects;

  SourceLocation KWLoc;
  SourceLocation RParenLoc;

public:
  BuiltinBitCastExpr(QualType T, ExprValueKind VK, CastKind CK, Expr *SrcExpr,
                     TypeSourceInfo *DstType, SourceLocation KWLoc,
                     SourceLocation RParenLoc)
      : ExplicitCastExpr(BuiltinBitCastExprClass, T, VK, CK, SrcExpr, 0,
                         DstType),
        KWLoc(KWLoc), RParenLoc(RParenLoc) {}

  SourceLocation getBeginLoc() const LLVM_READONLY { return KWLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BuiltinBitCastExprClass;
  }
};

} // namespace clang

#endif // LLVM_CLANG_AST_EXPRCXX_H
@@ -66,6 +66,10 @@ CAST_OPERATION(BitCast)
///    bool b; reinterpret_cast<char&>(b) = 'a';
CAST_OPERATION(LValueBitCast)

/// CK_LValueToRValueBitCast - A conversion that causes us to reinterpret an
/// lvalue as an rvalue of a different type. Created by __builtin_bit_cast.
CAST_OPERATION(LValueToRValueBitCast)

/// CK_LValueToRValue - A conversion which causes the extraction of
/// an r-value from the operand gl-value. The result of an r-value
/// conversion is always unqualified.
@@ -2282,6 +2282,10 @@ DEF_TRAVERSE_STMT(CXXStaticCastExpr, {
  TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})

DEF_TRAVERSE_STMT(BuiltinBitCastExpr, {
  TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})

template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseSynOrSemInitListExpr(
    InitListExpr *S, DataRecursionQueue *Queue) {
@@ -215,6 +215,19 @@ def note_constexpr_memcpy_unsupported : Note<
  "size to copy (%4) is not a multiple of size of element type %3 (%5)|"
  "source is not a contiguous array of at least %4 elements of type %3|"
  "destination is not a contiguous array of at least %4 elements of type %3}2">;
def note_constexpr_bit_cast_unsupported_type : Note<
  "constexpr bit_cast involving type %0 is not yet supported">;
def note_constexpr_bit_cast_unsupported_bitfield : Note<
  "constexpr bit_cast involving bit-field is not yet supported">;
def note_constexpr_bit_cast_invalid_type : Note<
  "bit_cast %select{from|to}0 a %select{|type with a }1"
  "%select{union|pointer|member pointer|volatile|reference}2 "
  "%select{type|member}1 is not allowed in a constant expression">;
def note_constexpr_bit_cast_invalid_subtype : Note<
  "invalid type %0 is a %select{member|base}1 of %2">;
def note_constexpr_bit_cast_indet_dest : Note<
  "indeterminate value can only initialize an object of type 'unsigned char'"
  "%select{, 'char',|}1 or 'std::byte'; %0 is invalid">;

def warn_integer_constant_overflow : Warning<
  "overflow in expression; result is %0 with type %1">,
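These notes fire when the constant evaluator meets a value it cannot represent at compile time. A condensed sketch of the padding case that produces the "indeterminate value" note, mirroring the constexpr test added later in this patch:

struct pad    { signed char x; int y; };        // 3 padding bytes after 'x'
struct no_pad { signed char x, p1, p2, p3; int y; };
static_assert(sizeof(pad) == sizeof(no_pad));

constexpr pad p{1, 2};
// p1..p3 land on padding bytes of 'p', which hold indeterminate values; only
// 'unsigned char' and 'std::byte' (plus plain 'char' when it is unsigned) may
// be initialized from them, so constant evaluation emits
// note_constexpr_bit_cast_indet_dest here.
constexpr no_pad q = __builtin_bit_cast(no_pad, p); // not a constant expression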
@@ -9734,4 +9734,8 @@ def err_builtin_launder_invalid_arg : Error<
  "%select{non-pointer|function pointer|void pointer}0 argument to "
  "'__builtin_launder' is not allowed">;

def err_bit_cast_non_trivially_copyable : Error<
  "__builtin_bit_cast %select{source|destination}0 type must be trivially copyable">;
def err_bit_cast_type_size_mismatch : Error<
  "__builtin_bit_cast source size does not equal destination size (%0 vs %1)">;
} // end of sema component.
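Both Sema errors are exercised by the SemaCXX test in this patch; a condensed illustration:

struct not_trivially_copyable { virtual void foo() {} };

constexpr int i = 0;
// error: __builtin_bit_cast source size does not equal destination size (4 vs 1)
constexpr char c = __builtin_bit_cast(char, i);

// error: __builtin_bit_cast source type must be trivially copyable
constexpr unsigned long ul =
    __builtin_bit_cast(unsigned long, not_trivially_copyable{});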
@@ -192,6 +192,7 @@ def ConvertVectorExpr : DStmt<Expr>;
def BlockExpr : DStmt<Expr>;
def OpaqueValueExpr : DStmt<Expr>;
def TypoExpr : DStmt<Expr>;
def BuiltinBitCastExpr : DStmt<ExplicitCastExpr>;

// Microsoft Extensions.
def MSPropertyRefExpr : DStmt<Expr>;
@@ -670,7 +670,7 @@ ALIAS("_pascal" , __pascal , KEYBORLAND)
KEYWORD(__builtin_convertvector , KEYALL)
ALIAS("__char16_t" , char16_t , KEYCXX)
ALIAS("__char32_t" , char32_t , KEYCXX)

KEYWORD(__builtin_bit_cast , KEYALL)
KEYWORD(__builtin_available , KEYALL)

// Clang-specific keywords enabled only in testing.
@@ -1780,6 +1780,9 @@ private:
  // C++ 5.2p1: C++ Casts
  ExprResult ParseCXXCasts();

  /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
  ExprResult ParseBuiltinBitCast();

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Type Identification
  ExprResult ParseCXXTypeid();
@@ -5236,6 +5236,13 @@ public:
                               SourceRange AngleBrackets,
                               SourceRange Parens);

  ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                     ExprResult Operand,
                                     SourceLocation RParenLoc);

  ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
                                     Expr *Operand, SourceLocation RParenLoc);

  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
@@ -1862,6 +1862,7 @@ bool CastExpr::CastConsistency() const {
  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean:
  case CK_LValueBitCast:            // -> bool&
  case CK_LValueToRValueBitCast:
  case CK_UserDefinedConversion:    // operator bool()
  case CK_BuiltinFnToFnPtr:
  case CK_FixedPointToBoolean:
@@ -3506,7 +3507,8 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
  case CXXStaticCastExprClass:
  case CXXReinterpretCastExprClass:
  case CXXConstCastExprClass:
  case CXXFunctionalCastExprClass: {
  case CXXFunctionalCastExprClass:
  case BuiltinBitCastExprClass: {
    // While volatile reads are side-effecting in both C and C++, we treat them
    // as having possible (not definite) side-effects. This allows idiomatic
    // code to behave without warning, such as sizeof(*v) for a volatile-
@@ -343,6 +343,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
  case Expr::BuiltinBitCastExprClass:
    // Only in C++ can casts be interesting at all.
    if (!Lang.CPlusPlus) return Cl::CL_PRValue;
    return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
@@ -47,6 +47,7 @@
#include "clang/Basic/Builtins.h"
#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
@@ -56,8 +57,10 @@
#define DEBUG_TYPE "exprconstant"

using namespace clang;
using llvm::APInt;
using llvm::APSInt;
using llvm::APFloat;
using llvm::Optional;

static bool IsGlobalLValue(APValue::LValueBase B);
@@ -2657,10 +2660,6 @@ static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
  return true;
}

static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
                                           QualType Type, const LValue &LVal,
                                           APValue &RVal);

/// Try to evaluate the initializer for a variable declaration.
///
/// \param Info   Information about the ongoing evaluation.
@@ -5376,6 +5375,489 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
//===----------------------------------------------------------------------===//
namespace {

class BitCastBuffer {
  // FIXME: We're going to need bit-level granularity when we support
  // bit-fields.
  // FIXME: It's possible under the C++ standard for 'char' to not be 8 bits, but
  // we don't support a host or target where that is the case. Still, we should
  // use a more generic type in case we ever do.
  SmallVector<Optional<unsigned char>, 32> Bytes;

  static_assert(std::numeric_limits<unsigned char>::digits >= 8,
                "Need at least 8 bit unsigned char");

  bool TargetIsLittleEndian;

public:
  BitCastBuffer(CharUnits Width, bool TargetIsLittleEndian)
      : Bytes(Width.getQuantity()),
        TargetIsLittleEndian(TargetIsLittleEndian) {}

  LLVM_NODISCARD
  bool readObject(CharUnits Offset, CharUnits Width,
                  SmallVectorImpl<unsigned char> &Output) const {
    for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) {
      // If a byte of an integer is uninitialized, then the whole integer is
      // uninitialized.
      if (!Bytes[I.getQuantity()])
        return false;
      Output.push_back(*Bytes[I.getQuantity()]);
    }
    if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
      std::reverse(Output.begin(), Output.end());
    return true;
  }

  void writeObject(CharUnits Offset, SmallVectorImpl<unsigned char> &Input) {
    if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
      std::reverse(Input.begin(), Input.end());

    size_t Index = 0;
    for (unsigned char Byte : Input) {
      assert(!Bytes[Offset.getQuantity() + Index] && "overwriting a byte?");
      Bytes[Offset.getQuantity() + Index] = Byte;
      ++Index;
    }
  }

  size_t size() { return Bytes.size(); }
};

/// Traverse an APValue to produce a BitCastBuffer, emulating how the current
/// target would represent the value at runtime.
class APValueToBufferConverter {
  EvalInfo &Info;
  BitCastBuffer Buffer;
  const CastExpr *BCE;

  APValueToBufferConverter(EvalInfo &Info, CharUnits ObjectWidth,
                           const CastExpr *BCE)
      : Info(Info),
        Buffer(ObjectWidth, Info.Ctx.getTargetInfo().isLittleEndian()),
        BCE(BCE) {}

  bool visit(const APValue &Val, QualType Ty) {
    return visit(Val, Ty, CharUnits::fromQuantity(0));
  }

  // Write out Val with type Ty into Buffer starting at Offset.
  bool visit(const APValue &Val, QualType Ty, CharUnits Offset) {
    assert((size_t)Offset.getQuantity() <= Buffer.size());

    // As a special case, nullptr_t has an indeterminate value.
    if (Ty->isNullPtrType())
      return true;

    // Dig through Src to find the byte at SrcOffset.
    switch (Val.getKind()) {
    case APValue::Indeterminate:
    case APValue::None:
      return true;

    case APValue::Int:
      return visitInt(Val.getInt(), Ty, Offset);
    case APValue::Float:
      return visitFloat(Val.getFloat(), Ty, Offset);
    case APValue::Array:
      return visitArray(Val, Ty, Offset);
    case APValue::Struct:
      return visitRecord(Val, Ty, Offset);

    case APValue::ComplexInt:
    case APValue::ComplexFloat:
    case APValue::Vector:
    case APValue::FixedPoint:
      // FIXME: We should support these.

    case APValue::Union:
    case APValue::MemberPointer:
    case APValue::AddrLabelDiff: {
      Info.FFDiag(BCE->getBeginLoc(),
                  diag::note_constexpr_bit_cast_unsupported_type)
          << Ty;
      return false;
    }

    case APValue::LValue:
      llvm_unreachable("LValue subobject in bit_cast?");
    }
  }

  bool visitRecord(const APValue &Val, QualType Ty, CharUnits Offset) {
    const RecordDecl *RD = Ty->getAsRecordDecl();
    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);

    // Visit the base classes.
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();

        if (!visitRecord(Val.getStructBase(I), BS.getType(),
                         Layout.getBaseClassOffset(BaseDecl) + Offset))
          return false;
      }
    }

    // Visit the fields.
    unsigned FieldIdx = 0;
    for (FieldDecl *FD : RD->fields()) {
      if (FD->isBitField()) {
        Info.FFDiag(BCE->getBeginLoc(),
                    diag::note_constexpr_bit_cast_unsupported_bitfield);
        return false;
      }

      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);

      assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0 &&
             "only bit-fields can have sub-char alignment");
      CharUnits FieldOffset =
          Info.Ctx.toCharUnitsFromBits(FieldOffsetBits) + Offset;
      QualType FieldTy = FD->getType();
      if (!visit(Val.getStructField(FieldIdx), FieldTy, FieldOffset))
        return false;
      ++FieldIdx;
    }

    return true;
  }

  bool visitArray(const APValue &Val, QualType Ty, CharUnits Offset) {
    const auto *CAT =
        dyn_cast_or_null<ConstantArrayType>(Ty->getAsArrayTypeUnsafe());
    if (!CAT)
      return false;

    CharUnits ElemWidth = Info.Ctx.getTypeSizeInChars(CAT->getElementType());
    unsigned NumInitializedElts = Val.getArrayInitializedElts();
    unsigned ArraySize = Val.getArraySize();
    // First, initialize the initialized elements.
    for (unsigned I = 0; I != NumInitializedElts; ++I) {
      const APValue &SubObj = Val.getArrayInitializedElt(I);
      if (!visit(SubObj, CAT->getElementType(), Offset + I * ElemWidth))
        return false;
    }

    // Next, initialize the rest of the array using the filler.
    if (Val.hasArrayFiller()) {
      const APValue &Filler = Val.getArrayFiller();
      for (unsigned I = NumInitializedElts; I != ArraySize; ++I) {
        if (!visit(Filler, CAT->getElementType(), Offset + I * ElemWidth))
          return false;
      }
    }

    return true;
  }

  bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) {
    CharUnits Width = Info.Ctx.getTypeSizeInChars(Ty);
    SmallVector<unsigned char, 8> Bytes(Width.getQuantity());
    llvm::StoreIntToMemory(Val, &*Bytes.begin(), Width.getQuantity());
    Buffer.writeObject(Offset, Bytes);
    return true;
  }

  bool visitFloat(const APFloat &Val, QualType Ty, CharUnits Offset) {
    APSInt AsInt(Val.bitcastToAPInt());
    return visitInt(AsInt, Ty, Offset);
  }

public:
  static Optional<BitCastBuffer> convert(EvalInfo &Info, const APValue &Src,
                                         const CastExpr *BCE) {
    CharUnits DstSize = Info.Ctx.getTypeSizeInChars(BCE->getType());
    APValueToBufferConverter Converter(Info, DstSize, BCE);
    if (!Converter.visit(Src, BCE->getSubExpr()->getType()))
      return None;
    return Converter.Buffer;
  }
};

/// Write a BitCastBuffer into an APValue.
class BufferToAPValueConverter {
  EvalInfo &Info;
  const BitCastBuffer &Buffer;
  const CastExpr *BCE;

  BufferToAPValueConverter(EvalInfo &Info, const BitCastBuffer &Buffer,
                           const CastExpr *BCE)
      : Info(Info), Buffer(Buffer), BCE(BCE) {}

  // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast
  // with an invalid type, so anything left is a deficiency on our part (FIXME).
  // Ideally this will be unreachable.
  llvm::NoneType unsupportedType(QualType Ty) {
    Info.FFDiag(BCE->getBeginLoc(),
                diag::note_constexpr_bit_cast_unsupported_type)
        << Ty;
    return None;
  }

  Optional<APValue> visit(const BuiltinType *T, CharUnits Offset,
                          const EnumType *EnumSugar = nullptr) {
    if (T->isNullPtrType()) {
      uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QualType(T, 0));
      return APValue((Expr *)nullptr,
                     /*Offset=*/CharUnits::fromQuantity(NullValue),
                     APValue::NoLValuePath{}, /*IsNullPtr=*/true);
    }

    CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T);
    SmallVector<uint8_t, 8> Bytes;
    if (!Buffer.readObject(Offset, SizeOf, Bytes)) {
      // If this is std::byte or unsigned char, then it's okay to store an
      // indeterminate value.
      bool IsStdByte = EnumSugar && EnumSugar->isStdByteType();
      bool IsUChar =
          !EnumSugar && (T->isSpecificBuiltinType(BuiltinType::UChar) ||
                         T->isSpecificBuiltinType(BuiltinType::Char_U));
      if (!IsStdByte && !IsUChar) {
        QualType DisplayType(EnumSugar ? (Type *)EnumSugar : T, 0);
        Info.FFDiag(BCE->getExprLoc(),
                    diag::note_constexpr_bit_cast_indet_dest)
            << DisplayType << Info.Ctx.getLangOpts().CharIsSigned;
        return None;
      }

      return APValue::IndeterminateValue();
    }

    APSInt Val(SizeOf.getQuantity() * Info.Ctx.getCharWidth(), true);
    llvm::LoadIntFromMemory(Val, &*Bytes.begin(), Bytes.size());

    if (T->isIntegralOrEnumerationType()) {
      Val.setIsSigned(T->isSignedIntegerOrEnumerationType());
      return APValue(Val);
    }

    if (T->isRealFloatingType()) {
      const llvm::fltSemantics &Semantics =
          Info.Ctx.getFloatTypeSemantics(QualType(T, 0));
      return APValue(APFloat(Semantics, Val));
    }

    return unsupportedType(QualType(T, 0));
  }

  Optional<APValue> visit(const RecordType *RTy, CharUnits Offset) {
    const RecordDecl *RD = RTy->getAsRecordDecl();
    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);

    unsigned NumBases = 0;
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      NumBases = CXXRD->getNumBases();

    APValue ResultVal(APValue::UninitStruct(), NumBases,
                      std::distance(RD->field_begin(), RD->field_end()));

    // Visit the base classes.
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
        if (BaseDecl->isEmpty() ||
            Info.Ctx.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
          continue;

        Optional<APValue> SubObj = visitType(
            BS.getType(), Layout.getBaseClassOffset(BaseDecl) + Offset);
        if (!SubObj)
          return None;
        ResultVal.getStructBase(I) = *SubObj;
      }
    }

    // Visit the fields.
    unsigned FieldIdx = 0;
    for (FieldDecl *FD : RD->fields()) {
      // FIXME: We don't currently support bit-fields. A lot of the logic for
      // this is in CodeGen, so we need to factor it around.
      if (FD->isBitField()) {
        Info.FFDiag(BCE->getBeginLoc(),
                    diag::note_constexpr_bit_cast_unsupported_bitfield);
        return None;
      }

      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
      assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0);

      CharUnits FieldOffset =
          CharUnits::fromQuantity(FieldOffsetBits / Info.Ctx.getCharWidth()) +
          Offset;
      QualType FieldTy = FD->getType();
      Optional<APValue> SubObj = visitType(FieldTy, FieldOffset);
      if (!SubObj)
        return None;
      ResultVal.getStructField(FieldIdx) = *SubObj;
      ++FieldIdx;
    }

    return ResultVal;
  }

  Optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
    QualType RepresentationType = Ty->getDecl()->getIntegerType();
    assert(!RepresentationType.isNull() &&
           "enum forward decl should be caught by Sema");
    const BuiltinType *AsBuiltin =
        RepresentationType.getCanonicalType()->getAs<BuiltinType>();
    assert(AsBuiltin && "non-integral enum underlying type?");
    // Recurse into the underlying type. Treat std::byte transparently as
    // unsigned char.
    return visit(AsBuiltin, Offset, /*EnumTy=*/Ty);
  }

  Optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
    size_t Size = Ty->getSize().getLimitedValue();
    CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType());

    APValue ArrayValue(APValue::UninitArray(), Size, Size);
    for (size_t I = 0; I != Size; ++I) {
      Optional<APValue> ElementValue =
          visitType(Ty->getElementType(), Offset + I * ElementWidth);
      if (!ElementValue)
        return None;
      ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue);
    }

    return ArrayValue;
  }

  Optional<APValue> visit(const Type *Ty, CharUnits Offset) {
    return unsupportedType(QualType(Ty, 0));
  }

  Optional<APValue> visitType(QualType Ty, CharUnits Offset) {
    QualType Can = Ty.getCanonicalType();

    switch (Can->getTypeClass()) {
#define TYPE(Class, Base) \
  case Type::Class: \
    return visit(cast<Class##Type>(Can.getTypePtr()), Offset);
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) \
  case Type::Class: \
    llvm_unreachable("non-canonical type should be impossible!");
#define DEPENDENT_TYPE(Class, Base) \
  case Type::Class: \
    llvm_unreachable( \
        "dependent types aren't supported in the constant evaluator!");
#define NON_CANONICAL_UNLESS_DEPENDENT(Class, Base) \
  case Type::Class: \
    llvm_unreachable("either dependent or not canonical!");
#include "clang/AST/TypeNodes.def"
    }
  }

public:
  // Pull out a full value of type DstType.
  static Optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer,
                                   const CastExpr *BCE) {
    BufferToAPValueConverter Converter(Info, Buffer, BCE);
    return Converter.visitType(BCE->getType(), CharUnits::fromQuantity(0));
  }
};

static bool checkBitCastConstexprEligibilityType(SourceLocation Loc,
                                                 QualType Ty, EvalInfo *Info,
                                                 const ASTContext &Ctx,
                                                 bool CheckingDest) {
  Ty = Ty.getCanonicalType();

  auto diag = [&](int Reason) {
    if (Info)
      Info->FFDiag(Loc, diag::note_constexpr_bit_cast_invalid_type)
          << CheckingDest << (Reason == 4) << Reason;
    return false;
  };
  auto note = [&](int Construct, QualType NoteTy, SourceLocation NoteLoc) {
    if (Info)
      Info->Note(NoteLoc, diag::note_constexpr_bit_cast_invalid_subtype)
          << NoteTy << Construct << Ty;
    return false;
  };

  if (Ty->isUnionType())
    return diag(0);
  if (Ty->isPointerType())
    return diag(1);
  if (Ty->isMemberPointerType())
    return diag(2);
  if (Ty.isVolatileQualified())
    return diag(3);

  if (RecordDecl *Record = Ty->getAsRecordDecl()) {
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Record)) {
      for (CXXBaseSpecifier &BS : CXXRD->bases())
        if (!checkBitCastConstexprEligibilityType(Loc, BS.getType(), Info, Ctx,
                                                  CheckingDest))
          return note(1, BS.getType(), BS.getBeginLoc());
    }
    for (FieldDecl *FD : Record->fields()) {
      if (FD->getType()->isReferenceType())
        return diag(4);
      if (!checkBitCastConstexprEligibilityType(Loc, FD->getType(), Info, Ctx,
                                                CheckingDest))
        return note(0, FD->getType(), FD->getBeginLoc());
    }
  }

  if (Ty->isArrayType() &&
      !checkBitCastConstexprEligibilityType(Loc, Ctx.getBaseElementType(Ty),
                                            Info, Ctx, CheckingDest))
    return false;

  return true;
}

static bool checkBitCastConstexprEligibility(EvalInfo *Info,
                                             const ASTContext &Ctx,
                                             const CastExpr *BCE) {
  bool DestOK = checkBitCastConstexprEligibilityType(
      BCE->getBeginLoc(), BCE->getType(), Info, Ctx, true);
  bool SourceOK = DestOK && checkBitCastConstexprEligibilityType(
                                BCE->getBeginLoc(),
                                BCE->getSubExpr()->getType(), Info, Ctx, false);
  return SourceOK;
}

static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
                                        APValue &SourceValue,
                                        const CastExpr *BCE) {
  assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
         "no host or target supports non 8-bit chars");
  assert(SourceValue.isLValue() &&
         "LValueToRValueBitcast requires an lvalue operand!");

  if (!checkBitCastConstexprEligibility(&Info, Info.Ctx, BCE))
    return false;

  LValue SourceLValue;
  APValue SourceRValue;
  SourceLValue.setFrom(Info.Ctx, SourceValue);
  if (!handleLValueToRValueConversion(Info, BCE,
                                      BCE->getSubExpr()->getType().withConst(),
                                      SourceLValue, SourceRValue))
    return false;

  // Read out SourceValue into a char buffer.
  Optional<BitCastBuffer> Buffer =
      APValueToBufferConverter::convert(Info, SourceRValue, BCE);
  if (!Buffer)
    return false;

  // Write out the buffer into a new APValue.
  Optional<APValue> MaybeDestValue =
      BufferToAPValueConverter::convert(Info, *Buffer, BCE);
  if (!MaybeDestValue)
    return false;

  DestValue = std::move(*MaybeDestValue);
  return true;
}

template <class Derived>
class ExprEvaluatorBase
    : public ConstStmtVisitor<Derived, bool> {
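A short standalone illustration of what this buffer round trip enables; the array case is taken from the constexpr tests added later in this patch:

constexpr unsigned char bytes[4] = {0xCA, 0xFE, 0xBA, 0xBE};
// APValueToBufferConverter lays the bytes out in the target's memory order,
// and BufferToAPValueConverter reads the unsigned int back out, so the result
// depends on the target's endianness.
constexpr unsigned value = __builtin_bit_cast(unsigned, bytes);
static_assert(value == 0xBEBAFECA || value == 0xCAFEBABE);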
@@ -5510,6 +5992,9 @@ public:
    CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
    return static_cast<Derived*>(this)->VisitCastExpr(E);
  }
  bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) {
    return static_cast<Derived*>(this)->VisitCastExpr(E);
  }

  bool VisitBinaryOperator(const BinaryOperator *E) {
    switch (E->getOpcode()) {
@@ -5802,6 +6287,14 @@ public:
        return false;
      return DerivedSuccess(RVal, E);
    }
    case CK_LValueToRValueBitCast: {
      APValue DestValue, SourceValue;
      if (!Evaluate(SourceValue, Info, E->getSubExpr()))
        return false;
      if (!handleLValueToRValueBitCast(Info, DestValue, SourceValue, E))
        return false;
      return DerivedSuccess(DestValue, E);
    }
    }

    return Error(E);
@@ -6651,7 +7144,6 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    break;

  case CK_BitCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
@@ -8176,7 +8668,7 @@ public:
  }

  bool Success(const APValue &V, const Expr *E) {
    if (V.isLValue() || V.isAddrLabelDiff()) {
    if (V.isLValue() || V.isAddrLabelDiff() || V.isIndeterminate()) {
      Result = V;
      return true;
    }
@@ -10598,6 +11090,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
  case CK_LValueToRValue:
  case CK_AtomicToNonAtomic:
  case CK_NoOp:
  case CK_LValueToRValueBitCast:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_MemberPointerToBoolean:
@@ -11210,6 +11703,7 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
  case CK_LValueToRValue:
  case CK_AtomicToNonAtomic:
  case CK_NoOp:
  case CK_LValueToRValueBitCast:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_Dependent:
@@ -12512,6 +13006,11 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
  case Expr::ChooseExprClass: {
    return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(), Ctx);
  }
  case Expr::BuiltinBitCastExprClass: {
    if (!checkBitCastConstexprEligibility(nullptr, Ctx, cast<CastExpr>(E)))
      return ICEDiag(IK_NotICE, E->getBeginLoc());
    return CheckICE(cast<CastExpr>(E)->getSubExpr(), Ctx);
  }
  }

  llvm_unreachable("Invalid StmtClass!");
@@ -3634,6 +3634,7 @@ recurse:
  case Expr::AtomicExprClass:
  case Expr::SourceLocExprClass:
  case Expr::FixedPointLiteralClass:
  case Expr::BuiltinBitCastExprClass:
  {
    if (!NullOut) {
      // As bad as this diagnostic is, it's better than crashing.
@@ -1703,6 +1703,14 @@ void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) {
  VisitCXXNamedCastExpr(Node);
}

void StmtPrinter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *Node) {
  OS << "__builtin_bit_cast(";
  Node->getTypeInfoAsWritten()->getType().print(OS, Policy);
  OS << ", ";
  PrintExpr(Node->getSubExpr());
  OS << ")";
}

void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
  OS << "typeid(";
  if (Node->isTypeOperand()) {
@@ -1569,6 +1569,11 @@ void StmtProfiler::VisitCXXConstCastExpr(const CXXConstCastExpr *S) {
  VisitCXXNamedCastExpr(S);
}

void StmtProfiler::VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *S) {
  VisitExpr(S);
  VisitType(S->getTypeInfoAsWritten()->getType());
}

void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
  VisitCallExpr(S);
}
@@ -4209,6 +4209,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
@@ -711,6 +711,25 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        Builder.CreateElementBitCast(SourceLV.getAddress(), CGF.Int8Ty);
    Address DestAddress =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
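For aggregate destinations the builtin therefore lowers to a plain byte copy. A sketch of the source pattern this handles, with the expected lowering noted in a comment (compare the CodeGenCXX test added by this patch):

struct two_ints { int x, y; };

two_ints from_scalar(unsigned long ul) {
  // Aggregate destination: CK_LValueToRValueBitCast is emitted as an
  // llvm.memcpy from the materialized source into the destination slot.
  return __builtin_bit_cast(two_ints, ul);
}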
@@ -464,6 +464,15 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
    return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    LValue SourceLVal = CGF.EmitLValue(Op);
    Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(),
                                                CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, Op->getExprLoc());
  }

  case CK_BitCast:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
@@ -1115,6 +1115,7 @@ public:
  case CK_ToVoid:
  case CK_Dynamic:
  case CK_LValueBitCast:
  case CK_LValueToRValueBitCast:
  case CK_NullToMemberPointer:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
@@ -2033,6 +2033,15 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(),
                                                CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
@@ -1042,6 +1042,7 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
  case CK_Dependent:
  case CK_BitCast:
  case CK_LValueBitCast:
  case CK_LValueToRValueBitCast:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
@@ -1630,6 +1630,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
        .Case("__builtin_FILE", true)
        .Case("__builtin_FUNCTION", true)
        .Case("__builtin_COLUMN", true)
        .Case("__builtin_bit_cast", true)
        .Default(false);
    }
  });
@@ -1223,6 +1223,9 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
  case tok::kw_static_cast:
    Res = ParseCXXCasts();
    break;
  case tok::kw___builtin_bit_cast:
    Res = ParseBuiltinBitCast();
    break;
  case tok::kw_typeid:
    Res = ParseCXXTypeid();
    break;
@@ -3513,3 +3513,37 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
  ConsumeAnyToken();
  return Result;
}

/// Parse a __builtin_bit_cast(T, E).
ExprResult Parser::ParseBuiltinBitCast() {
  SourceLocation KWLoc = ConsumeToken();

  BalancedDelimiterTracker T(*this, tok::l_paren);
  if (T.expectAndConsume(diag::err_expected_lparen_after, "__builtin_bit_cast"))
    return ExprError();

  // Parse the common declaration-specifiers piece.
  DeclSpec DS(AttrFactory);
  ParseSpecifierQualifierList(DS);

  // Parse the abstract-declarator, if present.
  Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
  ParseDeclarator(DeclaratorInfo);

  if (ExpectAndConsume(tok::comma)) {
    Diag(Tok.getLocation(), diag::err_expected) << tok::comma;
    SkipUntil(tok::r_paren, StopAtSemi);
    return ExprError();
  }

  ExprResult Operand = ParseExpression();

  if (T.consumeClose())
    return ExprError();

  if (Operand.isInvalid() || DeclaratorInfo.isInvalidType())
    return ExprError();

  return Actions.ActOnBuiltinBitCastExpr(KWLoc, DeclaratorInfo, Operand,
                                         T.getCloseLocation());
}
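Because the destination is parsed as a full type-id (specifier-qualifier-list plus abstract declarator), array destination types and dependent types inside templates both work. Two forms exercised by the tests in this patch:

constexpr int from_nullptr = (__builtin_bit_cast(unsigned char[8], nullptr), 0);

template <class T, T v>
T instantiate() { return __builtin_bit_cast(T, v); }
int x = instantiate<int, 32>();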
@@ -87,6 +87,7 @@ namespace {
    void CheckDynamicCast();
    void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
    void CheckCStyleCast();
    void CheckBuiltinBitCast();

    void updatePartOfExplicitCastFlags(CastExpr *CE) {
      // Walk down from the CE to the OrigSrcExpr, and mark all immediate
@@ -331,6 +332,38 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
  }
}

ExprResult Sema::ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &D,
                                         ExprResult Operand,
                                         SourceLocation RParenLoc) {
  assert(!D.isInvalidType());

  TypeSourceInfo *TInfo = GetTypeForDeclaratorCast(D, Operand.get()->getType());
  if (D.isInvalidType())
    return ExprError();

  return BuildBuiltinBitCastExpr(KWLoc, TInfo, Operand.get(), RParenLoc);
}

ExprResult Sema::BuildBuiltinBitCastExpr(SourceLocation KWLoc,
                                         TypeSourceInfo *TSI, Expr *Operand,
                                         SourceLocation RParenLoc) {
  CastOperation Op(*this, TSI->getType(), Operand);
  Op.OpRange = SourceRange(KWLoc, RParenLoc);
  TypeLoc TL = TSI->getTypeLoc();
  Op.DestRange = SourceRange(TL.getBeginLoc(), TL.getEndLoc());

  if (!Operand->isTypeDependent() && !TSI->getType()->isDependentType()) {
    Op.CheckBuiltinBitCast();
    if (Op.SrcExpr.isInvalid())
      return ExprError();
  }

  BuiltinBitCastExpr *BCE =
      new (Context) BuiltinBitCastExpr(Op.ResultType, Op.ValueKind, Op.Kind,
                                       Op.SrcExpr.get(), TSI, KWLoc, RParenLoc);
  return Op.complete(BCE);
}

/// Try to diagnose a failed overloaded cast.  Returns true if
/// diagnostics were emitted.
static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
@@ -2764,6 +2797,43 @@ void CastOperation::CheckCStyleCast() {
  checkCastAlign();
}

void CastOperation::CheckBuiltinBitCast() {
  QualType SrcType = SrcExpr.get()->getType();
  if (SrcExpr.get()->isRValue())
    SrcExpr = Self.CreateMaterializeTemporaryExpr(SrcType, SrcExpr.get(),
                                                  /*IsLValueReference=*/false);

  CharUnits DestSize = Self.Context.getTypeSizeInChars(DestType);
  CharUnits SourceSize = Self.Context.getTypeSizeInChars(SrcType);
  if (DestSize != SourceSize) {
    Self.Diag(OpRange.getBegin(), diag::err_bit_cast_type_size_mismatch)
        << (int)SourceSize.getQuantity() << (int)DestSize.getQuantity();
    SrcExpr = ExprError();
    return;
  }

  if (!DestType.isTriviallyCopyableType(Self.Context)) {
    Self.Diag(OpRange.getBegin(), diag::err_bit_cast_non_trivially_copyable)
        << 1;
    SrcExpr = ExprError();
    return;
  }

  if (!SrcType.isTriviallyCopyableType(Self.Context)) {
    Self.Diag(OpRange.getBegin(), diag::err_bit_cast_non_trivially_copyable)
        << 0;
    SrcExpr = ExprError();
    return;
  }

  if (Self.Context.hasSameUnqualifiedType(DestType, SrcType)) {
    Kind = CK_NoOp;
    return;
  }

  Kind = CK_LValueToRValueBitCast;
}

/// DiagnoseCastQual - Warn whenever casts discards a qualifiers, be it either
/// const, volatile or both.
static void DiagnoseCastQual(Sema &Self, const ExprResult &SrcExpr,
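In other words, Sema materializes a temporary for prvalue operands and then picks the cast kind from the types alone. A sketch of the resulting shapes, assumed from the checks above rather than stated in the patch:

struct S { int m; };

int   same(int i)    { return __builtin_bit_cast(int, i); }     // same type -> CK_NoOp
float differ(int i)  { return __builtin_bit_cast(float, i); }   // -> CK_LValueToRValueBitCast
int   from_prvalue() { return __builtin_bit_cast(int, S{42}); } // S{42} is materialized first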
@@ -1201,6 +1201,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
  case Expr::CoyieldExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::BuiltinBitCastExprClass:
  case Expr::CXXStdInitializerListExprClass:
  case Expr::DesignatedInitExprClass:
  case Expr::DesignatedInitUpdateExprClass:
@@ -2652,6 +2652,16 @@ public:
                                       ListInitialization);
  }

  /// Build a new C++ __builtin_bit_cast expression.
  ///
  /// By default, performs semantic analysis to build the new expression.
  /// Subclasses may override this routine to provide different behavior.
  ExprResult RebuildBuiltinBitCastExpr(SourceLocation KWLoc,
                                       TypeSourceInfo *TSI, Expr *Sub,
                                       SourceLocation RParenLoc) {
    return getSema().BuildBuiltinBitCastExpr(KWLoc, TSI, Sub, RParenLoc);
  }

  /// Build a new C++ typeid(type) expression.
  ///
  /// By default, performs semantic analysis to build the new expression.
@@ -10245,6 +10255,22 @@ TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
      E->getAngleBrackets().getEnd(), SubExpr.get(), E->getRParenLoc());
}

template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformBuiltinBitCastExpr(BuiltinBitCastExpr *BCE) {
  TypeSourceInfo *TSI =
      getDerived().TransformType(BCE->getTypeInfoAsWritten());
  if (!TSI)
    return ExprError();

  ExprResult Sub = getDerived().TransformExpr(BCE->getSubExpr());
  if (Sub.isInvalid())
    return ExprError();

  return getDerived().RebuildBuiltinBitCastExpr(BCE->getBeginLoc(), TSI,
                                                Sub.get(), BCE->getEndLoc());
}

template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXStaticCastExpr(CXXStaticCastExpr *E) {
@@ -1510,6 +1510,12 @@ void ASTStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
  E->setRParenLoc(ReadSourceLocation());
}

void ASTStmtReader::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
  VisitExplicitCastExpr(E);
  E->KWLoc = ReadSourceLocation();
  E->RParenLoc = ReadSourceLocation();
}

void ASTStmtReader::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
  VisitCallExpr(E);
  E->UDSuffixLoc = ReadSourceLocation();
@@ -1451,6 +1451,12 @@ void ASTStmtWriter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
  Code = serialization::EXPR_CXX_FUNCTIONAL_CAST;
}

void ASTStmtWriter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
  VisitExplicitCastExpr(E);
  Record.AddSourceLocation(E->getBeginLoc());
  Record.AddSourceLocation(E->getEndLoc());
}

void ASTStmtWriter::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
  VisitCallExpr(E);
  Record.AddSourceLocation(E->UDSuffixLoc);
@@ -1687,6 +1687,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
    case Stmt::CXXReinterpretCastExprClass:
    case Stmt::CXXConstCastExprClass:
    case Stmt::CXXFunctionalCastExprClass:
    case Stmt::BuiltinBitCastExprClass:
    case Stmt::ObjCBridgedCastExprClass: {
      Bldr.takeNodes(Pred);
      const auto *C = cast<CastExpr>(S);
@@ -380,6 +380,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
      case CK_Dependent:
      case CK_ArrayToPointerDecay:
      case CK_BitCast:
      case CK_LValueToRValueBitCast:
      case CK_AddressSpaceConversion:
      case CK_BooleanToSignedIntegral:
      case CK_IntegralToPointer:
@@ -0,0 +1,19 @@
// RUN: %clang_cc1 -O3 -std=c++2a -S -emit-llvm -o - -disable-llvm-passes -triple x86_64-apple-macos10.14 %s | FileCheck %s

void test_scalar() {
  // CHECK-LABEL: define void @_Z11test_scalarv
  __builtin_bit_cast(float, 42);

  // CHECK: load float, float* {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA:.*]]
}

void test_scalar2() {
  // CHECK-LABEL: define void @_Z12test_scalar2v
  struct S {int m;};
  __builtin_bit_cast(int, S{42});

  // CHECK: load i32, i32* {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA]]
}

// CHECK: ![[CHAR_TBAA:.*]] = !{!"omnipotent char", {{.*}}, i64 0}
// CHECK: ![[MAY_ALIAS_TBAA]] = !{![[CHAR_TBAA]], ![[CHAR_TBAA]], i64 0}
@ -0,0 +1,106 @@
|
|||
// RUN: %clang_cc1 -std=c++2a -S -emit-llvm -o - -disable-llvm-passes -triple x86_64-apple-macos10.14 %s | FileCheck %s
|
||||
|
||||
void test_scalar(int &oper) {
|
||||
// CHECK-LABEL: define void @_Z11test_scalarRi
|
||||
__builtin_bit_cast(float, oper);
|
||||
|
||||
// CHECK: [[OPER:%.*]] = alloca i32*
|
||||
// CHECK: [[REF:%.*]] = load i32*, i32**
|
||||
// CHECK-NEXT: [[CASTED:%.*]] = bitcast i32* [[REF]] to float*
|
||||
// CHECK-NEXT: load float, float* [[CASTED]]
|
||||
}
|
||||
|
||||
struct two_ints {
|
||||
int x;
|
||||
int y;
|
||||
};
|
||||
|
||||
unsigned long test_aggregate_to_scalar(two_ints &ti) {
|
||||
// CHECK-LABEL: define i64 @_Z24test_aggregate_to_scalarR8two_ints
|
||||
return __builtin_bit_cast(unsigned long, ti);
|
||||
|
||||
// CHECK: [[TI_ADDR:%.*]] = alloca %struct.two_ints*, align 8
|
||||
// CHECK: [[TI_LOAD:%.*]] = load %struct.two_ints*, %struct.two_ints** [[TI_ADDR]]
|
||||
// CHECK-NEXT: [[CASTED:%.*]] = bitcast %struct.two_ints* [[TI_LOAD]] to i64*
|
||||
// CHECK-NEXT: load i64, i64* [[CASTED]]
|
||||
}
|
||||
|
||||
struct two_floats {
|
||||
float x;
|
||||
float y;
|
||||
};
|
||||
|
||||
two_floats test_aggregate_record(two_ints& ti) {
|
||||
// CHECK-LABEL: define <2 x float> @_Z21test_aggregate_recordR8two_int
|
||||
return __builtin_bit_cast(two_floats, ti);
|
||||
|
||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.two_floats, align 4
|
||||
// CHECK: [[TI:%.*]] = alloca %struct.two_ints*, align 8
|
||||
|
||||
// CHECK: [[LOAD_TI:%.*]] = load %struct.two_ints*, %struct.two_ints** [[TI]]
|
||||
// CHECK: [[MEMCPY_SRC:%.*]] = bitcast %struct.two_ints* [[LOAD_TI]] to i8*
|
||||
// CHECK: [[MEMCPY_DST:%.*]] = bitcast %struct.two_floats* [[RETVAL]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[MEMCPY_DST]], i8* align 4 [[MEMCPY_SRC]]
|
||||
}
|
||||
|
||||
two_floats test_aggregate_array(int (&ary)[2]) {
|
||||
// CHECK-LABEL: define <2 x float> @_Z20test_aggregate_arrayRA2_i
|
||||
return __builtin_bit_cast(two_floats, ary);
|
||||
|
||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.two_floats, align 4
|
||||
// CHECK: [[ARY:%.*]] = alloca [2 x i32]*, align 8
|
||||
|
||||
// CHECK: [[LOAD_ARY:%.*]] = load [2 x i32]*, [2 x i32]** [[ARY]]
|
||||
// CHECK: [[MEMCPY_SRC:%.*]] = bitcast [2 x i32]* [[LOAD_ARY]] to i8*
|
||||
// CHECK: [[MEMCPY_DST:%.*]] = bitcast %struct.two_floats* [[RETVAL]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[MEMCPY_DST]], i8* align 4 [[MEMCPY_SRC]]
|
||||
}
|
||||
|
||||
two_ints test_scalar_to_aggregate(unsigned long ul) {
|
||||
// CHECK-LABEL: define i64 @_Z24test_scalar_to_aggregatem
|
||||
return __builtin_bit_cast(two_ints, ul);
|
||||
|
||||
// CHECK: [[TI:%.*]] = alloca %struct.two_ints, align 4
|
||||
// CHECK: [[TITMP:%.*]] = bitcast %struct.two_ints* [[TI]] to i8*
|
||||
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TITMP]]
|
||||
}
|
||||
|
||||
unsigned long test_complex(_Complex unsigned &cu) {
|
||||
// CHECK-LABEL: define i64 @_Z12test_complexRCj
|
||||
return __builtin_bit_cast(unsigned long, cu);
|
||||
|
||||
// CHECK: [[REF_ALLOCA:%.*]] = alloca { i32, i32 }*, align 8
|
||||
// CHECK-NEXT: store { i32, i32 }* {{.*}}, { i32, i32 }** [[REF_ALLOCA]]
|
||||
// CHECK-NEXT: [[REF:%.*]] = load { i32, i32 }*, { i32, i32 }** [[REF_ALLOCA]]
|
||||
// CHECK-NEXT: [[CASTED:%.*]] = bitcast { i32, i32 }* [[REF]] to i64*
|
||||
// CHECK-NEXT: load i64, i64* [[CASTED]], align 4
|
||||
}
|
||||
|
||||
_Complex unsigned test_to_complex(unsigned long &ul) {
|
||||
// CHECK-LABEL: define i64 @_Z15test_to_complexRm
|
||||
|
||||
return __builtin_bit_cast(_Complex unsigned, ul);
|
||||
|
||||
// CHECK: [[REF:%.*]] = alloca i64*
|
||||
// CHECK: [[LOAD_REF:%.*]] = load i64*, i64** [[REF]]
|
||||
// CHECK: [[CASTED:%.*]] = bitcast i64* [[LOAD_REF]] to { i32, i32 }*
|
||||
}
|
||||
|
||||
unsigned long test_array(int (&ary)[2]) {
|
||||
// CHECK-LABEL: define i64 @_Z10test_arrayRA2_i
|
||||
return __builtin_bit_cast(unsigned long, ary);
|
||||
|
||||
// CHECK: [[REF_ALLOCA:%.*]] = alloca [2 x i32]*
|
||||
// CHECK: [[LOAD_REF:%.*]] = load [2 x i32]*, [2 x i32]** [[REF_ALLOCA]]
|
||||
// CHECK: [[CASTED:%.*]] = bitcast [2 x i32]* [[LOAD_REF]] to i64*
|
||||
// CHECK: load i64, i64* [[CASTED]], align 4
|
||||
}
|
||||
|
||||
two_ints test_rvalue_aggregate() {
|
||||
// CHECK-LABEL: define i64 @_Z21test_rvalue_aggregate
|
||||
return __builtin_bit_cast(two_ints, 42ul);
|
||||
|
||||
// CHECK: [[TI:%.*]] = alloca %struct.two_ints, align 4
|
||||
// CHECK: [[CASTED:%.*]] = bitcast %struct.two_ints* [[TI]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[CASTED]]
|
||||
}
|
|
@@ -0,0 +1,39 @@
// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s
// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s -fno-signed-char

#if !__has_builtin(__builtin_bit_cast)
#error
#endif

template <class T, T v>
T instantiate() {
  return __builtin_bit_cast(T, v);
}

int x = instantiate<int, 32>();

struct secret_ctor {
  char member;

private: secret_ctor() = default;
};

void test1() {
  secret_ctor c = __builtin_bit_cast(secret_ctor, (char)0);
}

void test2() {
  constexpr int i = 0;
  // expected-error@+1{{__builtin_bit_cast source size does not equal destination size (4 vs 1)}}
  constexpr char c = __builtin_bit_cast(char, i);
}

struct not_trivially_copyable {
  virtual void foo() {}
};

// expected-error@+1{{__builtin_bit_cast source type must be trivially copyable}}
constexpr unsigned long ul = __builtin_bit_cast(unsigned long, not_trivially_copyable{});

// expected-error@+1 {{__builtin_bit_cast destination type must be trivially copyable}}
constexpr long us = __builtin_bit_cast(unsigned long &, 0L);
@ -0,0 +1,383 @@
|
|||
// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s
|
||||
// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s -fno-signed-char
|
||||
// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple aarch64_be-linux-gnu %s
|
||||
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
# define LITTLE_END 1
|
||||
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
# define LITTLE_END 0
|
||||
#else
|
||||
# error "huh?"
|
||||
#endif
|
||||
|
||||
template <class T, class V> struct is_same {
|
||||
static constexpr bool value = false;
|
||||
};
|
||||
template <class T> struct is_same<T, T> {
|
||||
static constexpr bool value = true;
|
||||
};
|
||||
|
||||
static_assert(sizeof(int) == 4);
|
||||
static_assert(sizeof(long long) == 8);
|
||||
|
||||
template <class To, class From>
|
||||
constexpr To bit_cast(const From &from) {
|
||||
static_assert(sizeof(To) == sizeof(From));
|
||||
#ifdef __CHAR_UNSIGNED__
|
||||
// expected-note@+4 2 {{indeterminate value can only initialize an object of type 'unsigned char', 'char', or 'std::byte'; 'signed char' is invalid}}
|
||||
#else
|
||||
// expected-note@+2 2 {{indeterminate value can only initialize an object of type 'unsigned char' or 'std::byte'; 'signed char' is invalid}}
|
||||
#endif
|
||||
return __builtin_bit_cast(To, from);
|
||||
}
|
||||
|
||||
template <class Intermediate, class Init>
|
||||
constexpr bool round_trip(const Init &init) {
|
||||
return bit_cast<Init>(bit_cast<Intermediate>(init)) == init;
|
||||
}
|
||||
|
||||
void test_int() {
|
||||
static_assert(round_trip<unsigned>((int)-1));
|
||||
static_assert(round_trip<unsigned>((int)0x12345678));
|
||||
static_assert(round_trip<unsigned>((int)0x87654321));
|
||||
static_assert(round_trip<unsigned>((int)0x0C05FEFE));
|
||||
}
|
||||
|
||||
void test_array() {
|
||||
constexpr unsigned char input[] = {0xCA, 0xFE, 0xBA, 0xBE};
|
||||
constexpr unsigned expected = LITTLE_END ? 0xBEBAFECA : 0xCAFEBABE;
|
||||
static_assert(bit_cast<unsigned>(input) == expected);
|
||||
}
|
||||
|
||||
void test_record() {
|
||||
struct int_splicer {
|
||||
unsigned x;
|
||||
unsigned y;
|
||||
|
||||
constexpr bool operator==(const int_splicer &other) const {
|
||||
return other.x == x && other.y == y;
|
||||
}
|
||||
};
|
||||
|
||||
constexpr int_splicer splice{0x0C05FEFE, 0xCAFEBABE};
|
||||
|
||||
static_assert(bit_cast<unsigned long long>(splice) == LITTLE_END
|
||||
? 0xCAFEBABE0C05FEFE
|
||||
: 0x0C05FEFECAFEBABE);
|
||||
|
||||
static_assert(bit_cast<int_splicer>(0xCAFEBABE0C05FEFE).x == LITTLE_END
|
||||
? 0x0C05FEFE
|
||||
: 0xCAFEBABE);
|
||||
|
||||
static_assert(round_trip<unsigned long long>(splice));
|
||||
static_assert(round_trip<long long>(splice));
|
||||
|
||||
struct base2 {
|
||||
};
|
||||
|
||||
struct base3 {
|
||||
unsigned z;
|
||||
};
|
||||
|
||||
struct bases : int_splicer, base2, base3 {
|
||||
unsigned doublez;
|
||||
};
|
||||
|
||||
struct tuple4 {
|
||||
unsigned x, y, z, doublez;
|
||||
|
||||
constexpr bool operator==(tuple4 const &other) const {
|
||||
return x == other.x && y == other.y &&
|
||||
z == other.z && doublez == other.doublez;
|
||||
}
|
||||
};
|
||||
constexpr bases b = {{1, 2}, {}, {3}, 4};
|
||||
constexpr tuple4 t4 = bit_cast<tuple4>(b);
|
||||
static_assert(t4 == tuple4{1, 2, 3, 4});
|
||||
static_assert(round_trip<tuple4>(b));
|
||||
}
|
||||
|
||||
void test_partially_initialized() {
|
||||
struct pad {
|
||||
signed char x;
|
||||
int y;
|
||||
};
|
||||
|
||||
struct no_pad {
|
||||
signed char x;
|
||||
signed char p1, p2, p3;
|
||||
int y;
|
||||
};
|
||||
|
||||
static_assert(sizeof(pad) == sizeof(no_pad));
|
||||
|
||||
constexpr pad pir{4, 4};
|
||||
// expected-error@+2 {{constexpr variable 'piw' must be initialized by a constant expression}}
|
||||
// expected-note@+1 {{in call to 'bit_cast(pir)'}}
|
||||
constexpr int piw = bit_cast<no_pad>(pir).x;
|
||||
|
||||
// expected-error@+2 {{constexpr variable 'bad' must be initialized by a constant expression}}
|
||||
// expected-note@+1 {{in call to 'bit_cast(pir)'}}
|
||||
constexpr no_pad bad = bit_cast<no_pad>(pir);
|
||||
|
||||
constexpr pad fine = bit_cast<pad>(no_pad{1, 2, 3, 4, 5});
|
||||
static_assert(fine.x == 1 && fine.y == 5);
|
||||
}
|
||||
|
||||
void no_bitfields() {
|
||||
// FIXME!
|
||||
struct S {
|
||||
unsigned char x : 8;
|
||||
};
|
||||
|
||||
struct G {
|
||||
unsigned char x : 8;
|
||||
};
|
||||
|
||||
constexpr S s{0};
|
||||
// expected-error@+2 {{constexpr variable 'g' must be initialized by a constant expression}}
|
||||
// expected-note@+1 {{constexpr bit_cast involving bit-field is not yet supported}}
|
||||
constexpr G g = __builtin_bit_cast(G, s);
|
||||
}
|
||||
|
||||
void array_members() {
|
||||
struct S {
|
||||
int ar[3];
|
||||
|
||||
constexpr bool operator==(const S &rhs) {
|
||||
return ar[0] == rhs.ar[0] && ar[1] == rhs.ar[1] && ar[2] == rhs.ar[2];
|
||||
}
|
||||
};
|
||||
|
||||
struct G {
|
||||
int a, b, c;
|
||||
|
||||
constexpr bool operator==(const G &rhs) {
|
||||
return a == rhs.a && b == rhs.b && c == rhs.c;
|
||||
}
|
||||
};
|
||||
|
||||
constexpr S s{{1, 2, 3}};
|
||||
constexpr G g = bit_cast<G>(s);
|
||||
static_assert(g.a == 1 && g.b == 2 && g.c == 3);
|
||||
|
||||
static_assert(round_trip<G>(s));
|
||||
static_assert(round_trip<S>(g));
|
||||
}

void bad_types() {
  union X {
    int x;
  };

  struct G {
    int g;
  };
  // expected-error@+2 {{constexpr variable 'g' must be initialized by a constant expression}}
  // expected-note@+1 {{bit_cast from a union type is not allowed in a constant expression}}
  constexpr G g = __builtin_bit_cast(G, X{0});
  // expected-error@+2 {{constexpr variable 'x' must be initialized by a constant expression}}
  // expected-note@+1 {{bit_cast to a union type is not allowed in a constant expression}}
  constexpr X x = __builtin_bit_cast(X, G{0});

  struct has_pointer {
    // expected-note@+1 2 {{invalid type 'int *' is a member of 'has_pointer'}}
    int *ptr;
  };

  // expected-error@+2 {{constexpr variable 'ptr' must be initialized by a constant expression}}
  // expected-note@+1 {{bit_cast from a pointer type is not allowed in a constant expression}}
  constexpr unsigned long ptr = __builtin_bit_cast(unsigned long, has_pointer{0});
  // expected-error@+2 {{constexpr variable 'hptr' must be initialized by a constant expression}}
  // expected-note@+1 {{bit_cast to a pointer type is not allowed in a constant expression}}
  constexpr has_pointer hptr = __builtin_bit_cast(has_pointer, 0ul);
}

void backtrace() {
  struct A {
    // expected-note@+1 {{invalid type 'int *' is a member of 'A'}}
    int *ptr;
  };

  struct B {
    // expected-note@+1 {{invalid type 'A [10]' is a member of 'B'}}
    A as[10];
  };

  // expected-note@+1 {{invalid type 'B' is a base of 'C'}}
  struct C : B {
  };

  struct E {
    unsigned long ar[10];
  };

  // expected-error@+2 {{constexpr variable 'e' must be initialized by a constant expression}}
  // expected-note@+1 {{bit_cast from a pointer type is not allowed in a constant expression}}
  constexpr E e = __builtin_bit_cast(E, C{});
}

void test_array_fill() {
  constexpr unsigned char a[4] = {1, 2};
  constexpr unsigned int i = bit_cast<unsigned int>(a);
  static_assert(i == (LITTLE_END ? 0x00000201 : 0x01020000), "");
}
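For reference, the expected values follow from the zero-filled tail of the array: the object representation of a is the bytes {0x01, 0x02, 0x00, 0x00}. A small stand-alone sketch (hypothetical, not part of the test; main and the endianness probe are illustrative only) that checks the same layout at run time:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  unsigned char a[4] = {1, 2};     // a[2] and a[3] are zero-initialized
  std::uint32_t i;
  std::memcpy(&i, a, sizeof(i));   // runtime equivalent of the bit_cast above

  std::uint16_t probe = 1;         // probe host byte order
  unsigned char first;
  std::memcpy(&first, &probe, 1);
  bool little_endian = (first == 1);

  assert(i == (little_endian ? 0x00000201u : 0x01020000u));
  return 0;
}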

typedef decltype(nullptr) nullptr_t;

#ifdef __CHAR_UNSIGNED__
// expected-note@+5 {{indeterminate value can only initialize an object of type 'unsigned char', 'char', or 'std::byte'; 'unsigned long' is invalid}}
#else
// expected-note@+3 {{indeterminate value can only initialize an object of type 'unsigned char' or 'std::byte'; 'unsigned long' is invalid}}
#endif
// expected-error@+1 {{constexpr variable 'test_from_nullptr' must be initialized by a constant expression}}
constexpr unsigned long test_from_nullptr = __builtin_bit_cast(unsigned long, nullptr);

constexpr int test_from_nullptr_pass = (__builtin_bit_cast(unsigned char[8], nullptr), 0);

constexpr int test_to_nullptr() {
  nullptr_t npt = __builtin_bit_cast(nullptr_t, 0ul);

  struct indet_mem {
    unsigned char data[sizeof(void *)];
  };
  indet_mem im = __builtin_bit_cast(indet_mem, nullptr);
  nullptr_t npt2 = __builtin_bit_cast(nullptr_t, im);

  return 0;
}

constexpr int ttn = test_to_nullptr();

// expected-warning@+2 {{returning reference to local temporary object}}
// expected-note@+1 {{temporary created here}}
constexpr const long &returns_local() { return 0L; }

// expected-error@+2 {{constexpr variable 'test_nullptr_bad' must be initialized by a constant expression}}
// expected-note@+1 {{read of temporary whose lifetime has ended}}
constexpr nullptr_t test_nullptr_bad = __builtin_bit_cast(nullptr_t, returns_local());

constexpr int test_indeterminate(bool read_indet) {
  struct pad {
    char a;
    int b;
  };

  struct no_pad {
    char a;
    unsigned char p1, p2, p3;
    int b;
  };

  pad p{1, 2};
  no_pad np = bit_cast<no_pad>(p);

  int tmp = np.a + np.b;

  unsigned char &indet_ref = np.p1;

  if (read_indet) {
    // expected-note@+1 {{read of uninitialized object is not allowed in a constant expression}}
    tmp = indet_ref;
  }

  indet_ref = 0;

  return 0;
}

constexpr int run_test_indeterminate = test_indeterminate(false);
// expected-error@+2 {{constexpr variable 'run_test_indeterminate2' must be initialized by a constant expression}}
// expected-note@+1 {{in call to 'test_indeterminate(true)'}}
constexpr int run_test_indeterminate2 = test_indeterminate(true);

struct ref_mem {
  const int &rm;
};

constexpr int global_int = 0;

// expected-error@+2 {{constexpr variable 'run_ref_mem' must be initialized by a constant expression}}
// expected-note@+1 {{bit_cast from a type with a reference member is not allowed in a constant expression}}
constexpr unsigned long run_ref_mem = __builtin_bit_cast(
    unsigned long, ref_mem{global_int});

union u {
  int im;
};

// expected-error@+2 {{constexpr variable 'run_u' must be initialized by a constant expression}}
// expected-note@+1 {{bit_cast from a union type is not allowed in a constant expression}}
constexpr int run_u = __builtin_bit_cast(int, u{32});

struct vol_mem {
  volatile int x;
};

// expected-error@+2 {{constexpr variable 'run_vol_mem' must be initialized by a constant expression}}
// expected-note@+1 {{non-literal type 'vol_mem' cannot be used in a constant expression}}
constexpr int run_vol_mem = __builtin_bit_cast(int, vol_mem{43});

struct mem_ptr {
  int vol_mem::*x; // expected-note{{invalid type 'int vol_mem::*' is a member of 'mem_ptr'}}
};
// expected-error@+2 {{constexpr variable 'run_mem_ptr' must be initialized by a constant expression}}
// expected-note@+1 {{bit_cast from a member pointer type is not allowed in a constant expression}}
constexpr int run_mem_ptr = __builtin_bit_cast(unsigned long, mem_ptr{nullptr});

struct A { char c; /* char padding : 8; */ short s; };
struct B { unsigned char x[4]; };

constexpr B one() {
  A a = {1, 2};
  return bit_cast<B>(a);
}
constexpr char good_one = one().x[0] + one().x[2] + one().x[3];
// expected-error@+2 {{constexpr variable 'bad_one' must be initialized by a constant expression}}
// expected-note@+1 {{read of uninitialized object is not allowed in a constant expression}}
constexpr char bad_one = one().x[1];

constexpr A two() {
  B b = one(); // b.x[1] is indeterminate.
  b.x[0] = 'a';
  b.x[2] = 1;
  b.x[3] = 2;
  return bit_cast<A>(b);
}
constexpr short good_two = two().c + two().s;

namespace std {
enum byte : unsigned char {};
}

enum my_byte : unsigned char {};

struct pad {
  char a;
  int b;
};

constexpr int ok_byte = (__builtin_bit_cast(std::byte[8], pad{1, 2}), 0);
constexpr int ok_uchar = (__builtin_bit_cast(unsigned char[8], pad{1, 2}), 0);

#ifdef __CHAR_UNSIGNED__
// expected-note@+5 {{indeterminate value can only initialize an object of type 'unsigned char', 'char', or 'std::byte'; 'my_byte' is invalid}}
#else
// expected-note@+3 {{indeterminate value can only initialize an object of type 'unsigned char' or 'std::byte'; 'my_byte' is invalid}}
#endif
// expected-error@+1 {{constexpr variable 'bad_my_byte' must be initialized by a constant expression}}
constexpr int bad_my_byte = (__builtin_bit_cast(my_byte[8], pad{1, 2}), 0);
#ifndef __CHAR_UNSIGNED__
// expected-error@+3 {{constexpr variable 'bad_char' must be initialized by a constant expression}}
// expected-note@+2 {{indeterminate value can only initialize an object of type 'unsigned char' or 'std::byte'; 'char' is invalid}}
#endif
constexpr int bad_char = (__builtin_bit_cast(char[8], pad{1, 2}), 0);

struct pad_buffer { unsigned char data[sizeof(pad)]; };
constexpr bool test_pad_buffer() {
  pad x = {1, 2};
  pad_buffer y = __builtin_bit_cast(pad_buffer, x);
  pad z = __builtin_bit_cast(pad, y);
  return x.a == z.a && x.b == z.b;
}
static_assert(test_pad_buffer());
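The test above exercises __builtin_bit_cast both directly and through a thin wrapper. For context, the commit's stated goal is to let the standard library implement std::bit_cast on top of this builtin; a minimal sketch of such a wrapper (hypothetical, not the libc++ definition, and omitting the trivially-copyable and same-size constraints a real implementation would enforce via SFINAE or requires):

template <class To, class From>
constexpr To bit_cast(const From &from) {
  // The builtin itself checks that To and From have the same size and are
  // trivially copyable, and is usable in constant expressions.
  return __builtin_bit_cast(To, from);
}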

@ -5194,6 +5194,8 @@ CXString clang_getCursorKindSpelling(enum CXCursorKind Kind) {
    return cxstring::createRef("CallExpr");
  case CXCursor_ObjCMessageExpr:
    return cxstring::createRef("ObjCMessageExpr");
  case CXCursor_BuiltinBitCastExpr:
    return cxstring::createRef("BuiltinBitCastExpr");
  case CXCursor_UnexposedStmt:
    return cxstring::createRef("UnexposedStmt");
  case CXCursor_DeclStmt:

@ -716,6 +716,8 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent,
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    K = CXCursor_OMPTargetTeamsDistributeSimdDirective;
    break;
  case Stmt::BuiltinBitCastExprClass:
    K = CXCursor_BuiltinBitCastExpr;
  }

  CXCursor C = { K, 0, { Parent, S, TU } };

@ -2212,6 +2212,15 @@ Optional<APInt> SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
// See friend declaration above. This additional declaration is required in
// order to compile LLVM with IBM xlC compiler.
hash_code hash_value(const APInt &Arg);
} // End of llvm namespace

/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
/// with the integer held in IntVal.
void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes);

/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes);

} // namespace llvm

#endif
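The hunk above moves StoreIntToMemory and LoadIntFromMemory into APInt.h instead of leaving them file-local to ExecutionEngine.cpp (see the hunks below), so other callers can reuse them. A small usage sketch (hypothetical; the function name roundTripThroughMemory and the chosen values are illustrative, the signatures are the ones declared above):

#include "llvm/ADT/APInt.h"
#include <cstdint>

// Round-trip a 128-bit value through a raw byte buffer.
void roundTripThroughMemory() {
  llvm::APInt Val(128, 0xDEADBEEF);                // 128-bit integer, low word set
  uint8_t Buf[16] = {};
  llvm::StoreIntToMemory(Val, Buf, sizeof(Buf));   // write 16 bytes in target order
  llvm::APInt Back(128, 0);                        // wide enough and zeroed, as required
  llvm::LoadIntFromMemory(Back, Buf, sizeof(Buf)); // read the bytes back
  (void)(Back == Val);                             // expected to compare equal
}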

@ -1019,32 +1019,6 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
  return Result;
}

/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
/// with the integer held in IntVal.
static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
                             unsigned StoreBytes) {
  assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
  const uint8_t *Src = (const uint8_t *)IntVal.getRawData();

  if (sys::IsLittleEndianHost) {
    // Little-endian host - the source is ordered from LSB to MSB. Order the
    // destination from LSB to MSB: Do a straight copy.
    memcpy(Dst, Src, StoreBytes);
  } else {
    // Big-endian host - the source is an array of 64 bit words ordered from
    // LSW to MSW. Each word is ordered from MSB to LSB. Order the destination
    // from MSB to LSB: Reverse the word order, but not the bytes in a word.
    while (StoreBytes > sizeof(uint64_t)) {
      StoreBytes -= sizeof(uint64_t);
      // May not be aligned so use memcpy.
      memcpy(Dst + StoreBytes, Src, sizeof(uint64_t));
      Src += sizeof(uint64_t);
    }

    memcpy(Dst, Src + sizeof(uint64_t) - StoreBytes, StoreBytes);
  }
}

void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
                                         GenericValue *Ptr, Type *Ty) {
  const unsigned StoreBytes = getDataLayout().getTypeStoreSize(Ty);
@ -1092,33 +1066,6 @@ void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
    std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
}

/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
  assert((IntVal.getBitWidth()+7)/8 >= LoadBytes && "Integer too small!");
  uint8_t *Dst = reinterpret_cast<uint8_t *>(
                   const_cast<uint64_t *>(IntVal.getRawData()));

  if (sys::IsLittleEndianHost)
    // Little-endian host - the destination must be ordered from LSB to MSB.
    // The source is ordered from LSB to MSB: Do a straight copy.
    memcpy(Dst, Src, LoadBytes);
  else {
    // Big-endian - the destination is an array of 64 bit words ordered from
    // LSW to MSW. Each word must be ordered from MSB to LSB. The source is
    // ordered from MSB to LSB: Reverse the word order, but not the bytes in
    // a word.
    while (LoadBytes > sizeof(uint64_t)) {
      LoadBytes -= sizeof(uint64_t);
      // May not be aligned so use memcpy.
      memcpy(Dst, Src + LoadBytes, sizeof(uint64_t));
      Dst += sizeof(uint64_t);
    }

    memcpy(Dst + sizeof(uint64_t) - LoadBytes, Src, LoadBytes);
  }
}

/// FIXME: document
///
void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
@ -2934,3 +2934,56 @@ llvm::APIntOps::SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
  LLVM_DEBUG(dbgs() << __func__ << ": solution (wrap): " << X << '\n');
  return X;
}

/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
/// with the integer held in IntVal.
void llvm::StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
                            unsigned StoreBytes) {
  assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
  const uint8_t *Src = (const uint8_t *)IntVal.getRawData();

  if (sys::IsLittleEndianHost) {
    // Little-endian host - the source is ordered from LSB to MSB. Order the
    // destination from LSB to MSB: Do a straight copy.
    memcpy(Dst, Src, StoreBytes);
  } else {
    // Big-endian host - the source is an array of 64 bit words ordered from
    // LSW to MSW. Each word is ordered from MSB to LSB. Order the destination
    // from MSB to LSB: Reverse the word order, but not the bytes in a word.
    while (StoreBytes > sizeof(uint64_t)) {
      StoreBytes -= sizeof(uint64_t);
      // May not be aligned so use memcpy.
      memcpy(Dst + StoreBytes, Src, sizeof(uint64_t));
      Src += sizeof(uint64_t);
    }

    memcpy(Dst, Src + sizeof(uint64_t) - StoreBytes, StoreBytes);
  }
}

/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
void llvm::LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
  assert((IntVal.getBitWidth()+7)/8 >= LoadBytes && "Integer too small!");
  uint8_t *Dst = reinterpret_cast<uint8_t *>(
                   const_cast<uint64_t *>(IntVal.getRawData()));

  if (sys::IsLittleEndianHost)
    // Little-endian host - the destination must be ordered from LSB to MSB.
    // The source is ordered from LSB to MSB: Do a straight copy.
    memcpy(Dst, Src, LoadBytes);
  else {
    // Big-endian - the destination is an array of 64 bit words ordered from
    // LSW to MSW. Each word must be ordered from MSB to LSB. The source is
    // ordered from MSB to LSB: Reverse the word order, but not the bytes in
    // a word.
    while (LoadBytes > sizeof(uint64_t)) {
      LoadBytes -= sizeof(uint64_t);
      // May not be aligned so use memcpy.
      memcpy(Dst, Src + LoadBytes, sizeof(uint64_t));
      Dst += sizeof(uint64_t);
    }

    memcpy(Dst + sizeof(uint64_t) - LoadBytes, Src, LoadBytes);
  }
}
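The big-endian branch above reverses the order of the 64-bit words while keeping each word's internal byte order. A short sketch of how that word order can be observed (hypothetical, not part of the commit; the function name inspectWordOrder is illustrative):

#include "llvm/ADT/APInt.h"
#include <cstdint>
#include <cstring>

// Observe which 64-bit word of a 128-bit APInt is written first.
void inspectWordOrder() {
  llvm::APInt V(128, 1);   // bit 0 set: low word = 1, high word = 0
  V <<= 64;                // now low word = 0, high word = 1
  uint8_t Buf[16];
  llvm::StoreIntToMemory(V, Buf, sizeof(Buf));

  uint64_t FirstWord;
  std::memcpy(&FirstWord, Buf, sizeof(FirstWord));
  // On a little-endian host the least significant word is written first, so
  // FirstWord reads back as 0. On a big-endian host the words are reversed,
  // so FirstWord reads back as 1 (the most significant word).
  (void)FirstWord;
}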