2008-01-26 09:36:00 +08:00
|
|
|
//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This contains code to emit Constant Expr nodes as LLVM code.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "CodeGenFunction.h"
|
2010-08-31 15:33:07 +08:00
|
|
|
#include "CGCXXABI.h"
|
2008-08-13 08:59:25 +08:00
|
|
|
#include "CGObjCRuntime.h"
|
2010-03-31 06:26:10 +08:00
|
|
|
#include "CGRecordLayout.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "CodeGenModule.h"
|
2008-10-06 13:59:01 +08:00
|
|
|
#include "clang/AST/APValue.h"
|
2008-08-11 13:00:27 +08:00
|
|
|
#include "clang/AST/ASTContext.h"
|
2009-07-24 23:20:52 +08:00
|
|
|
#include "clang/AST/RecordLayout.h"
|
2008-08-11 13:00:27 +08:00
|
|
|
#include "clang/AST/StmtVisitor.h"
|
2009-06-14 09:54:56 +08:00
|
|
|
#include "clang/Basic/Builtins.h"
|
2013-01-02 19:45:17 +08:00
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
2008-01-26 09:36:00 +08:00
|
|
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
|
|
|
|
2010-04-14 01:45:57 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ConstStructBuilder
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
2015-06-10 08:27:52 +08:00
|
|
|
class ConstExprEmitter;
|
2009-11-29 03:45:26 +08:00
|
|
|
class ConstStructBuilder {
|
2009-07-24 23:20:52 +08:00
|
|
|
CodeGenModule &CGM;
|
|
|
|
CodeGenFunction *CGF;
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
bool Packed;
|
2011-03-17 09:33:18 +08:00
|
|
|
CharUnits NextFieldOffsetInChars;
|
2011-03-18 09:26:17 +08:00
|
|
|
CharUnits LLVMStructAlignment;
|
2012-02-07 08:54:58 +08:00
|
|
|
SmallVector<llvm::Constant *, 32> Elements;
|
2010-04-14 01:45:57 +08:00
|
|
|
public:
|
2015-06-10 08:27:52 +08:00
|
|
|
static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CFG,
|
|
|
|
ConstExprEmitter *Emitter,
|
|
|
|
llvm::ConstantStruct *Base,
|
|
|
|
InitListExpr *Updater);
|
2010-04-14 01:45:57 +08:00
|
|
|
static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
|
|
|
|
InitListExpr *ILE);
|
2012-01-14 12:30:29 +08:00
|
|
|
static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
|
|
|
|
const APValue &Value, QualType ValTy);
|
|
|
|
|
|
|
|
private:
|
2009-07-24 23:20:52 +08:00
|
|
|
ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
|
2011-03-17 09:33:18 +08:00
|
|
|
: CGM(CGM), CGF(CGF), Packed(false),
|
|
|
|
NextFieldOffsetInChars(CharUnits::Zero()),
|
2011-03-18 09:26:17 +08:00
|
|
|
LLVMStructAlignment(CharUnits::One()) { }
|
2009-07-24 23:20:52 +08:00
|
|
|
|
2012-01-14 12:30:29 +08:00
|
|
|
void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
|
Rework the ConstStructBuilder code to emit missing initializer
elements with explicit zero values instead of with tail padding.
On an example like this:
struct foo { int a; int b; };
struct foo fooarray[] = {
{1, 2},
{4},
};
We now lay this out as:
@fooarray = global [2 x %struct.foo] [%struct.foo { i32 1, i32 2 }, %struct.foo { i32 4, i32 0 }]
instead of as:
@fooarray = global %0 <{ %struct.foo { i32 1, i32 2 }, %1 { i32 4, [4 x i8] zeroinitializer } }>
Preserving both the struct type of the second element, but also the array type of the entire thing.
llvm-svn: 101155
2010-04-14 02:16:19 +08:00
|
|
|
llvm::Constant *InitExpr);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2012-02-23 16:33:23 +08:00
|
|
|
void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
|
|
|
|
|
2010-07-06 01:04:23 +08:00
|
|
|
void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
|
|
|
|
llvm::ConstantInt *InitExpr);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-03-12 07:42:54 +08:00
|
|
|
void AppendPadding(CharUnits PadSize);
|
2009-07-24 23:20:52 +08:00
|
|
|
|
2011-03-11 10:17:05 +08:00
|
|
|
void AppendTailPadding(CharUnits RecordSize);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-04-14 01:45:57 +08:00
|
|
|
void ConvertStructToPacked();
|
2012-01-14 12:30:29 +08:00
|
|
|
|
2010-04-14 01:45:57 +08:00
|
|
|
bool Build(InitListExpr *ILE);
|
2015-06-10 08:27:52 +08:00
|
|
|
bool Build(ConstExprEmitter *Emitter, llvm::ConstantStruct *Base,
|
|
|
|
InitListExpr *Updater);
|
2012-02-23 16:33:23 +08:00
|
|
|
void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
|
2013-09-27 22:48:01 +08:00
|
|
|
const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
|
2012-01-14 12:30:29 +08:00
|
|
|
llvm::Constant *Finalize(QualType Ty);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-03-18 09:26:17 +08:00
|
|
|
CharUnits getAlignment(const llvm::Constant *C) const {
|
|
|
|
if (Packed) return CharUnits::One();
|
|
|
|
return CharUnits::fromQuantity(
|
2012-10-09 00:25:52 +08:00
|
|
|
CGM.getDataLayout().getABITypeAlignment(C->getType()));
|
2010-04-14 01:45:57 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-03-18 09:12:13 +08:00
|
|
|
CharUnits getSizeInChars(const llvm::Constant *C) const {
|
|
|
|
return CharUnits::fromQuantity(
|
2012-10-09 00:25:52 +08:00
|
|
|
CGM.getDataLayout().getTypeAllocSize(C->getType()));
|
2010-04-14 01:45:57 +08:00
|
|
|
}
|
|
|
|
};
|
2009-07-24 23:20:52 +08:00
|
|
|
|
2012-01-14 12:30:29 +08:00
|
|
|
void ConstStructBuilder::
|
Rework the ConstStructBuilder code to emit missing initializer
elements with explicit zero values instead of with tail padding.
On an example like this:
struct foo { int a; int b; };
struct foo fooarray[] = {
{1, 2},
{4},
};
We now lay this out as:
@fooarray = global [2 x %struct.foo] [%struct.foo { i32 1, i32 2 }, %struct.foo { i32 4, i32 0 }]
instead of as:
@fooarray = global %0 <{ %struct.foo { i32 1, i32 2 }, %1 { i32 4, [4 x i8] zeroinitializer } }>
Preserving both the struct type of the second element, but also the array type of the entire thing.
llvm-svn: 101155
2010-04-14 02:16:19 +08:00
|
|
|
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
|
|
|
|
llvm::Constant *InitCst) {
|
2011-03-15 09:09:02 +08:00
|
|
|
const ASTContext &Context = CGM.getContext();
|
|
|
|
|
|
|
|
CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2012-02-23 16:33:23 +08:00
|
|
|
AppendBytes(FieldOffsetInChars, InitCst);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// AppendBytes - Append \p InitCst so that it begins exactly at byte offset
/// \p FieldOffsetInChars.  Inserts undef padding when the current end of the
/// struct is short of the target offset, and converts the whole struct to a
/// packed layout when the constant's natural ABI alignment would push it
/// past the target offset.
void ConstStructBuilder::
AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {

  // Callers must append in ascending offset order.
  assert(NextFieldOffsetInChars <= FieldOffsetInChars
         && "Field offset mismatch!");

  CharUnits FieldAlignment = getAlignment(InitCst);

  // Round up the field offset to the alignment of the field type.
  CharUnits AlignedNextFieldOffsetInChars =
      NextFieldOffsetInChars.alignTo(FieldAlignment);

  if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
    // We need to append padding.
    AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);

    assert(NextFieldOffsetInChars == FieldOffsetInChars &&
           "Did not add enough padding!");

    // Re-align: AppendPadding advanced NextFieldOffsetInChars.
    AlignedNextFieldOffsetInChars =
        NextFieldOffsetInChars.alignTo(FieldAlignment);
  }

  if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
    // Natural alignment overshoots the required offset; the only way to hit
    // the exact offset is a packed struct.
    assert(!Packed && "Alignment is wrong even with a packed struct!");

    // Convert the struct to a packed struct.
    ConvertStructToPacked();

    // After we pack the struct, we may need to insert padding.
    if (NextFieldOffsetInChars < FieldOffsetInChars) {
      // We need to append padding.
      AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);

      assert(NextFieldOffsetInChars == FieldOffsetInChars &&
             "Did not add enough padding!");
    }
    AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
  }

  // Add the field.
  Elements.push_back(InitCst);
  NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
                           getSizeInChars(InitCst);

  if (Packed)
    assert(LLVMStructAlignment == CharUnits::One() &&
           "Packed struct not byte-aligned!");
  else
    // Track the largest member alignment seen so far.
    LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
}
|
|
|
|
|
2010-07-06 01:04:23 +08:00
|
|
|
/// AppendBitField - Append the value of the bitfield \p Field, which starts
/// at bit offset \p FieldOffset, emitting it byte by byte.  Bits that share
/// a byte with a previously emitted element are OR'd into that byte; the
/// remaining bits are emitted as one i8 element per byte, in an order that
/// depends on the target's endianness.
void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
                                        uint64_t FieldOffset,
                                        llvm::ConstantInt *CI) {
  const ASTContext &Context = CGM.getContext();
  const uint64_t CharWidth = Context.getCharWidth();
  uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
  if (FieldOffset > NextFieldOffsetInBits) {
    // We need to add padding.
    // Round the gap up to a whole number of chars before padding.
    CharUnits PadSize = Context.toCharUnitsFromBits(
        llvm::alignTo(FieldOffset - NextFieldOffsetInBits,
                      Context.getTargetInfo().getCharAlign()));

    AppendPadding(PadSize);
  }

  uint64_t FieldSize = Field->getBitWidthValue(Context);

  llvm::APInt FieldValue = CI->getValue();

  // Promote the size of FieldValue if necessary
  // FIXME: This should never occur, but currently it can because initializer
  // constants are cast to bool, and because clang is not enforcing bitfield
  // width limits.
  if (FieldSize > FieldValue.getBitWidth())
    FieldValue = FieldValue.zext(FieldSize);

  // Truncate the size of FieldValue to the bit field size.
  if (FieldSize < FieldValue.getBitWidth())
    FieldValue = FieldValue.trunc(FieldSize);

  // Recompute: AppendPadding above may have advanced the offset.
  NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
  if (FieldOffset < NextFieldOffsetInBits) {
    // Either part of the field or the entire field can go into the previous
    // byte.
    assert(!Elements.empty() && "Elements can't be empty!");

    unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;

    bool FitsCompletelyInPreviousByte =
        BitsInPreviousByte >= FieldValue.getBitWidth();

    llvm::APInt Tmp = FieldValue;

    if (!FitsCompletelyInPreviousByte) {
      // Split FieldValue: Tmp gets the bits destined for the previous byte,
      // FieldValue keeps the rest for the main loop below.
      unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;

      if (CGM.getDataLayout().isBigEndian()) {
        Tmp = Tmp.lshr(NewFieldWidth);
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining high bits.
        FieldValue = FieldValue.trunc(NewFieldWidth);
      } else {
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining low bits.
        FieldValue = FieldValue.lshr(BitsInPreviousByte);
        FieldValue = FieldValue.trunc(NewFieldWidth);
      }
    }

    // Position Tmp's bits within a full char, per endianness.
    Tmp = Tmp.zext(CharWidth);
    if (CGM.getDataLayout().isBigEndian()) {
      if (FitsCompletelyInPreviousByte)
        Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
    } else {
      Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
    }

    // 'or' in the bits that go into the previous byte.
    llvm::Value *LastElt = Elements.back();
    if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
      Tmp |= Val->getValue();
    else {
      assert(isa<llvm::UndefValue>(LastElt));
      // If there is an undef field that we're adding to, it can either be a
      // scalar undef (in which case, we just replace it with our field) or it
      // is an array.  If it is an array, we have to pull one byte off the
      // array so that the other undef bytes stay around.
      if (!isa<llvm::IntegerType>(LastElt->getType())) {
        // The undef padding will be a multibyte array, create a new smaller
        // padding and then an hole for our i8 to get plopped into.
        assert(isa<llvm::ArrayType>(LastElt->getType()) &&
               "Expected array padding of undefs");
        llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
        assert(AT->getElementType()->isIntegerTy(CharWidth) &&
               AT->getNumElements() != 0 &&
               "Expected non-empty array padding of undefs");

        // Remove the padding array.
        NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
        Elements.pop_back();

        // Add the padding back in two chunks.
        AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
        AppendPadding(CharUnits::One());
        assert(isa<llvm::UndefValue>(Elements.back()) &&
               Elements.back()->getType()->isIntegerTy(CharWidth) &&
               "Padding addition didn't work right");
      }
    }

    // Replace the last byte with the merged value.
    Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);

    if (FitsCompletelyInPreviousByte)
      return;
  }

  // Emit the remaining bits, one char-sized element at a time.
  while (FieldValue.getBitWidth() > CharWidth) {
    llvm::APInt Tmp;

    if (CGM.getDataLayout().isBigEndian()) {
      // We want the high bits.
      Tmp =
        FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
    } else {
      // We want the low bits.
      Tmp = FieldValue.trunc(CharWidth);

      FieldValue = FieldValue.lshr(CharWidth);
    }

    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
    ++NextFieldOffsetInChars;

    FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
  }

  assert(FieldValue.getBitWidth() > 0 &&
         "Should have at least one bit left!");
  assert(FieldValue.getBitWidth() <= CharWidth &&
         "Should not have more than a byte left!");

  if (FieldValue.getBitWidth() < CharWidth) {
    // Widen the final partial byte to a full char; on big-endian targets the
    // payload occupies the high bits of that char.
    if (CGM.getDataLayout().isBigEndian()) {
      unsigned BitWidth = FieldValue.getBitWidth();

      FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
    } else
      FieldValue = FieldValue.zext(CharWidth);
  }

  // Append the last element.
  Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
                                            FieldValue));
  ++NextFieldOffsetInChars;
}
|
2009-07-24 23:20:52 +08:00
|
|
|
|
2011-03-12 07:42:54 +08:00
|
|
|
/// AppendPadding - Append \p PadSize chars of undef padding to the struct
/// being built, advancing the current offset accordingly.
void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
  // Zero-sized padding requires no element at all.
  if (PadSize.isZero())
    return;

  // One byte of padding is a bare i8; anything larger is an [N x i8] array.
  llvm::Type *PadTy = CGM.Int8Ty;
  if (PadSize > CharUnits::One())
    PadTy = llvm::ArrayType::get(PadTy, PadSize.getQuantity());

  llvm::Constant *Padding = llvm::UndefValue::get(PadTy);
  assert(getAlignment(Padding) == CharUnits::One() &&
         "Padding must have 1 byte alignment!");
  Elements.push_back(Padding);

  NextFieldOffsetInChars += getSizeInChars(Padding);
}
|
2009-10-02 10:15:20 +08:00
|
|
|
|
2011-03-11 10:17:05 +08:00
|
|
|
/// AppendTailPadding - Pad the struct out to the record's total size of
/// \p RecordSize chars.
void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
  assert(NextFieldOffsetInChars <= RecordSize &&
         "Size mismatch!");

  // AppendPadding is a no-op when the gap is zero.
  CharUnits TailSize = RecordSize - NextFieldOffsetInChars;
  AppendPadding(TailSize);
}
|
2009-10-02 10:15:20 +08:00
|
|
|
|
2010-04-14 01:45:57 +08:00
|
|
|
void ConstStructBuilder::ConvertStructToPacked() {
|
2012-02-07 08:04:27 +08:00
|
|
|
SmallVector<llvm::Constant *, 16> PackedElements;
|
2011-03-18 08:55:06 +08:00
|
|
|
CharUnits ElementOffsetInChars = CharUnits::Zero();
|
2009-10-02 10:15:20 +08:00
|
|
|
|
2010-04-14 01:45:57 +08:00
|
|
|
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
|
|
|
|
llvm::Constant *C = Elements[i];
|
2009-10-02 10:15:20 +08:00
|
|
|
|
2011-03-18 09:26:17 +08:00
|
|
|
CharUnits ElementAlign = CharUnits::fromQuantity(
|
2012-10-09 00:25:52 +08:00
|
|
|
CGM.getDataLayout().getABITypeAlignment(C->getType()));
|
2011-03-18 08:55:06 +08:00
|
|
|
CharUnits AlignedElementOffsetInChars =
|
2016-01-15 05:00:27 +08:00
|
|
|
ElementOffsetInChars.alignTo(ElementAlign);
|
2009-10-02 10:15:20 +08:00
|
|
|
|
2011-03-18 08:55:06 +08:00
|
|
|
if (AlignedElementOffsetInChars > ElementOffsetInChars) {
|
2010-04-14 01:45:57 +08:00
|
|
|
// We need some padding.
|
2011-03-18 08:55:06 +08:00
|
|
|
CharUnits NumChars =
|
|
|
|
AlignedElementOffsetInChars - ElementOffsetInChars;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2012-02-07 08:39:47 +08:00
|
|
|
llvm::Type *Ty = CGM.Int8Ty;
|
2011-03-18 08:55:06 +08:00
|
|
|
if (NumChars > CharUnits::One())
|
|
|
|
Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-04-17 04:56:35 +08:00
|
|
|
llvm::Constant *Padding = llvm::UndefValue::get(Ty);
|
2010-04-14 01:45:57 +08:00
|
|
|
PackedElements.push_back(Padding);
|
2011-03-18 09:12:13 +08:00
|
|
|
ElementOffsetInChars += getSizeInChars(Padding);
|
2009-07-24 23:20:52 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-04-14 01:45:57 +08:00
|
|
|
PackedElements.push_back(C);
|
2011-03-18 09:12:13 +08:00
|
|
|
ElementOffsetInChars += getSizeInChars(C);
|
2010-04-14 01:45:57 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-03-18 08:55:06 +08:00
|
|
|
assert(ElementOffsetInChars == NextFieldOffsetInChars &&
|
2010-04-14 01:45:57 +08:00
|
|
|
"Packing the struct changed its size!");
|
|
|
|
|
2012-02-07 08:04:27 +08:00
|
|
|
Elements.swap(PackedElements);
|
2011-03-18 09:26:17 +08:00
|
|
|
LLVMStructAlignment = CharUnits::One();
|
2010-04-14 01:45:57 +08:00
|
|
|
Packed = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Build - Append constants for every member of the record initialized by
/// \p ILE, in declaration order.  Members without an explicit initializer
/// get an explicit null constant.  Returns false if any initializer cannot
/// be emitted as a constant (the caller must then emit run-time code).
bool ConstStructBuilder::Build(InitListExpr *ILE) {
  RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  // FieldNo indexes the record layout; ElementNo indexes the initializer
  // list.  They diverge when fields are skipped (unions, unnamed bitfields).
  unsigned FieldNo = 0;
  unsigned ElementNo = 0;

  // Bail out if we have base classes. We could support these, but they only
  // arise in C++1z where we will have already constant folded most interesting
  // cases. FIXME: There are still a few more cases we can handle this way.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    if (CXXRD->getNumBases())
      return false;

  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
      continue;

    // Don't emit anonymous bitfields, they just affect layout.
    if (Field->isUnnamedBitfield())
      continue;

    // Get the initializer.  A struct can include fields without initializers,
    // we just use explicit null values for them.
    llvm::Constant *EltInit;
    if (ElementNo < ILE->getNumInits())
      EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
                                     Field->getType(), CGF);
    else
      EltInit = CGM.EmitNullConstant(Field->getType());

    // EmitConstantExpr returns null when the expression isn't constant.
    if (!EltInit)
      return false;

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
    } else {
      // Otherwise we have a bitfield.
      if (auto *CI = dyn_cast<llvm::ConstantInt>(EltInit)) {
        AppendBitField(*Field, Layout.getFieldOffset(FieldNo), CI);
      } else {
        // We are trying to initialize a bitfield with a non-trivial constant,
        // this must require run-time code.
        return false;
      }
    }
  }

  return true;
}
|
|
|
|
|
2012-02-23 16:33:23 +08:00
|
|
|
namespace {
|
|
|
|
struct BaseInfo {
|
|
|
|
BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
|
|
|
|
: Decl(Decl), Offset(Offset), Index(Index) {
|
|
|
|
}
|
|
|
|
|
|
|
|
const CXXRecordDecl *Decl;
|
|
|
|
CharUnits Offset;
|
|
|
|
unsigned Index;
|
|
|
|
|
|
|
|
bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
|
|
|
|
};
|
2015-06-23 07:07:51 +08:00
|
|
|
}
|
2012-02-23 16:33:23 +08:00
|
|
|
|
|
|
|
/// Build - Append constants for the subobject of type \p RD whose evaluated
/// value is \p Val, located \p Offset chars into the complete object.
/// Recurses into non-virtual bases (in address order) before emitting the
/// record's own fields.  \p IsPrimaryBase suppresses a duplicate vtable
/// pointer when this base shares its vptr with the derived class;
/// \p VTableClass is the most-derived class whose vtable is referenced.
void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
                               bool IsPrimaryBase,
                               const CXXRecordDecl *VTableClass,
                               CharUnits Offset) {
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
    // Add a vtable pointer, if we need one and it hasn't already been added.
    if (CD->isDynamicClass() && !IsPrimaryBase) {
      llvm::Constant *VTableAddressPoint =
          CGM.getCXXABI().getVTableAddressPointForConstExpr(
              BaseSubobject(CD, Offset), VTableClass);
      AppendBytes(Offset, VTableAddressPoint);
    }

    // Accumulate and sort bases, in order to visit them in address order, which
    // may not be the same as declaration order.
    SmallVector<BaseInfo, 8> Bases;
    Bases.reserve(CD->getNumBases());
    unsigned BaseNo = 0;
    for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
         BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
      assert(!Base->isVirtual() && "should not have virtual bases here");
      const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
      CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
      Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
    }
    std::stable_sort(Bases.begin(), Bases.end());

    // Recurse into each base at its offset within the complete object.
    for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
      BaseInfo &Base = Bases[I];

      bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
      Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
            VTableClass, Offset + Base.Offset);
    }
  }

  unsigned FieldNo = 0;
  // Field offsets from the layout are relative to RD; add the subobject's
  // own offset (converted to bits) to place them in the complete object.
  uint64_t OffsetBits = CGM.getContext().toBits(Offset);

  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && Val.getUnionField() != *Field)
      continue;

    // Don't emit anonymous bitfields, they just affect layout.
    if (Field->isUnnamedBitfield())
      continue;

    // Emit the value of the initializer.
    const APValue &FieldValue =
      RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
    llvm::Constant *EltInit =
      CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
    assert(EltInit && "EmitConstantValue can't fail");

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
    } else {
      // Otherwise we have a bitfield.
      AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
                     cast<llvm::ConstantInt>(EltInit));
    }
  }
}
|
|
|
|
|
|
|
|
/// Finish building the constant struct: apply any required tail padding or
/// packing so the emitted constant matches the AST record layout, then pick
/// the best LLVM struct type for the result.
llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
  RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  CharUnits LayoutSizeInChars = Layout.getSize();

  if (NextFieldOffsetInChars > LayoutSizeInChars) {
    // If the struct is bigger than the size of the record type,
    // we must have a flexible array member at the end.
    assert(RD->hasFlexibleArrayMember() &&
           "Must have flexible array member if struct is bigger than type!");

    // No tail padding is necessary.
  } else {
    // Append tail padding if necessary.
    CharUnits LLVMSizeInChars =
        NextFieldOffsetInChars.alignTo(LLVMStructAlignment);

    if (LLVMSizeInChars != LayoutSizeInChars)
      AppendTailPadding(LayoutSizeInChars);

    // Recompute the LLVM-visible size now that padding may have been added.
    LLVMSizeInChars = NextFieldOffsetInChars.alignTo(LLVMStructAlignment);

    // Check if we need to convert the struct to a packed struct.
    if (NextFieldOffsetInChars <= LayoutSizeInChars &&
        LLVMSizeInChars > LayoutSizeInChars) {
      assert(!Packed && "Size mismatch!");

      ConvertStructToPacked();
      assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
             "Converting to packed did not help!");
    }

    LLVMSizeInChars = NextFieldOffsetInChars.alignTo(LLVMStructAlignment);

    assert(LayoutSizeInChars == LLVMSizeInChars &&
           "Tail padding mismatch!");
  }

  // Pick the type to use.  If the type is layout identical to the ConvertType
  // type then use it, otherwise use whatever the builder produced for us.
  llvm::StructType *STy =
      llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
                                               Elements, Packed);
  llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
  if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
    if (ValSTy->isLayoutIdentical(STy))
      STy = ValSTy;
  }

  llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);

  // Sanity check: the constant we built must be exactly as large as the
  // builder's running offset, rounded up to the result's alignment.
  assert(NextFieldOffsetInChars.alignTo(getAlignment(Result)) ==
         getSizeInChars(Result) &&
         "Size mismatch!");

  return Result;
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2015-06-10 08:27:52 +08:00
|
|
|
/// Build a constant for a designated-initializer update: start from the
/// existing constant \p Base and apply the updates described by \p Updater.
/// Returns null if the result cannot be expressed as a constant.
llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
                                                CodeGenFunction *CGF,
                                                ConstExprEmitter *Emitter,
                                                llvm::ConstantStruct *Base,
                                                InitListExpr *Updater) {
  ConstStructBuilder Builder(CGM, CGF);
  if (Builder.Build(Emitter, Base, Updater))
    return Builder.Finalize(Updater->getType());
  return nullptr;
}
|
|
|
|
|
2012-01-14 12:30:29 +08:00
|
|
|
/// Build a constant struct from an initializer list expression.
/// Returns null if any initializer cannot be emitted as a constant.
llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
                                                CodeGenFunction *CGF,
                                                InitListExpr *ILE) {
  ConstStructBuilder Builder(CGM, CGF);
  if (Builder.Build(ILE))
    return Builder.Finalize(ILE->getType());
  return nullptr;
}
|
|
|
|
|
|
|
|
/// Build a constant struct from an already-evaluated APValue of record type.
llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
                                                CodeGenFunction *CGF,
                                                const APValue &Val,
                                                QualType ValTy) {
  ConstStructBuilder Builder(CGM, CGF);

  const RecordDecl *Record = ValTy->castAs<RecordType>()->getDecl();
  // For C++ records, Build also walks the base classes; CD is null for
  // plain C records.
  const CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record);
  Builder.Build(Val, Record, /*IsPrimaryBase=*/false, CXXRecord,
                CharUnits::Zero());

  return Builder.Finalize(ValTy);
}
|
|
|
|
|
|
|
|
|
2010-04-14 01:45:57 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ConstExprEmitter
|
|
|
|
//===----------------------------------------------------------------------===//
|
2012-01-18 05:42:19 +08:00
|
|
|
|
|
|
|
/// This class only needs to handle two cases:
|
|
|
|
/// 1) Literals (this is used by APValue emission to emit literals).
|
|
|
|
/// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
|
|
|
|
/// constant fold these types).
|
2009-11-29 03:45:26 +08:00
|
|
|
class ConstExprEmitter :
|
2008-01-26 12:30:23 +08:00
|
|
|
public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
|
2008-01-26 09:36:00 +08:00
|
|
|
CodeGenModule &CGM;
|
2008-02-27 05:41:45 +08:00
|
|
|
CodeGenFunction *CGF;
|
2009-07-15 07:10:40 +08:00
|
|
|
llvm::LLVMContext &VMContext;
|
2008-01-26 09:36:00 +08:00
|
|
|
public:
|
2008-02-27 05:41:45 +08:00
|
|
|
  // CGF may be null when emitting a constant outside any function context.
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-01-26 09:36:00 +08:00
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
// Visitor Methods
|
|
|
|
//===--------------------------------------------------------------------===//
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-01-26 09:36:00 +08:00
|
|
|
  // Default case: anything not handled by a more specific visitor is not a
  // constant we can emit.
  llvm::Constant *VisitStmt(Stmt *S) {
    return nullptr;
  }
|
2009-09-09 23:08:12 +08:00
|
|
|
|
|
|
|
llvm::Constant *VisitParenExpr(ParenExpr *PE) {
|
|
|
|
return Visit(PE->getSubExpr());
|
2008-01-26 09:36:00 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-07-15 13:09:51 +08:00
|
|
|
llvm::Constant *
|
|
|
|
VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
|
|
|
|
return Visit(PE->getReplacement());
|
|
|
|
}
|
|
|
|
|
2011-04-15 08:35:48 +08:00
|
|
|
llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
|
|
|
|
return Visit(GE->getResultExpr());
|
|
|
|
}
|
|
|
|
|
2013-07-17 06:40:53 +08:00
|
|
|
llvm::Constant *VisitChooseExpr(ChooseExpr *CE) {
|
2013-07-20 08:40:58 +08:00
|
|
|
return Visit(CE->getChosenSubExpr());
|
2013-07-17 06:40:53 +08:00
|
|
|
}
|
|
|
|
|
2008-01-26 09:36:00 +08:00
|
|
|
llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
|
|
|
|
return Visit(E->getInitializer());
|
|
|
|
}
|
2011-02-03 16:15:49 +08:00
|
|
|
|
2008-08-19 07:01:59 +08:00
|
|
|
  /// Emit a constant for a cast expression. Emits the operand as a constant
  /// first, then dispatches on the cast kind: some kinds pass the operand
  /// through, CK_ToUnion builds a padded struct, member-pointer conversions
  /// go through the C++ ABI, and the rest return null (either unsupported or
  /// handled by the constant evaluator instead).
  llvm::Constant *VisitCastExpr(CastExpr* E) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
      CGM.EmitExplicitCastExprType(ECE, CGF);
    Expr *subExpr = E->getSubExpr();
    llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
    if (!C) return nullptr;

    llvm::Type *destType = ConvertType(E->getType());

    switch (E->getCastKind()) {
    case CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");

      // Build a struct with the union sub-element as the first member,
      // and padded to the appropriate size
      SmallVector<llvm::Constant*, 2> Elts;
      SmallVector<llvm::Type*, 2> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destType);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        llvm::Type *Ty = CGM.Int8Ty;
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        // Padding contents are irrelevant, so undef is fine.
        Elts.push_back(llvm::UndefValue::get(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType* STy =
        llvm::StructType::get(C->getType()->getContext(), Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }

    case CK_AddressSpaceConversion:
      return llvm::ConstantExpr::getAddrSpaceCast(C, destType);

    // Casts that don't change the representation: pass the operand through.
    case CK_LValueToRValue:
    case CK_AtomicToNonAtomic:
    case CK_NonAtomicToAtomic:
    case CK_NoOp:
    case CK_ConstructorConversion:
      return C;

    case CK_IntToOCLSampler:
      llvm_unreachable("global sampler variables are not generated");

    case CK_Dependent: llvm_unreachable("saw dependent cast!");

    case CK_BuiltinFnToFnPtr:
      llvm_unreachable("builtin functions are handled elsewhere");

    // Member-pointer conversions are ABI-specific.
    case CK_ReinterpretMemberPointer:
    case CK_DerivedToBaseMemberPointer:
    case CK_BaseToDerivedMemberPointer:
      return CGM.getCXXABI().EmitMemberPointerConversion(E, C);

    // These will never be supported.
    case CK_ObjCObjectLValueCast:
    case CK_ARCProduceObject:
    case CK_ARCConsumeObject:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCExtendBlockObject:
    case CK_CopyAndAutoreleaseBlockObject:
      return nullptr;

    // These don't need to be handled here because Evaluate knows how to
    // evaluate them in the cases where they can be folded.
    case CK_BitCast:
    case CK_ToVoid:
    case CK_Dynamic:
    case CK_LValueBitCast:
    case CK_NullToMemberPointer:
    case CK_UserDefinedConversion:
    case CK_CPointerToObjCPointerCast:
    case CK_BlockPointerToObjCPointerCast:
    case CK_AnyPointerToBlockPointerCast:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_BaseToDerived:
    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
    case CK_MemberPointerToBoolean:
    case CK_VectorSplat:
    case CK_FloatingRealToComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToIntegralComplex:
    case CK_IntegralRealToComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToFloatingComplex:
    case CK_PointerToIntegral:
    case CK_PointerToBoolean:
    case CK_NullToPointer:
    case CK_IntegralCast:
    case CK_BooleanToSignedIntegral:
    case CK_IntegralToPointer:
    case CK_IntegralToBoolean:
    case CK_IntegralToFloating:
    case CK_FloatingToIntegral:
    case CK_FloatingToBoolean:
    case CK_FloatingCast:
    case CK_ZeroToOCLEvent:
      return nullptr;
    }
    llvm_unreachable("Invalid CastKind");
  }
|
2008-01-29 09:15:48 +08:00
|
|
|
|
2008-04-08 12:40:51 +08:00
|
|
|
llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
|
|
|
|
return Visit(DAE->getExpr());
|
|
|
|
}
|
2013-04-21 06:23:05 +08:00
|
|
|
|
|
|
|
  // A default member initializer is transparent: emit its initializer
  // expression.
  llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    // No need for a DefaultInitExprScope: we don't handle 'this' in a
    // constant expression.
    return Visit(DIE->getExpr());
  }
|
2008-04-08 12:40:51 +08:00
|
|
|
|
2016-06-22 04:29:17 +08:00
|
|
|
llvm::Constant *VisitExprWithCleanups(ExprWithCleanups *E) {
|
|
|
|
if (!E->cleanupsHaveSideEffects())
|
|
|
|
return Visit(E->getSubExpr());
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
2011-06-22 01:03:29 +08:00
|
|
|
llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
|
|
|
|
return Visit(E->GetTemporaryExpr());
|
|
|
|
}
|
|
|
|
|
2008-05-30 18:24:46 +08:00
|
|
|
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::ArrayType *AType =
|
2010-04-19 03:06:43 +08:00
|
|
|
cast<llvm::ArrayType>(ConvertType(ILE->getType()));
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *ElemTy = AType->getElementType();
|
2012-04-15 10:50:59 +08:00
|
|
|
unsigned NumInitElements = ILE->getNumInits();
|
2008-02-05 10:39:50 +08:00
|
|
|
unsigned NumElements = AType->getNumElements();
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
// Initialising an array requires us to automatically
|
2008-02-05 10:39:50 +08:00
|
|
|
// initialise any elements that have not been initialised explicitly
|
|
|
|
unsigned NumInitableElts = std::min(NumInitElements, NumElements);
|
|
|
|
|
2014-12-29 07:46:59 +08:00
|
|
|
// Initialize remaining array elements.
|
|
|
|
// FIXME: This doesn't handle member pointers correctly!
|
|
|
|
llvm::Constant *fillC;
|
|
|
|
if (Expr *filler = ILE->getArrayFiller())
|
|
|
|
fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
|
|
|
|
else
|
|
|
|
fillC = llvm::Constant::getNullValue(ElemTy);
|
|
|
|
if (!fillC)
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
// Try to use a ConstantAggregateZero if we can.
|
|
|
|
if (fillC->isNullValue() && !NumInitableElts)
|
|
|
|
return llvm::ConstantAggregateZero::get(AType);
|
|
|
|
|
2008-02-05 10:39:50 +08:00
|
|
|
// Copy initializer elements.
|
2012-02-07 08:13:27 +08:00
|
|
|
std::vector<llvm::Constant*> Elts;
|
|
|
|
Elts.reserve(NumInitableElts + NumElements);
|
2012-02-14 20:06:21 +08:00
|
|
|
|
2008-05-31 03:58:50 +08:00
|
|
|
bool RewriteType = false;
|
2012-02-14 20:06:21 +08:00
|
|
|
for (unsigned i = 0; i < NumInitableElts; ++i) {
|
2009-04-08 12:48:15 +08:00
|
|
|
Expr *Init = ILE->getInit(i);
|
|
|
|
llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
|
2009-02-18 02:43:32 +08:00
|
|
|
if (!C)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2008-05-31 03:58:50 +08:00
|
|
|
RewriteType |= (C->getType() != ElemTy);
|
2008-02-05 10:39:50 +08:00
|
|
|
Elts.push_back(C);
|
2008-01-26 09:36:00 +08:00
|
|
|
}
|
2008-05-31 03:58:50 +08:00
|
|
|
|
2011-04-21 08:27:41 +08:00
|
|
|
RewriteType |= (fillC->getType() != ElemTy);
|
2012-02-14 20:06:21 +08:00
|
|
|
Elts.resize(NumElements, fillC);
|
2008-02-05 10:39:50 +08:00
|
|
|
|
2008-05-31 03:58:50 +08:00
|
|
|
if (RewriteType) {
|
|
|
|
// FIXME: Try to avoid packing the array
|
2011-07-11 17:56:20 +08:00
|
|
|
std::vector<llvm::Type*> Types;
|
2012-02-07 08:13:27 +08:00
|
|
|
Types.reserve(NumInitableElts + NumElements);
|
2012-02-07 07:46:08 +08:00
|
|
|
for (unsigned i = 0, e = Elts.size(); i < e; ++i)
|
2008-05-31 03:58:50 +08:00
|
|
|
Types.push_back(Elts[i]->getType());
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
|
2009-08-06 07:18:46 +08:00
|
|
|
Types, true);
|
2009-07-28 06:29:56 +08:00
|
|
|
return llvm::ConstantStruct::get(SType, Elts);
|
2008-05-31 03:58:50 +08:00
|
|
|
}
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
return llvm::ConstantArray::get(AType, Elts);
|
2008-02-05 10:39:50 +08:00
|
|
|
}
|
|
|
|
|
2012-09-11 13:51:06 +08:00
|
|
|
  // Emit a constant for an init list of record (struct/union) type by
  // delegating to ConstStructBuilder.
  llvm::Constant *EmitRecordInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }
|
|
|
|
|
2009-01-30 14:13:25 +08:00
|
|
|
  // Implicit value-initialization is the null/zero constant for the type.
  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-05 10:39:50 +08:00
|
|
|
llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
|
2016-12-07 07:52:28 +08:00
|
|
|
if (ILE->isTransparent())
|
|
|
|
return Visit(ILE->getInit(0));
|
|
|
|
|
2008-05-30 18:24:46 +08:00
|
|
|
if (ILE->getType()->isArrayType())
|
|
|
|
return EmitArrayInitialization(ILE);
|
|
|
|
|
2012-09-05 16:37:43 +08:00
|
|
|
if (ILE->getType()->isRecordType())
|
2012-09-11 13:51:06 +08:00
|
|
|
return EmitRecordInitialization(ILE);
|
2012-09-05 16:37:43 +08:00
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2008-01-26 09:36:00 +08:00
|
|
|
}
|
2008-02-22 01:57:49 +08:00
|
|
|
|
2015-06-10 08:27:52 +08:00
|
|
|
  /// Rebuild the constant \p Base with the elements overridden by the
  /// designated-initializer updates in \p Updater (arrays here, records via
  /// ConstStructBuilder). NoInitExpr slots keep the corresponding Base
  /// element. Returns null when the update cannot be represented.
  llvm::Constant *EmitDesignatedInitUpdater(llvm::Constant *Base,
                                            InitListExpr *Updater) {
    QualType ExprType = Updater->getType();

    if (ExprType->isArrayType()) {
      llvm::ArrayType *AType = cast<llvm::ArrayType>(ConvertType(ExprType));
      llvm::Type *ElemType = AType->getElementType();

      unsigned NumInitElements = Updater->getNumInits();
      unsigned NumElements = AType->getNumElements();

      std::vector<llvm::Constant *> Elts;
      Elts.reserve(NumElements);

      // Seed Elts with the existing elements of Base.
      if (llvm::ConstantDataArray *DataArray =
            dyn_cast<llvm::ConstantDataArray>(Base))
        for (unsigned i = 0; i != NumElements; ++i)
          Elts.push_back(DataArray->getElementAsConstant(i));
      else if (llvm::ConstantArray *Array =
                 dyn_cast<llvm::ConstantArray>(Base))
        for (unsigned i = 0; i != NumElements; ++i)
          Elts.push_back(Array->getOperand(i));
      else
        return nullptr; // FIXME: other array types not implemented

      // Filler applies to slots past the explicit inits, unless the filler
      // is a NoInitExpr (which means "keep the Base element").
      llvm::Constant *fillC = nullptr;
      if (Expr *filler = Updater->getArrayFiller())
        if (!isa<NoInitExpr>(filler))
          fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
      bool RewriteType = (fillC && fillC->getType() != ElemType);

      for (unsigned i = 0; i != NumElements; ++i) {
        Expr *Init = nullptr;
        if (i < NumInitElements)
          Init = Updater->getInit(i);

        if (!Init && fillC)
          Elts[i] = fillC;
        else if (!Init || isa<NoInitExpr>(Init))
          ; // Do nothing.
        else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init))
          // Nested init list: recursively update the existing element.
          Elts[i] = EmitDesignatedInitUpdater(Elts[i], ChildILE);
        else
          Elts[i] = CGM.EmitConstantExpr(Init, Init->getType(), CGF);

        if (!Elts[i])
          return nullptr;
        RewriteType |= (Elts[i]->getType() != ElemType);
      }

      if (RewriteType) {
        // Element types diverged; emit a packed struct instead of an array.
        std::vector<llvm::Type *> Types;
        Types.reserve(NumElements);
        for (unsigned i = 0; i != NumElements; ++i)
          Types.push_back(Elts[i]->getType());
        llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
                                                        Types, true);
        return llvm::ConstantStruct::get(SType, Elts);
      }

      return llvm::ConstantArray::get(AType, Elts);
    }

    if (ExprType->isRecordType())
      return ConstStructBuilder::BuildStruct(CGM, CGF, this,
                 dyn_cast<llvm::ConstantStruct>(Base), Updater);

    return nullptr;
  }
|
|
|
|
|
|
|
|
llvm::Constant *VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
|
|
|
|
return EmitDesignatedInitUpdater(
|
|
|
|
CGM.EmitConstantExpr(E->getBase(), E->getType(), CGF),
|
|
|
|
E->getUpdater());
|
|
|
|
}
|
|
|
|
|
2010-02-02 16:02:49 +08:00
|
|
|
  /// Emit a constant for a C++ constructor call. Only trivial constructors
  /// on classes with trivial destructors can be folded: a trivial copy/move
  /// ctor becomes its argument's constant, a trivial default ctor becomes
  /// the null constant.
  llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
    if (!E->getConstructor()->isTrivial())
      return nullptr;

    QualType Ty = E->getType();

    // FIXME: We should not have to call getBaseElementType here.
    const RecordType *RT =
      CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

    // If the class doesn't have a trivial destructor, we can't emit it as a
    // constant expr.
    if (!RD->hasTrivialDestructor())
      return nullptr;

    // Only copy and default constructors can be trivial.
    if (E->getNumArgs()) {
      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      assert(E->getConstructor()->isCopyOrMoveConstructor() &&
             "trivial ctor has argument but isn't a copy/move ctor");

      Expr *Arg = E->getArg(0);
      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
             "argument to copy ctor is of wrong type");

      // A trivial copy/move is just the source value.
      return Visit(Arg);
    }

    // Trivial default construction: zero-initialize.
    return CGM.EmitNullConstant(Ty);
  }
|
|
|
|
|
2008-01-26 09:36:00 +08:00
|
|
|
  // A string literal used as an initializer becomes an inline constant
  // array of its characters.
  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    return CGM.GetConstantArrayFromStringLiteral(E);
  }
|
|
|
|
|
2009-02-25 06:18:39 +08:00
|
|
|
  /// Emit an @encode expression used as an array initializer: the encoding
  /// string's bytes are emitted inline, sized to the destination array.
  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    // Look through typeof(expr) sugar to find the concrete array type.
    QualType T = E->getType();
    if (T->getTypeClass() == Type::TypeOfExpr)
      T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
    const ConstantArrayType *CAT = cast<ConstantArrayType>(T);

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    // Not null-terminated: the array size already accounts for every byte.
    return llvm::ConstantDataArray::getString(VMContext, Str, false);
  }
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-05-29 19:22:45 +08:00
|
|
|
llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
|
|
|
|
return Visit(E->getSubExpr());
|
|
|
|
}
|
2009-02-20 06:01:56 +08:00
|
|
|
|
2008-01-26 09:36:00 +08:00
|
|
|
// Utility methods
|
2011-07-18 12:24:23 +08:00
|
|
|
  // Convenience wrapper: convert a clang type to its LLVM type via CGM.
  llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }
|
2008-01-26 12:30:23 +08:00
|
|
|
|
2008-11-16 14:23:45 +08:00
|
|
|
public:
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
ConstantAddress EmitLValue(APValue::LValueBase LVBase) {
|
2011-11-13 06:28:03 +08:00
|
|
|
if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
|
2010-03-05 05:26:03 +08:00
|
|
|
if (Decl->hasAttr<WeakRefAttr>())
|
2010-08-22 09:00:03 +08:00
|
|
|
return CGM.GetWeakRefReference(Decl);
|
2008-01-26 10:08:50 +08:00
|
|
|
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return ConstantAddress(CGM.GetAddrOfFunction(FD), CharUnits::One());
|
2008-04-16 06:42:06 +08:00
|
|
|
if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
|
2009-02-25 02:41:57 +08:00
|
|
|
// We can never refer to a variable with local storage.
|
2009-09-09 23:08:12 +08:00
|
|
|
if (!VD->hasLocalStorage()) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CharUnits Align = CGM.getContext().getDeclAlign(VD);
|
2009-02-25 02:41:57 +08:00
|
|
|
if (VD->isFileVarDecl() || VD->hasExternalStorage())
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return ConstantAddress(CGM.GetAddrOfGlobalVar(VD), Align);
|
|
|
|
else if (VD->isLocalVarDecl()) {
|
|
|
|
auto Ptr = CGM.getOrCreateStaticVarDecl(
|
2014-10-08 09:07:54 +08:00
|
|
|
*VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return ConstantAddress(Ptr, Align);
|
|
|
|
}
|
2008-04-16 06:42:06 +08:00
|
|
|
}
|
2008-02-27 05:41:45 +08:00
|
|
|
}
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return ConstantAddress::invalid();
|
2011-11-13 06:28:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
|
|
|
|
switch (E->getStmtClass()) {
|
|
|
|
default: break;
|
|
|
|
case Expr::CompoundLiteralExprClass: {
|
|
|
|
// Note that due to the nature of compound literals, this is guaranteed
|
|
|
|
// to be the only use of the variable, so we just generate it here.
|
|
|
|
CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
|
2011-12-22 08:04:00 +08:00
|
|
|
llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
|
|
|
|
CLE->getType(), CGF);
|
2011-11-13 06:28:03 +08:00
|
|
|
// FIXME: "Leaked" on failure.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
if (!C) return ConstantAddress::invalid();
|
|
|
|
|
|
|
|
CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
|
|
|
|
|
|
|
|
auto GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
|
2011-11-13 06:28:03 +08:00
|
|
|
E->getType().isConstant(CGM.getContext()),
|
|
|
|
llvm::GlobalValue::InternalLinkage,
|
2014-05-21 13:09:00 +08:00
|
|
|
C, ".compoundliteral", nullptr,
|
2012-06-23 19:51:46 +08:00
|
|
|
llvm::GlobalVariable::NotThreadLocal,
|
2011-11-13 06:28:03 +08:00
|
|
|
CGM.getContext().getTargetAddressSpace(E->getType()));
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
GV->setAlignment(Align.getQuantity());
|
|
|
|
return ConstantAddress(GV, Align);
|
2008-01-26 10:08:50 +08:00
|
|
|
}
|
2008-08-14 07:20:05 +08:00
|
|
|
case Expr::StringLiteralClass:
|
|
|
|
return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
|
2009-02-25 06:18:39 +08:00
|
|
|
case Expr::ObjCEncodeExprClass:
|
|
|
|
return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
|
2008-11-16 14:23:45 +08:00
|
|
|
case Expr::ObjCStringLiteralClass: {
|
|
|
|
ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
ConstantAddress C =
|
2010-01-23 10:40:42 +08:00
|
|
|
CGM.getObjCRuntime().GenerateConstantString(SL->getString());
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return C.getElementBitCast(ConvertType(E->getType()));
|
2008-11-16 14:23:45 +08:00
|
|
|
}
|
2008-12-12 13:18:02 +08:00
|
|
|
case Expr::PredefinedExprClass: {
|
2009-11-14 16:37:13 +08:00
|
|
|
unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
|
|
|
|
if (CGF) {
|
2010-08-21 11:01:12 +08:00
|
|
|
LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
|
2015-11-06 05:16:22 +08:00
|
|
|
return cast<ConstantAddress>(Res.getAddress());
|
2009-11-14 16:37:13 +08:00
|
|
|
} else if (Type == PredefinedExpr::PrettyFunction) {
|
|
|
|
return CGM.GetAddrOfConstantCString("top level", ".tmp");
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-11-14 16:37:13 +08:00
|
|
|
return CGM.GetAddrOfConstantCString("", ".tmp");
|
2008-12-12 13:18:02 +08:00
|
|
|
}
|
2009-01-25 09:21:06 +08:00
|
|
|
case Expr::AddrLabelExprClass: {
|
|
|
|
assert(CGF && "Invalid address of label expression outside function.");
|
2009-10-29 07:59:40 +08:00
|
|
|
llvm::Constant *Ptr =
|
|
|
|
CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Ptr = llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
|
|
|
|
return ConstantAddress(Ptr, CharUnits::One());
|
2009-01-25 09:21:06 +08:00
|
|
|
}
|
2009-01-25 09:54:01 +08:00
|
|
|
case Expr::CallExprClass: {
|
|
|
|
CallExpr* CE = cast<CallExpr>(E);
|
2013-12-29 05:59:02 +08:00
|
|
|
unsigned builtin = CE->getBuiltinCallee();
|
2010-01-23 10:40:42 +08:00
|
|
|
if (builtin !=
|
|
|
|
Builtin::BI__builtin___CFStringMakeConstantString &&
|
|
|
|
builtin !=
|
|
|
|
Builtin::BI__builtin___NSStringMakeConstantString)
|
2009-01-25 09:54:01 +08:00
|
|
|
break;
|
|
|
|
const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
|
|
|
|
const StringLiteral *Literal = cast<StringLiteral>(Arg);
|
2010-01-23 10:40:42 +08:00
|
|
|
if (builtin ==
|
|
|
|
Builtin::BI__builtin___NSStringMakeConstantString) {
|
|
|
|
return CGM.getObjCRuntime().GenerateConstantString(Literal);
|
|
|
|
}
|
2009-04-01 00:53:37 +08:00
|
|
|
// FIXME: need to deal with UCN conversion issues.
|
2009-04-01 21:55:36 +08:00
|
|
|
return CGM.GetAddrOfConstantCFString(Literal);
|
2009-01-25 09:54:01 +08:00
|
|
|
}
|
2009-02-19 09:01:04 +08:00
|
|
|
case Expr::BlockExprClass: {
|
2016-11-03 10:21:43 +08:00
|
|
|
StringRef FunctionName;
|
2009-03-01 09:09:12 +08:00
|
|
|
if (CGF)
|
|
|
|
FunctionName = CGF->CurFn->getName();
|
|
|
|
else
|
|
|
|
FunctionName = "global";
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
// This is not really an l-value.
|
|
|
|
llvm::Constant *Ptr =
|
2016-11-03 10:21:43 +08:00
|
|
|
CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return ConstantAddress(Ptr, CGM.getPointerAlign());
|
2009-02-19 09:01:04 +08:00
|
|
|
}
|
2011-12-27 20:18:28 +08:00
|
|
|
case Expr::CXXTypeidExprClass: {
|
|
|
|
CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
|
|
|
|
QualType T;
|
|
|
|
if (Typeid->isTypeOperand())
|
2013-09-27 15:04:31 +08:00
|
|
|
T = Typeid->getTypeOperand(CGM.getContext());
|
2011-12-27 20:18:28 +08:00
|
|
|
else
|
|
|
|
T = Typeid->getExprOperand()->getType();
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return ConstantAddress(CGM.GetAddrOfRTTIDescriptor(T),
|
|
|
|
CGM.getPointerAlign());
|
2011-12-27 20:18:28 +08:00
|
|
|
}
|
2012-10-11 18:13:44 +08:00
|
|
|
case Expr::CXXUuidofExprClass: {
|
|
|
|
return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
|
|
|
|
}
|
2013-06-05 08:46:14 +08:00
|
|
|
case Expr::MaterializeTemporaryExprClass: {
|
|
|
|
MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
|
|
|
|
assert(MTE->getStorageDuration() == SD_Static);
|
|
|
|
SmallVector<const Expr *, 2> CommaLHSs;
|
|
|
|
SmallVector<SubobjectAdjustment, 2> Adjustments;
|
|
|
|
const Expr *Inner = MTE->GetTemporaryExpr()
|
|
|
|
->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
|
|
|
|
return CGM.GetAddrOfGlobalTemporary(MTE, Inner);
|
|
|
|
}
|
2008-02-11 08:23:10 +08:00
|
|
|
}
|
2009-02-18 02:43:32 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return ConstantAddress::invalid();
|
2008-01-26 10:08:50 +08:00
|
|
|
}
|
|
|
|
};
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-01-26 09:36:00 +08:00
|
|
|
} // end anonymous namespace.
|
|
|
|
|
2015-06-10 08:27:52 +08:00
|
|
|
/// Update a constant struct value (\p Base) with the explicit initializers of
/// a designated-initializer update expression (\p Updater), appending the
/// resulting field values to this builder.
///
/// Returns false when the update cannot be represented this way — base
/// classes are present, the IR layout diverges from the AST layout, a
/// bitfield gets a non-trivial constant, or a sub-initializer fails to fold —
/// in which case the caller must fall back to another emission strategy.
bool ConstStructBuilder::Build(ConstExprEmitter *Emitter,
                               llvm::ConstantStruct *Base,
                               InitListExpr *Updater) {
  assert(Base && "base expression should not be empty");

  QualType UpdaterTy = Updater->getType();
  RecordDecl *Record = UpdaterTy->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &ASTLayout =
      CGM.getContext().getASTRecordLayout(Record);
  const llvm::StructLayout *IRLayout =
      CGM.getDataLayout().getStructLayout(Base->getType());

  unsigned FieldIdx = -1; // Pre-incremented at the top of the loop.
  unsigned EltIdx = 0;    // Index into both Base's operands and Updater.

  // Bail out if we have base classes. We could support these, but they only
  // arise in C++1z where we will have already constant folded most interesting
  // cases. FIXME: There are still a few more cases we can handle this way.
  if (auto *CXXRec = dyn_cast<CXXRecordDecl>(Record))
    if (CXXRec->getNumBases())
      return false;

  for (FieldDecl *Field : Record->fields()) {
    ++FieldIdx;

    // In a union only the single initialized member participates.
    if (Record->isUnion() && Updater->getInitializedFieldInUnion() != Field)
      continue;

    // Skip anonymous bitfields; they have no corresponding initializer.
    if (Field->isUnnamedBitfield())
      continue;

    llvm::Constant *EltVal = Base->getOperand(EltIdx);

    // Bail out if the type of the ConstantStruct does not have the same
    // layout as the type of the InitListExpr.
    if (CGM.getTypes().ConvertType(Field->getType()) != EltVal->getType() ||
        ASTLayout.getFieldOffset(EltIdx) !=
            IRLayout->getElementOffsetInBits(EltIdx))
      return false;

    // Get the initializer. A missing entry or a NoInitExpr means the value
    // from the base expression is kept unchanged.
    Expr *InitExpr = nullptr;
    if (EltIdx < Updater->getNumInits())
      InitExpr = Updater->getInit(EltIdx);

    if (InitExpr && !isa<NoInitExpr>(InitExpr)) {
      if (InitListExpr *SubILE = dyn_cast<InitListExpr>(InitExpr))
        EltVal = Emitter->EmitDesignatedInitUpdater(EltVal, SubILE);
      else
        EltVal = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
    }

    ++EltIdx;

    if (!EltVal)
      return false;

    if (!Field->isBitField())
      AppendField(Field, ASTLayout.getFieldOffset(FieldIdx), EltVal);
    else if (llvm::ConstantInt *IntVal = dyn_cast<llvm::ConstantInt>(EltVal))
      AppendBitField(Field, ASTLayout.getFieldOffset(FieldIdx), IntVal);
    else
      // Initializing a bitfield with a non-trivial constant?
      return false;
  }

  return true;
}
|
|
|
|
|
2012-01-14 12:30:29 +08:00
|
|
|
/// Try to emit the initializer of variable \p D as a constant.
/// Returns null when the initializer is not a constant expression, in which
/// case the caller must arrange for dynamic initialization.
llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
                                                CodeGenFunction *CGF) {
  // Fast path: a static-storage record (or array of records) built by a
  // trivial default constructor is simply all-zero. Checking this up front
  // avoids the generic evaluator, which for C++11 may materialize a full
  // all-NULL APValue first.
  if (!D.hasLocalStorage()) {
    QualType Ty = D.getType();
    if (Ty->isArrayType())
      Ty = Context.getBaseElementType(Ty);
    if (Ty->isRecordType())
      if (const CXXConstructExpr *Construct =
              dyn_cast_or_null<CXXConstructExpr>(D.getInit())) {
        const CXXConstructorDecl *Ctor = Construct->getConstructor();
        if (Ctor->isTrivial() && Ctor->isDefaultConstructor())
          return EmitNullConstant(D.getType());
      }
  }

  // Let the constant evaluator have the first try.
  if (const APValue *Value = D.evaluateValue())
    return EmitConstantValueForMemory(*Value, D.getType(), CGF);

  // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
  // reference is a constant expression, and the reference binds to a
  // temporary, then constant initialization is performed. ConstExprEmitter
  // will incorrectly emit a prvalue constant in this case, and the calling
  // code interprets that as the (pointer) value of the reference, rather
  // than the desired value of the referee.
  if (D.getType()->isReferenceType())
    return nullptr;

  const Expr *Init = D.getInit();
  assert(Init && "No initializer to emit");

  llvm::Constant *Result =
      ConstExprEmitter(*this, CGF).Visit(const_cast<Expr *>(Init));
  // bool folds to i1; widen it to its in-memory representation.
  if (Result && Result->getType()->isIntegerTy(1)) {
    llvm::Type *BoolTy = getTypes().ConvertTypeForMem(Init->getType());
    Result = llvm::ConstantExpr::getZExt(Result, BoolTy);
  }
  return Result;
}
|
|
|
|
|
2008-02-27 05:41:45 +08:00
|
|
|
/// Emit expression \p E as a constant of type \p DestType, or return null if
/// it cannot be folded.
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  // A reference is initialized from an lvalue; anything else from an rvalue.
  bool Success = DestType->isReferenceType()
                     ? E->EvaluateAsLValue(Result, Context)
                     : E->EvaluateAsRValue(Result, Context);

  // Prefer the evaluated APValue; otherwise fall back to the syntactic
  // constant emitter.
  llvm::Constant *C =
      (Success && !Result.HasSideEffects)
          ? EmitConstantValue(Result.Val, DestType, CGF)
          : ConstExprEmitter(*this, CGF).Visit(const_cast<Expr *>(E));

  // bool folds to i1; widen it to its in-memory representation.
  if (C && C->getType()->isIntegerTy(1)) {
    llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}
|
2009-02-20 05:44:24 +08:00
|
|
|
|
2012-01-14 12:30:29 +08:00
|
|
|
/// Lower an evaluated APValue into an LLVM constant of type \p DestType.
/// This is the scalar ("register") form; callers that store the result to
/// memory should use EmitConstantValueForMemory, which widens i1 bools.
llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
                                                 QualType DestType,
                                                 CodeGenFunction *CGF) {
  // For an _Atomic-qualified constant, we may need to add tail padding.
  if (auto *AT = DestType->getAs<AtomicType>()) {
    QualType InnerType = AT->getValueType();
    auto *Inner = EmitConstantValue(Value, InnerType, CGF);

    uint64_t InnerSize = Context.getTypeSize(InnerType);
    uint64_t OuterSize = Context.getTypeSize(DestType);
    if (InnerSize == OuterSize)
      return Inner;

    assert(InnerSize < OuterSize && "emitted over-large constant for atomic");
    // Pad out to the full atomic width with a zeroed i8 array.
    llvm::Constant *Elts[] = {
      Inner,
      llvm::ConstantAggregateZero::get(
        llvm::ArrayType::get(Int8Ty, (OuterSize - InnerSize) / 8))
    };
    return llvm::ConstantStruct::getAnon(Elts);
  }

  switch (Value.getKind()) {
  case APValue::Uninitialized:
    llvm_unreachable("Constant expressions should be initialized.");
  case APValue::LValue: {
    llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
    // Byte offset of the lvalue from its base, as an i64.
    llvm::Constant *Offset =
      llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());

    llvm::Constant *C = nullptr;
    if (APValue::LValueBase LVBase = Value.getLValueBase()) {
      // An array can be represented as an lvalue referring to the base.
      if (isa<llvm::ArrayType>(DestTy)) {
        assert(Offset->isNullValue() && "offset on array initializer");
        return ConstExprEmitter(*this, CGF).Visit(
          const_cast<Expr*>(LVBase.get<const Expr*>()));
      }

      C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase).getPointer();

      // Apply offset if necessary: bitcast to i8*, GEP by the byte offset,
      // then cast back to the base pointer type.
      if (!Offset->isNullValue()) {
        unsigned AS = C->getType()->getPointerAddressSpace();
        llvm::Type *CharPtrTy = Int8Ty->getPointerTo(AS);
        llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, CharPtrTy);
        Casted = llvm::ConstantExpr::getGetElementPtr(Int8Ty, Casted, Offset);
        C = llvm::ConstantExpr::getPointerCast(Casted, C->getType());
      }

      // Convert to the appropriate type; this could be an lvalue for
      // an integer.
      if (isa<llvm::PointerType>(DestTy))
        return llvm::ConstantExpr::getPointerCast(C, DestTy);

      return llvm::ConstantExpr::getPtrToInt(C, DestTy);
    } else {
      // No base: the "pointer" is just the literal offset value.
      C = Offset;

      // Convert to the appropriate type; this could be an lvalue for
      // an integer.
      if (isa<llvm::PointerType>(DestTy)) {
        // Convert the integer to a pointer-sized integer before converting it
        // to a pointer.
        C = llvm::ConstantExpr::getIntegerCast(
            C, getDataLayout().getIntPtrType(DestTy),
            /*isSigned=*/false);
        return llvm::ConstantExpr::getIntToPtr(C, DestTy);
      }

      // If the types don't match this should only be a truncate.
      if (C->getType() != DestTy)
        return llvm::ConstantExpr::getTrunc(C, DestTy);

      return C;
    }
  }
  case APValue::Int:
    return llvm::ConstantInt::get(VMContext, Value.getInt());
  case APValue::ComplexInt: {
    // A complex value is emitted as a two-element {real, imag} struct.
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantInt::get(VMContext,
                                        Value.getComplexIntReal());
    Complex[1] = llvm::ConstantInt::get(VMContext,
                                        Value.getComplexIntImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
                                                  Complex[1]->getType(),
                                                  nullptr);
    return llvm::ConstantStruct::get(STy, Complex);
  }
  case APValue::Float: {
    const llvm::APFloat &Init = Value.getFloat();
    // Without native half support, IEEE half is stored as its i16 bit
    // pattern rather than as an LLVM half constant.
    if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf() &&
        !Context.getLangOpts().NativeHalfType &&
        !Context.getLangOpts().HalfArgsAndReturns)
      return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
    else
      return llvm::ConstantFP::get(VMContext, Init);
  }
  case APValue::ComplexFloat: {
    // A complex value is emitted as a two-element {real, imag} struct.
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantFP::get(VMContext,
                                       Value.getComplexFloatReal());
    Complex[1] = llvm::ConstantFP::get(VMContext,
                                       Value.getComplexFloatImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
                                                  Complex[1]->getType(),
                                                  nullptr);
    return llvm::ConstantStruct::get(STy, Complex);
  }
  case APValue::Vector: {
    unsigned NumElts = Value.getVectorLength();
    SmallVector<llvm::Constant *, 4> Inits(NumElts);

    for (unsigned I = 0; I != NumElts; ++I) {
      const APValue &Elt = Value.getVectorElt(I);
      if (Elt.isInt())
        Inits[I] = llvm::ConstantInt::get(VMContext, Elt.getInt());
      else if (Elt.isFloat())
        Inits[I] = llvm::ConstantFP::get(VMContext, Elt.getFloat());
      else
        llvm_unreachable("unsupported vector element type");
    }
    return llvm::ConstantVector::get(Inits);
  }
  case APValue::AddrLabelDiff: {
    const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
    const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
    llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
    llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);

    // Compute difference
    llvm::Type *ResultType = getTypes().ConvertType(DestType);
    LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
    RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
    llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);

    // LLVM is a bit sensitive about the exact format of the
    // address-of-label difference; make sure to truncate after
    // the subtraction.
    return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
  }
  case APValue::Struct:
  case APValue::Union:
    return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
  case APValue::Array: {
    const ArrayType *CAT = Context.getAsArrayType(DestType);
    unsigned NumElements = Value.getArraySize();
    unsigned NumInitElts = Value.getArrayInitializedElts();

    // Emit array filler, if there is one.
    llvm::Constant *Filler = nullptr;
    if (Value.hasArrayFiller())
      Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
                                          CAT->getElementType(), CGF);

    // Emit initializer elements.
    llvm::Type *CommonElementType =
        getTypes().ConvertType(CAT->getElementType());

    // Try to use a ConstantAggregateZero if we can.
    if (Filler && Filler->isNullValue() && !NumInitElts) {
      llvm::ArrayType *AType =
          llvm::ArrayType::get(CommonElementType, NumElements);
      return llvm::ConstantAggregateZero::get(AType);
    }

    std::vector<llvm::Constant*> Elts;
    Elts.reserve(NumElements);
    for (unsigned I = 0; I < NumElements; ++I) {
      llvm::Constant *C = Filler;
      if (I < NumInitElts)
        C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
                                       CAT->getElementType(), CGF);
      else
        assert(Filler && "Missing filler for implicit elements of initializer");
      // Track whether every element shares one IR type; if not, the result
      // must be emitted as a packed struct instead of an array.
      if (I == 0)
        CommonElementType = C->getType();
      else if (C->getType() != CommonElementType)
        CommonElementType = nullptr;
      Elts.push_back(C);
    }

    if (!CommonElementType) {
      // FIXME: Try to avoid packing the array
      std::vector<llvm::Type*> Types;
      Types.reserve(NumElements);
      for (unsigned i = 0, e = Elts.size(); i < e; ++i)
        Types.push_back(Elts[i]->getType());
      llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    llvm::ArrayType *AType =
        llvm::ArrayType::get(CommonElementType, NumElements);
    return llvm::ConstantArray::get(AType, Elts);
  }
  case APValue::MemberPointer:
    // Member pointer representation is ABI-specific; delegate to the C++ ABI.
    return getCXXABI().EmitMemberPointer(Value, DestType);
  }
  llvm_unreachable("Unknown APValue kind");
}
|
2009-04-14 05:47:26 +08:00
|
|
|
|
2012-03-03 07:27:11 +08:00
|
|
|
/// Emit \p Value as a constant suitable for storing into memory of type
/// \p DestType. This differs from EmitConstantValue only for _Bool, whose
/// rvalue representation (i1) is narrower than its in-memory representation.
llvm::Constant *
CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
                                          QualType DestType,
                                          CodeGenFunction *CGF) {
  llvm::Constant *Result = EmitConstantValue(Value, DestType, CGF);

  // Booleans are emitted as i1 rvalues; widen them to the in-memory type
  // (typically i8) with a zero-extension before they can be stored.
  if (Result->getType()->isIntegerTy(1))
    Result = llvm::ConstantExpr::getZExt(
        Result, getTypes().ConvertTypeForMem(DestType));

  return Result;
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
/// Return the address of a global constant holding the value of the given
/// file-scope compound literal expression.
ConstantAddress
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
  assert(E->isFileScope() && "not a file-scope compound literal expr");
  // No CodeGenFunction: file-scope literals are emitted outside any function.
  ConstExprEmitter Emitter(*this, /*CGF=*/nullptr);
  return Emitter.EmitLValue(E);
}
|
|
|
|
|
2011-02-03 16:15:49 +08:00
|
|
|
/// Emit the constant member pointer produced by a unary '&' applied
/// directly to a class member (e.g. '&Class::member').
llvm::Constant *
CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
  // Member pointer constants always have a very particular form: the
  // operand of '&' is a DeclRefExpr naming the member itself.
  const auto *MPT = cast<MemberPointerType>(uo->getType());
  const ValueDecl *Member = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();

  // Pointers to member functions have an ABI-defined representation.
  if (const auto *Method = dyn_cast<CXXMethodDecl>(Member))
    return getCXXABI().EmitMemberFunctionPointer(Method);

  // Otherwise this is a pointer to a data member; its representation is
  // derived from the field's byte offset, again per the C++ ABI.
  uint64_t OffsetBits = getContext().getFieldOffset(Member);
  CharUnits Offset = getContext().toCharUnitsFromBits((int64_t)OffsetBits);
  return getCXXABI().EmitMemberDataPointer(MPT, Offset);
}
|
|
|
|
|
2011-02-15 14:40:56 +08:00
|
|
|
// Forward declaration: emits the null constant for a base-class subobject.
// Needed because it is mutually recursive with the static EmitNullConstant
// helper defined below (a base may itself contain non-trivially-null bases).
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               llvm::Type *baseType,
                                               const CXXRecordDecl *base);
|
2010-11-23 02:42:14 +08:00
|
|
|
|
2011-02-15 14:40:56 +08:00
|
|
|
/// Build the null constant for the given C++ record. A plain
/// zeroinitializer is not always correct: the null value of a member data
/// pointer is ABI-defined and may be nonzero (e.g. -1 in the Itanium ABI),
/// so any base or field containing one must be filled in explicitly.
///
/// \param record the class whose null value is being built
/// \param asCompleteObject if true, lay the record out as a complete object
///        (including virtual bases); otherwise as a base subobject.
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
                                        const CXXRecordDecl *record,
                                        bool asCompleteObject) {
  const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
  // Pick the IR struct type matching the requested layout flavor.
  llvm::StructType *structure =
    (asCompleteObject ? layout.getLLVMType()
                      : layout.getBaseSubobjectLLVMType());

  // Elements start out as null Constant* slots; anything still unset at the
  // end is zero-filled below.
  unsigned numElements = structure->getNumElements();
  std::vector<llvm::Constant *> elements(numElements);

  // Fill in all the bases.
  for (const auto &I : record->bases()) {
    if (I.isVirtual()) {
      // Ignore virtual bases; if we're laying out for a complete
      // object, we'll lay these out later.
      continue;
    }

    const CXXRecordDecl *base =
      cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

    // Ignore empty bases.
    if (base->isEmpty() ||
        CGM.getContext().getASTRecordLayout(base).getNonVirtualSize().isZero())
      continue;

    unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
    llvm::Type *baseType = structure->getElementType(fieldIndex);
    elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
  }

  // Fill in all the fields.
  for (const auto *Field : record->fields()) {
    // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
    // will fill in later.)
    if (!Field->isBitField()) {
      unsigned fieldIndex = layout.getLLVMFieldNo(Field);
      elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
    }

    // For unions, stop after the first named field.
    if (record->isUnion()) {
      if (Field->getIdentifier())
        break;
      // An anonymous member with a named data member also counts as the
      // union's active "first named" field for initialization purposes.
      if (const auto *FieldRD =
              dyn_cast_or_null<RecordDecl>(Field->getType()->getAsTagDecl()))
        if (FieldRD->findFirstNamedDataMember())
          break;
    }
  }

  // Fill in the virtual bases, if we're working with the complete object.
  if (asCompleteObject) {
    for (const auto &I : record->vbases()) {
      const CXXRecordDecl *base =
        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (base->isEmpty())
        continue;

      unsigned fieldIndex = layout.getVirtualBaseIndex(base);

      // We might have already laid this field out.
      if (elements[fieldIndex]) continue;

      llvm::Type *baseType = structure->getElementType(fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Now go through all other fields and zero them out.
  for (unsigned i = 0; i != numElements; ++i) {
    if (!elements[i])
      elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
  }

  return llvm::ConstantStruct::get(structure, elements);
}
|
|
|
|
|
|
|
|
/// Emit the null constant for a base subobject.
|
|
|
|
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *baseType,
|
2011-02-15 14:40:56 +08:00
|
|
|
const CXXRecordDecl *base) {
|
|
|
|
const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
|
|
|
|
|
|
|
|
// Just zero out bases that don't have any pointer to data members.
|
|
|
|
if (baseLayout.isZeroInitializableAsBase())
|
|
|
|
return llvm::Constant::getNullValue(baseType);
|
|
|
|
|
2014-10-17 09:00:43 +08:00
|
|
|
// Otherwise, we can just use its null constant.
|
|
|
|
return EmitNullConstant(CGM, base, /*asCompleteObject=*/false);
|
2010-11-23 02:42:14 +08:00
|
|
|
}
|
|
|
|
|
2009-04-14 05:47:26 +08:00
|
|
|
/// Emit the null constant for an object of type \p T. For most types this
/// is a simple zeroinitializer, but types containing member data pointers
/// require the ABI-defined null representation instead.
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  // Common case: an all-zero bit pattern is a valid null value for T.
  if (getTypes().isZeroInitializable(T))
    return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));

  // Constant arrays: build the element's null constant once and replicate.
  if (const ConstantArrayType *ArrTy = Context.getAsConstantArrayType(T)) {
    llvm::Constant *ElemNull = EmitNullConstant(ArrTy->getElementType());
    unsigned Count = ArrTy->getSize().getZExtValue();
    SmallVector<llvm::Constant *, 8> Elems(Count, ElemNull);
    auto *IRArrTy = cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
    return llvm::ConstantArray::get(IRArrTy, Elems);
  }

  // Records: only C++ classes can be non-trivially null-initializable
  // (they may contain member data pointers somewhere inside).
  if (const RecordType *RecTy = T->getAs<RecordType>()) {
    const auto *Class = cast<CXXRecordDecl>(RecTy->getDecl());
    return ::EmitNullConstant(*this, Class, /*asCompleteObject=*/true);
  }

  // The only remaining possibility is a pointer to a data member, whose
  // null value is supplied by the C++ ABI.
  assert(T->isMemberDataPointerType() &&
         "Should only see pointers to data members here!");

  return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}
|
2011-10-14 10:27:24 +08:00
|
|
|
|
|
|
|
/// Emit the null constant for \p Record laid out as a base subobject
/// (i.e. excluding virtual bases); thin public wrapper over the static
/// record-level EmitNullConstant helper.
llvm::Constant *
CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
  return ::EmitNullConstant(*this, Record, false);
}
|