2009-11-24 13:51:11 +08:00
|
|
|
//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
|
2009-09-12 12:27:24 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This contains code dealing with C++ code generation of classes
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2012-02-25 10:48:22 +08:00
|
|
|
#include "CGBlocks.h"
|
2010-08-12 05:04:37 +08:00
|
|
|
#include "CGDebugInfo.h"
|
2009-09-12 12:27:24 +08:00
|
|
|
#include "CodeGenFunction.h"
|
2009-10-07 06:43:30 +08:00
|
|
|
#include "clang/AST/CXXInheritance.h"
|
2010-09-17 10:31:44 +08:00
|
|
|
#include "clang/AST/EvaluatedExprVisitor.h"
|
2009-09-12 12:27:24 +08:00
|
|
|
#include "clang/AST/RecordLayout.h"
|
2010-02-19 17:25:03 +08:00
|
|
|
#include "clang/AST/StmtCXX.h"
|
2011-02-23 04:55:26 +08:00
|
|
|
#include "clang/Frontend/CodeGenOptions.h"
|
2009-10-07 06:43:30 +08:00
|
|
|
|
2009-09-12 12:27:24 +08:00
|
|
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
|
|
|
|
2011-03-22 08:53:26 +08:00
|
|
|
/// Walk a purely non-virtual inheritance path and accumulate the total
/// byte offset of the final base subobject within DerivedClass.
///
/// \param Context the AST context used to query record layouts.
/// \param DerivedClass the most-derived class the path starts from.
/// \param Start,End the [Start, End) range of base specifiers to walk;
///        none of them may be virtual.
static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
  CharUnits TotalOffset = CharUnits::Zero();

  // Step down the path one base specifier at a time, adding each base's
  // offset within its immediate parent class.
  const CXXRecordDecl *CurClass = DerivedClass;
  for (CastExpr::path_const_iterator PathI = Start; PathI != End; ++PathI) {
    const CXXBaseSpecifier *Spec = *PathI;
    assert(!Spec->isVirtual() && "Should not see virtual bases here!");

    const CXXRecordDecl *NextClass =
      cast<CXXRecordDecl>(Spec->getType()->getAs<RecordType>()->getDecl());

    // Look up where this base lives inside the current class, then descend
    // into it for the next step.
    const ASTRecordLayout &ParentLayout = Context.getASTRecordLayout(CurClass);
    TotalOffset += ParentLayout.getBaseClassOffset(NextClass);
    CurClass = NextClass;
  }

  return TotalOffset;
}
|
2009-09-12 12:27:24 +08:00
|
|
|
|
2010-04-25 05:23:59 +08:00
|
|
|
/// Return the non-virtual offset (as a pointer-difference-typed constant)
/// of the base class named by the path [PathBegin, PathEnd) within
/// ClassDecl, or null when the offset is zero.
llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  const CharUnits BaseOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);

  // A zero offset is represented by a null constant, matching what callers
  // test for.
  if (BaseOffset.isZero())
    return 0;

  // Materialize the offset with pointer-difference width.
  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());
  return llvm::ConstantInt::get(PtrDiffTy, BaseOffset.getQuantity());
}
|
|
|
|
|
2010-04-25 07:01:49 +08:00
|
|
|
/// Gets the address of a direct base class within a complete object.
|
2010-02-16 12:15:37 +08:00
|
|
|
/// This should only be used for (1) non-virtual bases or (2) virtual bases
|
|
|
|
/// when the type is known to be complete (e.g. in complete destructors).
|
|
|
|
///
|
|
|
|
/// The object pointed to by 'This' is assumed to be non-null.
|
|
|
|
llvm::Value *
|
2010-04-25 07:01:49 +08:00
|
|
|
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
|
|
|
|
const CXXRecordDecl *Derived,
|
|
|
|
const CXXRecordDecl *Base,
|
|
|
|
bool BaseIsVirtual) {
|
2010-02-16 12:15:37 +08:00
|
|
|
// 'this' must be a pointer (in some address space) to Derived.
|
|
|
|
assert(This->getType()->isPointerTy() &&
|
|
|
|
cast<llvm::PointerType>(This->getType())->getElementType()
|
|
|
|
== ConvertType(Derived));
|
|
|
|
|
|
|
|
// Compute the offset of the virtual base.
|
2011-03-22 09:21:15 +08:00
|
|
|
CharUnits Offset;
|
2010-02-16 12:15:37 +08:00
|
|
|
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
|
2010-04-25 07:01:49 +08:00
|
|
|
if (BaseIsVirtual)
|
2011-03-22 09:21:15 +08:00
|
|
|
Offset = Layout.getVBaseClassOffset(Base);
|
2010-02-16 12:15:37 +08:00
|
|
|
else
|
2011-03-22 09:21:15 +08:00
|
|
|
Offset = Layout.getBaseClassOffset(Base);
|
2010-02-16 12:15:37 +08:00
|
|
|
|
|
|
|
// Shift and cast down to the base type.
|
|
|
|
// TODO: for complete types, this should be possible with a GEP.
|
|
|
|
llvm::Value *V = This;
|
2011-03-22 09:21:15 +08:00
|
|
|
if (Offset.isPositive()) {
|
2010-02-16 12:15:37 +08:00
|
|
|
V = Builder.CreateBitCast(V, Int8PtrTy);
|
2011-03-22 09:21:15 +08:00
|
|
|
V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
|
2010-02-16 12:15:37 +08:00
|
|
|
}
|
|
|
|
V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
|
|
|
|
|
|
|
|
return V;
|
2010-03-29 03:40:00 +08:00
|
|
|
}
|
2010-02-16 12:15:37 +08:00
|
|
|
|
2010-04-21 00:03:35 +08:00
|
|
|
/// Add a static (non-virtual) and an optional dynamic (virtual) byte offset
/// to a pointer, returning the adjusted pointer as an inbounds i8* GEP.
///
/// At least one of the two offsets must be nonzero/non-null.
static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != 0);

  // Fold the static and dynamic components into a single offset value.
  llvm::Value *totalOffset;
  if (nonVirtualOffset.isZero()) {
    // Only the dynamic part is present.
    totalOffset = virtualOffset;
  } else {
    totalOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                         nonVirtualOffset.getQuantity());
    if (virtualOffset)
      totalOffset = CGF.Builder.CreateAdd(virtualOffset, totalOffset);
  }

  // Perform byte-wise pointer arithmetic on an i8*.
  llvm::Value *result = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  return CGF.Builder.CreateInBoundsGEP(result, totalOffset, "add.ptr");
}
|
|
|
|
|
2010-04-25 05:06:20 +08:00
|
|
|
/// Convert 'Value' (a pointer to Derived) to a pointer to the base class
/// named by the path [PathBegin, PathEnd), emitting the (possibly dynamic)
/// address adjustment.  If NullCheckValue is set, a null input pointer is
/// propagated as a null result via a branch-and-phi around the adjustment.
llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = 0;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    // Derived is final, so its layout fixes the virtual base's position;
    // fold the vbase offset into the static offset.
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = 0; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = 0;
  llvm::BasicBlock *endBB = 0;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    // Null input jumps straight to the end block; the phi below supplies
    // the null result for that edge.
    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = 0;
  if (VBase) {
    VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}
|
|
|
|
|
2009-11-24 01:57:54 +08:00
|
|
|
/// Convert 'Value' (a pointer to a base subobject) back to a pointer to the
/// derived class, by subtracting the non-virtual offset of the path
/// [PathBegin, PathEnd).  If NullCheckValue is set, a null input pointer is
/// propagated as a null result via a branch-and-phi around the adjustment.
/// Note: only the non-virtual offset is applied here; there is no dynamic
/// (virtual-base) adjustment in this function.
llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                          CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  // Constant byte offset of the base within Derived; null means zero.
  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = 0;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    // Branch around the pointer adjustment when the input is null.
    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  // Negate it because we are walking from base back up to derived.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    // Merge the adjusted pointer with a null value from the null path.
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}
|
2010-03-30 11:27:09 +08:00
|
|
|
|
2010-01-02 09:01:18 +08:00
|
|
|
/// GetVTTParameter - Return the VTT parameter that should be passed to a
/// base constructor/destructor with virtual bases.
///
/// Returns null when GD does not need a VTT at all.  Otherwise the result is
/// a GEP either into the VTT parameter the current function itself received,
/// or into the named VTT global of the current function's class.
static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
                                    bool ForVirtualBase) {
  if (!CodeGenVTables::needsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return 0;
  }

  // RD is the class whose ctor/dtor is currently being emitted; Base is the
  // class whose ctor/dtor we are about to call.
  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CGF.CurFuncDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  // If the record matches the base, this is the complete ctor/dtor
  // variant calling the base variant in a class with virtual bases.
  if (RD == Base) {
    assert(!CodeGenVTables::needsVTTParameter(CGF.CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    // Locate Base's sub-VTT within RD's VTT via its subobject offset.
    const ASTRecordLayout &Layout =
      CGF.getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGF.CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = CGF.LoadCXXVTT();
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGF.CGM.getVTables().GetAddrOfVTT(RD);
    VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}
|
|
|
|
|
2010-07-21 09:23:41 +08:00
|
|
|
namespace {
  /// Call the destructor for a direct base class.
  /// Pushed onto the EH cleanup stack so a partially-constructed object
  /// destroys its already-constructed bases if a later initializer throws.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass; // the base to destroy
    bool BaseIsVirtual;             // whether it is a virtual base
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // The enclosing function is a ctor/dtor of the derived class.
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      // Compute the base subobject's address from 'this' and invoke its
      // base-variant destructor.
      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    // Set to true once any reference to 'this' has been seen.
    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code? If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}
|
|
|
|
|
|
|
|
/// Return true if the given base-initializer expression refers to 'this'
/// (explicitly or implicitly), in which case the vtable pointers must be
/// initialized before the expression is evaluated.
static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker ThisChecker(C);
  ThisChecker.Visit(const_cast<Expr *>(Init));
  return ThisChecker.UsesThis;
}
|
|
|
|
|
2009-12-25 06:46:43 +08:00
|
|
|
/// Emit the initialization of a single base-class subobject from a
/// constructor's mem-initializer, and (when exceptions are enabled and the
/// base has a non-trivial destructor) push an EH cleanup that destroys it.
static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  // Evaluate the initializer directly into the base subobject's storage.
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  // If a later initializer throws, this cleanup destroys the
  // already-constructed base.
  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}
|
|
|
|
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
|
|
|
|
LValue LHS,
|
2012-02-14 10:15:49 +08:00
|
|
|
Expr *Init,
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
llvm::Value *ArrayIndexVar,
|
|
|
|
QualType T,
|
2012-02-14 10:15:49 +08:00
|
|
|
ArrayRef<VarDecl *> ArrayIndexes,
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
unsigned Index) {
|
2012-02-14 10:15:49 +08:00
|
|
|
if (Index == ArrayIndexes.size()) {
|
2011-12-03 08:54:26 +08:00
|
|
|
LValue LV = LHS;
|
2012-02-19 23:41:54 +08:00
|
|
|
{ // Scope for Cleanups.
|
|
|
|
CodeGenFunction::RunCleanupsScope Cleanups(CGF);
|
|
|
|
|
|
|
|
if (ArrayIndexVar) {
|
|
|
|
// If we have an array index variable, load it and use it as an offset.
|
|
|
|
// Then, increment the value.
|
|
|
|
llvm::Value *Dest = LHS.getAddress();
|
|
|
|
llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
|
|
|
|
Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
|
|
|
|
llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
|
|
|
|
Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
|
|
|
|
CGF.Builder.CreateStore(Next, ArrayIndexVar);
|
|
|
|
|
|
|
|
// Update the LValue.
|
|
|
|
LV.setAddress(Dest);
|
|
|
|
CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
|
|
|
|
LV.setAlignment(std::min(Align, LV.getAlignment()));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!CGF.hasAggregateLLVMType(T)) {
|
|
|
|
CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
|
|
|
|
} else if (T->isAnyComplexType()) {
|
|
|
|
CGF.EmitComplexExprIntoAddr(Init, LV.getAddress(),
|
|
|
|
LV.isVolatileQualified());
|
|
|
|
} else {
|
|
|
|
AggValueSlot Slot =
|
|
|
|
AggValueSlot::forLValue(LV,
|
|
|
|
AggValueSlot::IsDestructed,
|
|
|
|
AggValueSlot::DoesNotNeedGCBarriers,
|
2012-03-30 01:37:10 +08:00
|
|
|
AggValueSlot::IsNotAliased);
|
2012-02-19 23:41:54 +08:00
|
|
|
|
|
|
|
CGF.EmitAggExpr(Init, Slot);
|
|
|
|
}
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
}
|
2010-09-15 18:14:12 +08:00
|
|
|
|
2012-02-19 23:41:54 +08:00
|
|
|
// Now, outside of the initializer cleanup scope, destroy the backing array
|
|
|
|
// for a std::initializer_list member.
|
2012-02-20 00:03:09 +08:00
|
|
|
CGF.MaybeEmitStdInitializerListCleanup(LV.getAddress(), Init);
|
2012-02-19 23:41:54 +08:00
|
|
|
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
|
|
|
|
assert(Array && "Array initialization without the array type?");
|
|
|
|
llvm::Value *IndexVar
|
2012-02-14 10:15:49 +08:00
|
|
|
= CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
assert(IndexVar && "Array index variable not loaded");
|
|
|
|
|
|
|
|
// Initialize this index variable to zero.
|
|
|
|
llvm::Value* Zero
|
|
|
|
= llvm::Constant::getNullValue(
|
|
|
|
CGF.ConvertType(CGF.getContext().getSizeType()));
|
|
|
|
CGF.Builder.CreateStore(Zero, IndexVar);
|
|
|
|
|
|
|
|
// Start the loop with a block that tests the condition.
|
|
|
|
llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
|
|
|
|
llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
|
|
|
|
|
|
|
|
CGF.EmitBlock(CondBlock);
|
|
|
|
|
|
|
|
llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
|
|
|
|
// Generate: if (loop-index < number-of-elements) fall to the loop body,
|
|
|
|
// otherwise, go to the block after the for-loop.
|
|
|
|
uint64_t NumElements = Array->getSize().getZExtValue();
|
|
|
|
llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
|
2010-05-06 14:35:23 +08:00
|
|
|
llvm::Value *NumElementsPtr =
|
|
|
|
llvm::ConstantInt::get(Counter->getType(), NumElements);
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
|
|
|
|
"isless");
|
|
|
|
|
|
|
|
// If the condition is true, execute the body.
|
|
|
|
CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
|
|
|
|
|
|
|
|
CGF.EmitBlock(ForBody);
|
|
|
|
llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
|
|
|
|
|
|
|
|
{
|
2010-07-06 09:34:17 +08:00
|
|
|
CodeGenFunction::RunCleanupsScope Cleanups(CGF);
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
|
|
|
|
// Inside the loop body recurse to emit the inner loop or, eventually, the
|
|
|
|
// constructor call.
|
2012-02-14 10:15:49 +08:00
|
|
|
EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
|
|
|
|
Array->getElementType(), ArrayIndexes, Index + 1);
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
CGF.EmitBlock(ContinueBlock);
|
|
|
|
|
|
|
|
// Emit the increment of the loop counter.
|
|
|
|
llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
|
|
|
|
Counter = CGF.Builder.CreateLoad(IndexVar);
|
|
|
|
NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
|
|
|
|
CGF.Builder.CreateStore(NextVal, IndexVar);
|
|
|
|
|
|
|
|
// Finally, branch back up to the condition for the next iteration.
|
|
|
|
CGF.EmitBranch(CondBlock);
|
|
|
|
|
|
|
|
// Emit the fall-through block.
|
|
|
|
CGF.EmitBlock(AfterFor, true);
|
|
|
|
}
|
2010-07-21 09:23:41 +08:00
|
|
|
|
|
|
|
namespace {
|
2010-07-21 15:22:38 +08:00
|
|
|
struct CallMemberDtor : EHScopeStack::Cleanup {
|
2012-02-14 10:15:49 +08:00
|
|
|
llvm::Value *V;
|
2010-07-21 09:23:41 +08:00
|
|
|
CXXDestructorDecl *Dtor;
|
|
|
|
|
2012-02-14 10:15:49 +08:00
|
|
|
CallMemberDtor(llvm::Value *V, CXXDestructorDecl *Dtor)
|
|
|
|
: V(V), Dtor(Dtor) {}
|
2010-07-21 09:23:41 +08:00
|
|
|
|
2011-07-13 04:27:29 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) {
|
2010-07-21 09:23:41 +08:00
|
|
|
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
|
2012-02-14 10:15:49 +08:00
|
|
|
V);
|
2010-07-21 09:23:41 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
2011-08-31 03:58:05 +08:00
|
|
|
|
2009-12-25 06:46:43 +08:00
|
|
|
static void EmitMemberInitializer(CodeGenFunction &CGF,
|
|
|
|
const CXXRecordDecl *ClassDecl,
|
2011-01-09 04:30:50 +08:00
|
|
|
CXXCtorInitializer *MemberInit,
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
const CXXConstructorDecl *Constructor,
|
|
|
|
FunctionArgList &Args) {
|
2010-12-04 17:14:42 +08:00
|
|
|
assert(MemberInit->isAnyMemberInitializer() &&
|
2009-12-25 06:46:43 +08:00
|
|
|
"Must have member initializer!");
|
2011-06-12 01:19:42 +08:00
|
|
|
assert(MemberInit->getInit() && "Must have initializer!");
|
2009-12-25 06:46:43 +08:00
|
|
|
|
|
|
|
// non-static data member initializers.
|
2010-12-04 17:14:42 +08:00
|
|
|
FieldDecl *Field = MemberInit->getAnyMember();
|
2012-02-14 10:15:49 +08:00
|
|
|
QualType FieldType = Field->getType();
|
2009-12-25 06:46:43 +08:00
|
|
|
|
|
|
|
llvm::Value *ThisPtr = CGF.LoadCXXThis();
|
2012-04-16 11:54:45 +08:00
|
|
|
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
|
2012-08-08 11:51:37 +08:00
|
|
|
LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
|
2012-04-16 11:54:45 +08:00
|
|
|
|
2010-12-04 17:14:42 +08:00
|
|
|
if (MemberInit->isIndirectMemberInitializer()) {
|
2012-08-08 11:51:37 +08:00
|
|
|
// If we are initializing an anonymous union field, drill down to
|
|
|
|
// the field.
|
|
|
|
IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
|
|
|
|
IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
|
|
|
|
IEnd = IndirectField->chain_end();
|
|
|
|
for ( ; I != IEnd; ++I)
|
|
|
|
LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
|
2010-12-04 17:14:42 +08:00
|
|
|
FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
|
2010-05-21 09:18:57 +08:00
|
|
|
} else {
|
2012-08-08 11:51:37 +08:00
|
|
|
LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
|
2009-12-25 06:46:43 +08:00
|
|
|
}
|
|
|
|
|
2012-02-14 10:15:49 +08:00
|
|
|
// Special case: if we are in a copy or move constructor, and we are copying
|
|
|
|
// an array of PODs or classes with trivial copy constructors, ignore the
|
|
|
|
// AST and perform the copy we know is equivalent.
|
|
|
|
// FIXME: This is hacky at best... if we had a bit more explicit information
|
|
|
|
// in the AST, we could generalize it more easily.
|
|
|
|
const ConstantArrayType *Array
|
|
|
|
= CGF.getContext().getAsConstantArrayType(FieldType);
|
|
|
|
if (Array && Constructor->isImplicitlyDefined() &&
|
|
|
|
Constructor->isCopyOrMoveConstructor()) {
|
|
|
|
QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
|
2012-11-08 07:56:21 +08:00
|
|
|
CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
|
2012-02-14 10:15:49 +08:00
|
|
|
if (BaseElementTy.isPODType(CGF.getContext()) ||
|
2012-11-08 07:56:21 +08:00
|
|
|
(CE && CE->getConstructor()->isTrivial())) {
|
|
|
|
// Find the source pointer. We know it's the last argument because
|
|
|
|
// we know we're in an implicit copy constructor.
|
2012-02-14 10:15:49 +08:00
|
|
|
unsigned SrcArgIndex = Args.size() - 1;
|
|
|
|
llvm::Value *SrcPtr
|
|
|
|
= CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
|
2012-04-16 11:54:45 +08:00
|
|
|
LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
|
|
|
|
LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);
|
2012-02-14 10:15:49 +08:00
|
|
|
|
|
|
|
// Copy the aggregate.
|
|
|
|
CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
|
2012-03-30 01:37:10 +08:00
|
|
|
LHS.isVolatileQualified());
|
2012-02-14 10:15:49 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ArrayRef<VarDecl *> ArrayIndexes;
|
|
|
|
if (MemberInit->getNumArrayIndices())
|
|
|
|
ArrayIndexes = MemberInit->getArrayIndexes();
|
2012-02-14 10:31:03 +08:00
|
|
|
CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
|
2012-02-14 10:15:49 +08:00
|
|
|
}
|
|
|
|
|
2012-02-14 10:31:03 +08:00
|
|
|
void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
|
|
|
|
LValue LHS, Expr *Init,
|
|
|
|
ArrayRef<VarDecl *> ArrayIndexes) {
|
2012-02-14 10:15:49 +08:00
|
|
|
QualType FieldType = Field->getType();
|
2012-02-14 10:31:03 +08:00
|
|
|
if (!hasAggregateLLVMType(FieldType)) {
|
2011-06-16 07:02:42 +08:00
|
|
|
if (LHS.isSimple()) {
|
2012-02-14 10:31:03 +08:00
|
|
|
EmitExprAsInit(Init, Field, LHS, false);
|
2011-06-16 07:02:42 +08:00
|
|
|
} else {
|
2012-02-14 10:31:03 +08:00
|
|
|
RValue RHS = RValue::get(EmitScalarExpr(Init));
|
|
|
|
EmitStoreThroughLValue(RHS, LHS);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
2012-02-14 10:15:49 +08:00
|
|
|
} else if (FieldType->isAnyComplexType()) {
|
2012-02-14 10:31:03 +08:00
|
|
|
EmitComplexExprIntoAddr(Init, LHS.getAddress(), LHS.isVolatileQualified());
|
2009-12-25 06:46:43 +08:00
|
|
|
} else {
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
llvm::Value *ArrayIndexVar = 0;
|
2012-02-14 10:15:49 +08:00
|
|
|
if (ArrayIndexes.size()) {
|
2012-02-14 10:31:03 +08:00
|
|
|
llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
|
|
|
|
// The LHS is a pointer to the first object we'll be constructing, as
|
|
|
|
// a flat array.
|
2012-02-14 10:31:03 +08:00
|
|
|
QualType BaseElementTy = getContext().getBaseElementType(FieldType);
|
|
|
|
llvm::Type *BasePtr = ConvertType(BaseElementTy);
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
BasePtr = llvm::PointerType::getUnqual(BasePtr);
|
2012-02-14 10:31:03 +08:00
|
|
|
llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
|
|
|
|
BasePtr);
|
|
|
|
LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
|
|
|
|
// Create an array index that will be used to walk over all of the
|
|
|
|
// objects we're constructing.
|
2012-02-14 10:31:03 +08:00
|
|
|
ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
|
2012-02-14 10:31:03 +08:00
|
|
|
Builder.CreateStore(Zero, ArrayIndexVar);
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
|
|
|
|
|
|
|
|
// Emit the block variables for the array indices, if any.
|
2012-02-14 10:15:49 +08:00
|
|
|
for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
|
2012-02-14 10:31:03 +08:00
|
|
|
EmitAutoVarDecl(*ArrayIndexes[I]);
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
}
|
|
|
|
|
2012-02-14 10:31:03 +08:00
|
|
|
EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
|
2012-02-14 10:15:49 +08:00
|
|
|
ArrayIndexes, 0);
|
2010-02-07 03:50:17 +08:00
|
|
|
|
2012-03-11 15:00:24 +08:00
|
|
|
if (!CGM.getLangOpts().Exceptions)
|
2010-02-07 03:50:17 +08:00
|
|
|
return;
|
|
|
|
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
// FIXME: If we have an array of classes w/ non-trivial destructors,
|
|
|
|
// we need to destroy in reverse order of construction along the exception
|
|
|
|
// path.
|
2010-02-07 03:50:17 +08:00
|
|
|
const RecordType *RT = FieldType->getAs<RecordType>();
|
|
|
|
if (!RT)
|
|
|
|
return;
|
|
|
|
|
|
|
|
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
|
2010-07-21 09:23:41 +08:00
|
|
|
if (!RD->hasTrivialDestructor())
|
2012-02-14 10:31:03 +08:00
|
|
|
EHStack.pushCleanup<CallMemberDtor>(EHCleanup, LHS.getAddress(),
|
|
|
|
RD->getDestructor());
|
2009-12-25 06:46:43 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
/// Checks whether the given constructor is a valid subject for the
|
|
|
|
/// complete-to-base constructor delegation optimization, i.e.
|
|
|
|
/// emitting the complete constructor as a simple call to the base
|
|
|
|
/// constructor.
|
|
|
|
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
|
|
|
|
|
|
|
|
// Currently we disable the optimization for classes with virtual
|
|
|
|
// bases because (1) the addresses of parameter variables need to be
|
|
|
|
// consistent across all initializers but (2) the delegate function
|
|
|
|
// call necessarily creates a second copy of the parameter variable.
|
|
|
|
//
|
|
|
|
// The limiting example (purely theoretical AFAIK):
|
|
|
|
// struct A { A(int &c) { c++; } };
|
|
|
|
// struct B : virtual A {
|
|
|
|
// B(int count) : A(count) { printf("%d\n", count); }
|
|
|
|
// };
|
|
|
|
// ...although even this example could in principle be emitted as a
|
|
|
|
// delegation since the address of the parameter doesn't escape.
|
|
|
|
if (Ctor->getParent()->getNumVBases()) {
|
|
|
|
// TODO: white-list trivial vbase initializers. This case wouldn't
|
|
|
|
// be subject to the restrictions below.
|
|
|
|
|
|
|
|
// TODO: white-list cases where:
|
|
|
|
// - there are no non-reference parameters to the constructor
|
|
|
|
// - the initializers don't access any non-reference parameters
|
|
|
|
// - the initializers don't take the address of non-reference
|
|
|
|
// parameters
|
|
|
|
// - etc.
|
|
|
|
// If we ever add any of the above cases, remember that:
|
|
|
|
// - function-try-blocks will always blacklist this optimization
|
|
|
|
// - we need to perform the constructor prologue and cleanup in
|
|
|
|
// EmitConstructorBody.
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// We also disable the optimization for variadic functions because
|
|
|
|
// it's impossible to "re-pass" varargs.
|
|
|
|
if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
|
|
|
|
return false;
|
|
|
|
|
2011-05-01 15:04:31 +08:00
|
|
|
// FIXME: Decide if we can do a delegation of a delegating constructor.
|
|
|
|
if (Ctor->isDelegatingConstructor())
|
|
|
|
return false;
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-02-19 17:25:03 +08:00
|
|
|
/// EmitConstructorBody - Emits the body of the current constructor.
|
|
|
|
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
|
|
|
|
const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
|
|
|
|
CXXCtorType CtorType = CurGD.getCtorType();
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
// Before we go any further, try the complete->base constructor
|
|
|
|
// delegation optimization.
|
2012-04-20 16:05:00 +08:00
|
|
|
if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
|
|
|
|
CGM.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
|
2010-08-12 05:04:37 +08:00
|
|
|
if (CGDebugInfo *DI = getDebugInfo())
|
2011-10-14 05:45:18 +08:00
|
|
|
DI->EmitLocation(Builder, Ctor->getLocEnd());
|
2010-02-23 08:48:20 +08:00
|
|
|
EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-02-19 17:25:03 +08:00
|
|
|
Stmt *Body = Ctor->getBody();
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
// Enter the function-try-block before the constructor prologue if
|
|
|
|
// applicable.
|
|
|
|
bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
|
|
|
|
if (IsTryBody)
|
2010-07-07 14:56:46 +08:00
|
|
|
EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
|
2010-02-19 17:25:03 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
|
2010-02-19 17:25:03 +08:00
|
|
|
|
2012-03-30 12:25:03 +08:00
|
|
|
// TODO: in restricted cases, we can emit the vbase initializers of
|
|
|
|
// a complete ctor and then delegate to the base ctor.
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
// Emit the constructor prologue, i.e. the base and member
|
|
|
|
// initializers.
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
EmitCtorPrologue(Ctor, CtorType, Args);
|
2010-02-19 17:25:03 +08:00
|
|
|
|
|
|
|
// Emit the body of the statement.
|
2010-02-23 08:48:20 +08:00
|
|
|
if (IsTryBody)
|
2010-02-19 17:25:03 +08:00
|
|
|
EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
|
|
|
|
else if (Body)
|
|
|
|
EmitStmt(Body);
|
|
|
|
|
|
|
|
// Emit any cleanup blocks associated with the member or base
|
|
|
|
// initializers, which includes (along the exceptional path) the
|
|
|
|
// destructors for those members and bases that were fully
|
|
|
|
// constructed.
|
2010-07-06 09:34:17 +08:00
|
|
|
PopCleanupBlocks(CleanupDepth);
|
2010-02-19 17:25:03 +08:00
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
if (IsTryBody)
|
2010-07-07 14:56:46 +08:00
|
|
|
ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
|
2010-02-19 17:25:03 +08:00
|
|
|
}
|
|
|
|
|
2009-12-25 06:46:43 +08:00
|
|
|
/// EmitCtorPrologue - This routine generates necessary code to initialize
|
|
|
|
/// base classes and non-static data members belonging to this constructor.
|
|
|
|
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
CXXCtorType CtorType,
|
|
|
|
FunctionArgList &Args) {
|
2011-05-01 15:04:31 +08:00
|
|
|
if (CD->isDelegatingConstructor())
|
|
|
|
return EmitDelegatingCXXConstructorCall(CD, Args);
|
|
|
|
|
2009-12-25 06:46:43 +08:00
|
|
|
const CXXRecordDecl *ClassDecl = CD->getParent();
|
2010-02-03 03:58:43 +08:00
|
|
|
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<CXXCtorInitializer *, 8> MemberInitializers;
|
2009-12-25 06:46:43 +08:00
|
|
|
|
|
|
|
for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
|
|
|
|
E = CD->init_end();
|
|
|
|
B != E; ++B) {
|
2011-01-09 04:30:50 +08:00
|
|
|
CXXCtorInitializer *Member = (*B);
|
2009-12-25 06:46:43 +08:00
|
|
|
|
2011-05-04 04:19:28 +08:00
|
|
|
if (Member->isBaseInitializer()) {
|
2009-12-25 06:46:43 +08:00
|
|
|
EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
|
2011-05-04 04:19:28 +08:00
|
|
|
} else {
|
|
|
|
assert(Member->isAnyMemberInitializer() &&
|
|
|
|
"Delegating initializer on non-delegating constructor");
|
2010-02-03 03:58:43 +08:00
|
|
|
MemberInitializers.push_back(Member);
|
2011-05-04 04:19:28 +08:00
|
|
|
}
|
2009-12-25 06:46:43 +08:00
|
|
|
}
|
|
|
|
|
2010-03-29 05:07:49 +08:00
|
|
|
InitializeVTablePointers(ClassDecl);
|
2010-02-03 03:58:43 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I)
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
|
2009-12-25 06:46:43 +08:00
|
|
|
}
|
|
|
|
|
2011-05-16 01:36:21 +08:00
|
|
|
static bool
|
|
|
|
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);
|
|
|
|
|
|
|
|
static bool
|
|
|
|
HasTrivialDestructorBody(ASTContext &Context,
|
|
|
|
const CXXRecordDecl *BaseClassDecl,
|
|
|
|
const CXXRecordDecl *MostDerivedClassDecl)
|
|
|
|
{
|
|
|
|
// If the destructor is trivial we don't have to check anything else.
|
|
|
|
if (BaseClassDecl->hasTrivialDestructor())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (!BaseClassDecl->getDestructor()->hasTrivialBody())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check fields.
|
|
|
|
for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
|
|
|
|
E = BaseClassDecl->field_end(); I != E; ++I) {
|
2012-06-07 04:45:41 +08:00
|
|
|
const FieldDecl *Field = *I;
|
2011-05-16 01:36:21 +08:00
|
|
|
|
|
|
|
if (!FieldHasTrivialDestructorBody(Context, Field))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check non-virtual bases.
|
|
|
|
for (CXXRecordDecl::base_class_const_iterator I =
|
|
|
|
BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
|
|
|
|
I != E; ++I) {
|
|
|
|
if (I->isVirtual())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
const CXXRecordDecl *NonVirtualBase =
|
|
|
|
cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
|
|
|
|
if (!HasTrivialDestructorBody(Context, NonVirtualBase,
|
|
|
|
MostDerivedClassDecl))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (BaseClassDecl == MostDerivedClassDecl) {
|
|
|
|
// Check virtual bases.
|
|
|
|
for (CXXRecordDecl::base_class_const_iterator I =
|
|
|
|
BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
|
|
|
|
I != E; ++I) {
|
|
|
|
const CXXRecordDecl *VirtualBase =
|
|
|
|
cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
|
|
|
|
if (!HasTrivialDestructorBody(Context, VirtualBase,
|
|
|
|
MostDerivedClassDecl))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
FieldHasTrivialDestructorBody(ASTContext &Context,
|
|
|
|
const FieldDecl *Field)
|
|
|
|
{
|
|
|
|
QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
|
|
|
|
|
|
|
|
const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
|
|
|
|
if (!RT)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
|
|
|
|
return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
|
|
|
|
}
|
|
|
|
|
2011-05-15 07:26:09 +08:00
|
|
|
/// CanSkipVTablePointerInitialization - Check whether we need to initialize
|
|
|
|
/// any vtable pointers before calling this destructor.
|
|
|
|
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
|
2011-05-16 12:08:36 +08:00
|
|
|
const CXXDestructorDecl *Dtor) {
|
2011-05-15 07:26:09 +08:00
|
|
|
if (!Dtor->hasTrivialBody())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check the fields.
|
|
|
|
const CXXRecordDecl *ClassDecl = Dtor->getParent();
|
|
|
|
for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
|
|
|
|
E = ClassDecl->field_end(); I != E; ++I) {
|
2012-06-07 04:45:41 +08:00
|
|
|
const FieldDecl *Field = *I;
|
2011-05-15 07:26:09 +08:00
|
|
|
|
2011-05-16 01:36:21 +08:00
|
|
|
if (!FieldHasTrivialDestructorBody(Context, Field))
|
|
|
|
return false;
|
2011-05-15 07:26:09 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-02-19 17:25:03 +08:00
|
|
|
/// EmitDestructorBody - Emits the body of the current destructor.
|
|
|
|
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
|
|
|
|
const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
|
|
|
|
CXXDtorType DtorType = CurGD.getDtorType();
|
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
// The call to operator delete in a deleting destructor happens
|
|
|
|
// outside of the function-try-block, which means it's always
|
|
|
|
// possible to delegate the destructor body to the complete
|
|
|
|
// destructor. Do so.
|
|
|
|
if (DtorType == Dtor_Deleting) {
|
|
|
|
EnterDtorCleanups(Dtor, Dtor_Deleting);
|
|
|
|
EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
|
|
|
|
LoadCXXThis());
|
|
|
|
PopCleanupBlock();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-02-19 17:25:03 +08:00
|
|
|
Stmt *Body = Dtor->getBody();
|
|
|
|
|
|
|
|
// If the body is a function-try-block, enter the try before
|
2010-07-21 13:30:47 +08:00
|
|
|
// anything else.
|
|
|
|
bool isTryBody = (Body && isa<CXXTryStmt>(Body));
|
2010-02-19 17:25:03 +08:00
|
|
|
if (isTryBody)
|
2010-07-07 14:56:46 +08:00
|
|
|
EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
|
2010-02-19 17:25:03 +08:00
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
// Enter the epilogue cleanups.
|
|
|
|
RunCleanupsScope DtorEpilogue(*this);
|
|
|
|
|
2010-02-19 17:25:03 +08:00
|
|
|
// If this is the complete variant, just invoke the base variant;
|
|
|
|
// the epilogue will destruct the virtual bases. But we can't do
|
|
|
|
// this optimization if the body is a function-try-block, because
|
|
|
|
// we'd introduce *two* handler blocks.
|
2010-07-21 13:30:47 +08:00
|
|
|
switch (DtorType) {
|
|
|
|
case Dtor_Deleting: llvm_unreachable("already handled deleting case");
|
|
|
|
|
|
|
|
case Dtor_Complete:
|
|
|
|
// Enter the cleanup scopes for virtual bases.
|
|
|
|
EnterDtorCleanups(Dtor, Dtor_Complete);
|
|
|
|
|
2012-04-20 16:05:00 +08:00
|
|
|
if (!isTryBody && CGM.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
|
2010-07-21 13:30:47 +08:00
|
|
|
EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
|
|
|
|
LoadCXXThis());
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// Fallthrough: act like we're in the base variant.
|
2010-02-19 17:25:03 +08:00
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
case Dtor_Base:
|
|
|
|
// Enter the cleanup scopes for fields and non-virtual bases.
|
|
|
|
EnterDtorCleanups(Dtor, Dtor_Base);
|
|
|
|
|
|
|
|
// Initialize the vtable pointers before entering the body.
|
2011-05-15 07:26:09 +08:00
|
|
|
if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
|
|
|
|
InitializeVTablePointers(Dtor->getParent());
|
2010-02-19 17:25:03 +08:00
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
if (isTryBody)
|
|
|
|
EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
|
|
|
|
else if (Body)
|
|
|
|
EmitStmt(Body);
|
|
|
|
else {
|
|
|
|
assert(Dtor->isImplicit() && "bodyless dtor not implicit");
|
|
|
|
// nothing to do besides what's in the epilogue
|
|
|
|
}
|
2011-02-03 07:12:46 +08:00
|
|
|
// -fapple-kext must inline any call to this dtor into
|
|
|
|
// the caller's body.
|
2012-11-02 06:30:59 +08:00
|
|
|
if (getLangOpts().AppleKext)
|
2012-10-10 11:13:20 +08:00
|
|
|
CurFn->addFnAttr(llvm::Attributes::AlwaysInline);
|
2010-07-21 13:30:47 +08:00
|
|
|
break;
|
2010-02-19 17:25:03 +08:00
|
|
|
}
|
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
// Jump out through the epilogue cleanups.
|
|
|
|
DtorEpilogue.ForceCleanup();
|
2010-02-19 17:25:03 +08:00
|
|
|
|
|
|
|
// Exit the try if applicable.
|
|
|
|
if (isTryBody)
|
2010-07-07 14:56:46 +08:00
|
|
|
ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
|
2010-02-19 17:25:03 +08:00
|
|
|
}
|
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
namespace {
|
|
|
|
/// Call the operator delete associated with the current destructor.
|
2010-07-21 15:22:38 +08:00
|
|
|
struct CallDtorDelete : EHScopeStack::Cleanup {
|
2010-07-21 13:30:47 +08:00
|
|
|
CallDtorDelete() {}
|
|
|
|
|
2011-07-13 04:27:29 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) {
|
2010-07-21 13:30:47 +08:00
|
|
|
const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
|
|
|
|
const CXXRecordDecl *ClassDecl = Dtor->getParent();
|
|
|
|
CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
|
|
|
|
CGF.getContext().getTagDeclType(ClassDecl));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2011-07-13 00:41:08 +08:00
|
|
|
class DestroyField : public EHScopeStack::Cleanup {
|
|
|
|
const FieldDecl *field;
|
2012-01-26 11:33:36 +08:00
|
|
|
CodeGenFunction::Destroyer *destroyer;
|
2011-07-13 00:41:08 +08:00
|
|
|
bool useEHCleanupForArray;
|
|
|
|
|
|
|
|
public:
|
|
|
|
DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
|
|
|
|
bool useEHCleanupForArray)
|
2012-01-26 11:33:36 +08:00
|
|
|
: field(field), destroyer(destroyer),
|
2011-07-13 00:41:08 +08:00
|
|
|
useEHCleanupForArray(useEHCleanupForArray) {}
|
|
|
|
|
2011-07-13 04:27:29 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) {
|
2011-07-13 00:41:08 +08:00
|
|
|
// Find the address of the field.
|
|
|
|
llvm::Value *thisValue = CGF.LoadCXXThis();
|
2012-04-16 11:54:45 +08:00
|
|
|
QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
|
|
|
|
LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
|
|
|
|
LValue LV = CGF.EmitLValueForField(ThisLV, field);
|
2011-07-13 00:41:08 +08:00
|
|
|
assert(LV.isSimple());
|
|
|
|
|
|
|
|
CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
|
2011-07-13 04:27:29 +08:00
|
|
|
flags.isForNormalCleanup() && useEHCleanupForArray);
|
2010-07-21 13:30:47 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2009-12-25 06:46:43 +08:00
|
|
|
/// EmitDtorEpilogue - Emit all code that comes at the end of class's
|
|
|
|
/// destructor. This is to call destructors on members and base classes
|
|
|
|
/// in reverse order of their construction.
|
2010-07-21 13:30:47 +08:00
|
|
|
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
|
|
|
|
CXXDtorType DtorType) {
|
2009-12-25 06:46:43 +08:00
|
|
|
assert(!DD->isTrivial() &&
|
|
|
|
"Should not emit dtor epilogue for trivial dtor!");
|
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
// The deleting-destructor phase just needs to call the appropriate
|
|
|
|
// operator delete that Sema picked up.
|
2010-02-19 03:59:28 +08:00
|
|
|
if (DtorType == Dtor_Deleting) {
|
|
|
|
assert(DD->getOperatorDelete() &&
|
|
|
|
"operator delete missing - EmitDtorEpilogue");
|
2010-07-21 15:22:38 +08:00
|
|
|
EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
|
2010-02-19 03:59:28 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
const CXXRecordDecl *ClassDecl = DD->getParent();
|
|
|
|
|
2011-09-18 20:11:43 +08:00
|
|
|
// Unions have no bases and do not call field destructors.
|
|
|
|
if (ClassDecl->isUnion())
|
|
|
|
return;
|
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
// The complete-destructor phase just destructs all the virtual bases.
|
2010-02-19 03:59:28 +08:00
|
|
|
if (DtorType == Dtor_Complete) {
|
2010-07-21 13:30:47 +08:00
|
|
|
|
|
|
|
// We push them in the forward order so that they'll be popped in
|
|
|
|
// the reverse order.
|
|
|
|
for (CXXRecordDecl::base_class_const_iterator I =
|
|
|
|
ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
|
2010-02-19 03:59:28 +08:00
|
|
|
I != E; ++I) {
|
|
|
|
const CXXBaseSpecifier &Base = *I;
|
|
|
|
CXXRecordDecl *BaseClassDecl
|
|
|
|
= cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
|
|
|
|
|
|
|
|
// Ignore trivial destructors.
|
|
|
|
if (BaseClassDecl->hasTrivialDestructor())
|
|
|
|
continue;
|
2010-07-21 13:30:47 +08:00
|
|
|
|
2010-07-21 15:22:38 +08:00
|
|
|
EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
|
|
|
|
BaseClassDecl,
|
|
|
|
/*BaseIsVirtual*/ true);
|
2010-02-19 03:59:28 +08:00
|
|
|
}
|
2010-07-21 13:30:47 +08:00
|
|
|
|
2010-02-19 03:59:28 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(DtorType == Dtor_Base);
|
2010-07-21 13:30:47 +08:00
|
|
|
|
|
|
|
// Destroy non-virtual bases.
|
|
|
|
for (CXXRecordDecl::base_class_const_iterator I =
|
|
|
|
ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
|
|
|
|
const CXXBaseSpecifier &Base = *I;
|
|
|
|
|
|
|
|
// Ignore virtual bases.
|
|
|
|
if (Base.isVirtual())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
|
|
|
|
|
|
|
|
// Ignore trivial destructors.
|
|
|
|
if (BaseClassDecl->hasTrivialDestructor())
|
|
|
|
continue;
|
2010-02-19 03:59:28 +08:00
|
|
|
|
2010-07-21 15:22:38 +08:00
|
|
|
EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
|
|
|
|
BaseClassDecl,
|
|
|
|
/*BaseIsVirtual*/ false);
|
2010-07-21 13:30:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Destroy direct fields.
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<const FieldDecl *, 16> FieldDecls;
|
2009-12-25 06:46:43 +08:00
|
|
|
for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
|
|
|
|
E = ClassDecl->field_end(); I != E; ++I) {
|
2012-06-07 04:45:41 +08:00
|
|
|
const FieldDecl *field = *I;
|
2011-07-13 00:41:08 +08:00
|
|
|
QualType type = field->getType();
|
|
|
|
QualType::DestructionKind dtorKind = type.isDestructedType();
|
|
|
|
if (!dtorKind) continue;
|
|
|
|
|
2012-02-26 17:11:52 +08:00
|
|
|
// Anonymous union members do not have their destructors called.
|
|
|
|
const RecordType *RT = type->getAsUnionType();
|
|
|
|
if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
|
|
|
|
|
2011-07-13 00:41:08 +08:00
|
|
|
CleanupKind cleanupKind = getCleanupKind(dtorKind);
|
|
|
|
EHStack.pushCleanup<DestroyField>(cleanupKind, field,
|
|
|
|
getDestroyer(dtorKind),
|
|
|
|
cleanupKind & EHCleanup);
|
2009-12-25 06:46:43 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-13 14:10:41 +08:00
|
|
|
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
|
|
|
|
/// constructor for each of several members of an array.
|
2010-07-21 09:10:17 +08:00
|
|
|
///
|
2011-07-13 14:10:41 +08:00
|
|
|
/// \param ctor the constructor to call for each element
|
|
|
|
/// \param argBegin,argEnd the arguments to evaluate and pass to the
|
|
|
|
/// constructor
|
|
|
|
/// \param arrayType the type of the array to initialize
|
|
|
|
/// \param arrayBegin an arrayType*
|
|
|
|
/// \param zeroInitialize true if each element should be
|
|
|
|
/// zero-initialized before it is constructed
|
2010-01-02 04:29:01 +08:00
|
|
|
void
|
2011-07-13 14:10:41 +08:00
|
|
|
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
|
|
|
|
const ConstantArrayType *arrayType,
|
|
|
|
llvm::Value *arrayBegin,
|
|
|
|
CallExpr::const_arg_iterator argBegin,
|
|
|
|
CallExpr::const_arg_iterator argEnd,
|
|
|
|
bool zeroInitialize) {
|
|
|
|
QualType elementType;
|
|
|
|
llvm::Value *numElements =
|
|
|
|
emitArrayLength(arrayType, elementType, arrayBegin);
|
|
|
|
|
|
|
|
EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
|
|
|
|
argBegin, argEnd, zeroInitialize);
|
2010-01-02 04:29:01 +08:00
|
|
|
}
|
|
|
|
|
2011-07-13 14:10:41 +08:00
|
|
|
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
/// may be zero
/// \param argBegin,argEnd the arguments to evaluate and pass to the
/// constructor
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
/// zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            llvm::Value *numElements,
                                            llvm::Value *arrayBegin,
                                            CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {
  // It's legal for numElements to be zero. This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays. There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = 0;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors are provisionally the loop block: the real "skip"
    // target (arrayctor.cont) doesn't exist yet, so successor 0 is
    // patched below once it has been created.
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.

  {
    // Per-element cleanup scope: default-argument temporaries die here,
    // before the next element is constructed.
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
                           cur, argBegin, argEnd);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}
|
|
|
|
|
2011-07-09 09:37:26 +08:00
|
|
|
/// Destroyer callback: run the complete-object destructor of the class
/// object of type \p type located at \p addr.  The destructor must be
/// non-trivial (trivial destruction requires no code at all).
void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const CXXRecordDecl *classDecl =
    cast<CXXRecordDecl>(type->castAs<RecordType>()->getDecl());
  const CXXDestructorDecl *destructor = classDecl->getDestructor();
  assert(!destructor->isTrivial());
  CGF.EmitCXXDestructorCall(destructor, Dtor_Complete, /*for vbase*/ false,
                            addr);
}
|
|
|
|
|
2010-01-02 04:29:01 +08:00
|
|
|
/// Emit a call to constructor D of kind Type on the object at This.
/// Trivial constructors are lowered directly (no call, or a plain
/// aggregate copy); everything else is emitted as a member call,
/// passing a VTT parameter when the ABI requires one.
void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {

  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo) {
    // If debug info for this class has not been emitted then this is the
    // right time to do so.
    const CXXRecordDecl *Parent = D->getParent();
    DI->getOrCreateRecordType(CGM.getContext().getTypeDeclType(Parent),
                              Parent->getLocation());
  }

  if (D->isTrivial()) {
    if (ArgBeg == ArgEnd) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    // A trivial one-argument constructor is a copy/move: lower it to a
    // memberwise aggregate copy instead of emitting a call.
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *E = (*ArgBeg);
    QualType Ty = E->getType();
    llvm::Value *Src = EmitLValue(E).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // Non-trivial: emit an actual call, with the VTT parameter if the
  // constructor variant needs one.
  llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type), ForVirtualBase);
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);

  // FIXME: Provide a source location here.
  EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(), This,
                    VTT, ArgBeg, ArgEnd);
}
|
|
|
|
|
2010-11-14 05:53:34 +08:00
|
|
|
/// Emit a call to the copy constructor D, copying from Src into This,
/// for a compiler-synthesized copy.  The first expression in
/// [ArgBeg, ArgEnd) corresponds to the source object (already supplied
/// as Src) and is skipped; any remaining expressions are extra
/// (e.g. default) arguments.
void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                        llvm::Value *This, llvm::Value *Src,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  if (D->isTrivial()) {
    // Trivial copy/move: just do an aggregate copy, no call.
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
    return;
  }
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D,
                                                    clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr, bitcast to the parameter type the prototype expects.
  QualType QT = *(FPT->arg_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over first argument (Src).
  ++ArgBeg;
  CallExpr::const_arg_iterator Arg = ArgBeg;
  // Walk the remaining prototype parameter types in lockstep with the
  // argument expressions.
  for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
       E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
    assert(Arg != ArgEnd && "Running over edge of argument list!");
    EmitCallArg(Args, *Arg, *I);
  }
  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((Arg == ArgEnd || FPT->isVariadic()) &&
         "Extra arguments in non-variadic function!");
  // If we still have any arguments, emit them using the type of the argument.
  for (; Arg != ArgEnd; ++Arg) {
    QualType ArgType = Arg->getType();
    EmitCallArg(Args, *Arg, ArgType);
  }

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
/// Forward the current function's parameters to the CtorType variant of
/// Ctor (e.g. from a complete-object constructor to the base-object
/// constructor).  Args is the current function's own parameter list;
/// 'this' is reloaded, the VTT is recomputed for the target variant, and
/// all explicit parameters are forwarded unchanged.
void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    // If the current function also received a VTT parameter, skip past it
    // in the source parameter list so it isn't forwarded twice.
    if (CodeGenVTables::needsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    EmitDelegateCallArg(DelegateArgs, param);
  }

  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
           ReturnValueSlot(), DelegateArgs, Ctor);
}
|
|
|
|
|
2011-05-04 07:05:34 +08:00
|
|
|
namespace {
|
|
|
|
struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
|
|
|
|
const CXXDestructorDecl *Dtor;
|
|
|
|
llvm::Value *Addr;
|
|
|
|
CXXDtorType Type;
|
|
|
|
|
|
|
|
CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
|
|
|
|
CXXDtorType Type)
|
|
|
|
: Dtor(D), Addr(Addr), Type(Type) {}
|
|
|
|
|
2011-07-13 04:27:29 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) {
|
2011-05-04 07:05:34 +08:00
|
|
|
CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
|
|
|
|
Addr);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2011-05-01 15:04:31 +08:00
|
|
|
/// Emit the body of a C++11 delegating constructor: evaluate the single
/// mem-initializer (the target constructor call) directly into *this,
/// and, when exceptions are enabled, arrange to destroy *this if a later
/// exception unwinds past the completed delegation.
void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  // A delegating constructor has exactly one initializer: the call to
  // the target constructor.  Evaluate it into the 'this' slot.
  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    // Pick the destructor variant matching the constructor variant we
    // are currently emitting.
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}
|
2011-05-01 15:04:31 +08:00
|
|
|
|
2010-01-02 04:29:01 +08:00
|
|
|
/// Emit a call to destructor DD of kind Type on the object at This,
/// passing a VTT when the variant requires one.  Under -fapple-kext the
/// call may instead be routed through the kext vtable scheme.
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            llvm::Value *This) {
  llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
                                     ForVirtualBase);
  llvm::Value *Callee = 0;
  if (getLangOpts().AppleKext)
    Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
                                                 DD->getParent());

  // Fall back to the destructor's direct address if no kext callee was
  // produced (including the non-kext case).
  if (!Callee)
    Callee = CGM.GetAddrOfCXXDestructor(DD, Type);

  // FIXME: Provide a source location here.
  EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
                    VTT, 0, 0);
}
|
|
|
|
|
2010-07-21 09:41:18 +08:00
|
|
|
namespace {
|
2010-07-21 15:22:38 +08:00
|
|
|
struct CallLocalDtor : EHScopeStack::Cleanup {
|
2010-07-21 09:41:18 +08:00
|
|
|
const CXXDestructorDecl *Dtor;
|
|
|
|
llvm::Value *Addr;
|
|
|
|
|
|
|
|
CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
|
|
|
|
: Dtor(D), Addr(Addr) {}
|
|
|
|
|
2011-07-13 04:27:29 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) {
|
2010-07-21 09:41:18 +08:00
|
|
|
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
|
|
|
|
/*ForVirtualBase=*/false, Addr);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2010-07-21 14:29:51 +08:00
|
|
|
/// Push a cleanup (active on both normal and EH paths) that runs
/// destructor D on the object at Addr.
void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
/// Convenience overload: push a destructor cleanup for an object of type
/// T at Addr, doing nothing for non-class types or classes with trivial
/// destructors.
void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *record = T->getAsCXXRecordDecl();
  if (!record || record->hasTrivialDestructor())
    return;

  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(dtor && dtor->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(dtor, Addr);
}
|
|
|
|
|
2010-01-02 04:29:01 +08:00
|
|
|
/// Load the dynamic offset of virtual base BaseClassDecl within an object
/// of class ClassDecl pointed to by This.  The offset lives in a slot of
/// the object's vtable whose position is known statically from the
/// vtable layout.
llvm::Value *
CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
                                           const CXXRecordDecl *ClassDecl,
                                           const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy);
  CharUnits SlotOffset =
    CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                      BaseClassDecl);

  // Index into the vtable bytes, then reinterpret the slot as ptrdiff_t*.
  llvm::Value *SlotPtr =
    Builder.CreateConstGEP1_64(VTable, SlotOffset.getQuantity(),
                               "vbase.offset.ptr");
  llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
  SlotPtr = Builder.CreateBitCast(SlotPtr, PtrDiffTy->getPointerTo());

  return Builder.CreateLoad(SlotPtr, "vbase.offset");
}
|
|
|
|
|
2010-03-29 03:40:00 +08:00
|
|
|
/// Store the correct vtable address point into the vptr of the subobject
/// Base of the object under construction.  The address point comes either
/// from the VTT (for bases of a not-most-derived object) or directly from
/// VTableClass's vtable layout.
void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         llvm::Constant *VTable,
                                         const CXXRecordDecl *VTableClass) {
  const CXXRecordDecl *RD = Base.getBase();

  // Compute the address point.
  llvm::Value *VTableAddressPoint;

  // Check if we need to use a vtable from the VTT.
  if (CodeGenVTables::needsVTTParameter(CurGD) &&
      (RD->getNumVBases() || NearestVBase)) {
    // Get the secondary vpointer index.
    uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

    /// Load the VTT.
    llvm::Value *VTT = LoadCXXVTT();
    if (VirtualPointerIndex)
      VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

    // And load the address point from the VTT.
    VTableAddressPoint = Builder.CreateLoad(VTT);
  } else {
    // No VTT needed: take the address point straight out of the vtable.
    uint64_t AddressPoint =
      CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
    VTableAddressPoint =
      Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
  }

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = 0;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.
    VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point, tagging the store with
  // vtable-pointer TBAA metadata.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
|
|
|
|
|
2010-03-29 05:07:49 +08:00
|
|
|
/// Recursively initialize the vtable pointers of the subobject Base and
/// all of its dynamic bases, visiting each virtual base only once
/// (tracked in VBases).  Non-virtual primary bases share their derived
/// class's vptr and are skipped.
void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          llvm::Constant *VTable,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTable, VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I->isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      // Virtual base offsets are relative to the complete object, so use
      // the layout of the class whose vtable we are installing.
      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I->isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTable, VTableClass, VBases);
  }
}
|
|
|
|
|
2010-03-29 05:07:49 +08:00
|
|
|
void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
|
|
|
|
// Ignore classes without a vtable.
|
2010-03-26 12:39:42 +08:00
|
|
|
if (!RD->isDynamicClass())
|
2010-01-02 04:29:01 +08:00
|
|
|
return;
|
|
|
|
|
2010-03-29 05:07:49 +08:00
|
|
|
// Get the VTable.
|
|
|
|
llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);
|
2010-01-02 04:29:01 +08:00
|
|
|
|
2010-03-29 05:07:49 +08:00
|
|
|
// Initialize the vtable pointers for this class and all of its bases.
|
|
|
|
VisitedVirtualBasesSetTy VBases;
|
2011-03-24 09:21:01 +08:00
|
|
|
InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
|
|
|
|
/*NearestVBase=*/0,
|
2011-03-23 09:04:18 +08:00
|
|
|
/*OffsetFromNearestVBase=*/CharUnits::Zero(),
|
2010-03-29 05:07:49 +08:00
|
|
|
/*BaseIsNonVirtualPrimaryBase=*/false,
|
|
|
|
VTable, RD, VBases);
|
2010-01-02 04:29:01 +08:00
|
|
|
}
|
2010-10-27 02:44:08 +08:00
|
|
|
|
|
|
|
/// Load the vtable pointer stored at the start of the object This,
/// treating the object as a pointer-to-Ty, and tag the load with
/// vtable-pointer TBAA metadata.
llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *Slot = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *Load = Builder.CreateLoad(Slot, "vtable");
  CGM.DecorateInstruction(Load, CGM.getTBAAInfoForVTablePtr());
  return Load;
}
|
2011-05-09 04:32:23 +08:00
|
|
|
|
|
|
|
/// Strip parentheses and derived-to-base / no-op casts from Base, then
/// return the class of the remaining expression's static type (looking
/// through one level of pointer).
static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
  const Expr *Inner = Base;

  for (;;) {
    Inner = Inner->IgnoreParens();
    const CastExpr *Cast = dyn_cast<CastExpr>(Inner);
    if (!Cast)
      break;
    CastKind Kind = Cast->getCastKind();
    if (Kind != CK_DerivedToBase && Kind != CK_UncheckedDerivedToBase &&
        Kind != CK_NoOp)
      break;
    Inner = Cast->getSubExpr();
  }

  QualType DerivedType = Inner->getType();
  if (const PointerType *Ptr = DerivedType->getAs<PointerType>())
    DerivedType = Ptr->getPointeeType();

  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}
|
|
|
|
|
|
|
|
// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
|
|
|
|
// quite what we want.
|
|
|
|
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
|
|
|
|
while (true) {
|
|
|
|
if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
|
|
|
|
E = PE->getSubExpr();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
|
|
|
|
if (CE->getCastKind() == CK_NoOp) {
|
|
|
|
E = CE->getSubExpr();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
|
|
|
|
if (UO->getOpcode() == UO_Extension) {
|
|
|
|
E = UO->getSubExpr();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return E;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
|
|
|
|
/// function call on the given expr can be devirtualized.
|
|
|
|
static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
|
|
|
|
const CXXMethodDecl *MD) {
|
|
|
|
// If the most derived class is marked final, we know that no subclass can
|
|
|
|
// override this member function and so we can devirtualize it. For example:
|
|
|
|
//
|
|
|
|
// struct A { virtual void f(); }
|
|
|
|
// struct B final : A { };
|
|
|
|
//
|
|
|
|
// void f(B *b) {
|
|
|
|
// b->f();
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
|
|
|
|
if (MostDerivedClassDecl->hasAttr<FinalAttr>())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// If the member function is marked 'final', we know that it can't be
|
|
|
|
// overridden and can therefore devirtualize it.
|
|
|
|
if (MD->hasAttr<FinalAttr>())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Similarly, if the class itself is marked 'final' it can't be overridden
|
|
|
|
// and we can therefore devirtualize the member function call.
|
|
|
|
if (MD->getParent()->hasAttr<FinalAttr>())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
Base = skipNoOpCastsAndParens(Base);
|
|
|
|
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
|
|
|
|
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
|
|
|
|
// This is a record decl. We know the type and can devirtualize it.
|
|
|
|
return VD->getType()->isRecordType();
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// We can always devirtualize calls on temporary object expressions.
|
|
|
|
if (isa<CXXConstructExpr>(Base))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// And calls on bound temporaries.
|
|
|
|
if (isa<CXXBindTemporaryExpr>(Base))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Check if this is a call expr that returns a record type.
|
|
|
|
if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
|
|
|
|
return CE->getCallReturnType()->isRecordType();
|
|
|
|
|
|
|
|
// We can't devirtualize the call.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Decide whether the operator call CE to method MD must be emitted as a
/// virtual (vtable) call rather than a direct call.
static bool UseVirtualCall(ASTContext &Context,
                           const CXXOperatorCallExpr *CE,
                           const CXXMethodDecl *MD) {
  // Only virtual methods can require virtual dispatch.  When building
  // with -fapple-kext, all calls must go through the vtable since the
  // kernel linker can do runtime patching of vtables; otherwise, go
  // direct whenever the object argument's dynamic type is provable.
  return MD->isVirtual() &&
         (Context.getLangOpts().AppleKext ||
          !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD));
}
|
|
|
|
|
|
|
|
/// Compute the callee for an overloaded-operator member call: either a
/// virtual lookup through the vtable or the method's direct address.
llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                             const CXXMethodDecl *MD,
                                             llvm::Value *This) {
  const CGFunctionInfo &fnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(MD);
  llvm::FunctionType *calleeType = CGM.getTypes().GetFunctionType(fnInfo);

  if (UseVirtualCall(getContext(), E, MD))
    return BuildVirtualCall(MD, This, calleeType);
  return CGM.GetAddrOfFunction(MD, calleeType);
}
|
2012-02-16 09:37:33 +08:00
|
|
|
|
2012-07-07 14:41:13 +08:00
|
|
|
/// Emit a direct call to the call operator of the lambda class 'lambda',
/// passing the pre-built argument list 'callArgs', and forward its
/// result out of the current function.
void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *lambda,
                                                 CallArgList &callArgs) {
  // Lookup the call operator
  DeclarationName operatorName
    = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
  CXXMethodDecl *callOperator =
    cast<CXXMethodDecl>(*lambda->lookup(operatorName).first);

  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot: reuse our own sret slot when the callee
  // returns an aggregate indirectly.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getResultType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      hasAggregateLLVMType(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}
|
|
|
|
|
2012-02-25 10:48:22 +08:00
|
|
|
/// Emit the body of a block that wraps a lambda: fetch the captured
/// lambda object out of the block, then forward the block's parameters
/// to the lambda's call operator.
void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  // The lambda object is the block's (sole relevant) capture.
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for forwarding call
  CallArgList CallArgs;

  // 'this' for the call operator is the captured lambda object.
  QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (BlockDecl::param_const_iterator I = BD->param_begin(),
       E = BD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  EmitForwardingCallToLambda(Lambda, CallArgs);
}
|
|
|
|
|
|
|
|
/// Emit the body for a lambda-to-block-pointer conversion; the variadic
/// case is reported as unsupported rather than miscompiled.
void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  const CXXMethodDecl *method = cast<CXXMethodDecl>(CurFuncDecl);
  if (method->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator forward.
    CGM.ErrorUnsupported(CurFuncDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args);
}
|
|
|
|
|
|
|
|
void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
|
|
|
|
const CXXRecordDecl *Lambda = MD->getParent();
|
|
|
|
|
|
|
|
// Start building arguments for forwarding call
|
|
|
|
CallArgList CallArgs;
|
|
|
|
|
|
|
|
QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
|
|
|
|
llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
|
|
|
|
CallArgs.add(RValue::get(ThisPtr), ThisType);
|
|
|
|
|
|
|
|
// Add the rest of the parameters.
|
|
|
|
for (FunctionDecl::param_const_iterator I = MD->param_begin(),
|
|
|
|
E = MD->param_end(); I != E; ++I) {
|
|
|
|
ParmVarDecl *param = *I;
|
|
|
|
EmitDelegateCallArg(CallArgs, param);
|
|
|
|
}
|
|
|
|
|
|
|
|
EmitForwardingCallToLambda(Lambda, CallArgs);
|
|
|
|
}
|
|
|
|
|
2012-02-17 11:02:34 +08:00
|
|
|
void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
|
|
|
|
if (MD->isVariadic()) {
|
2012-02-16 11:47:28 +08:00
|
|
|
// FIXME: Making this work correctly is nasty because it requires either
|
|
|
|
// cloning the body of the call operator or making the call operator forward.
|
|
|
|
CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
|
2012-02-25 10:48:22 +08:00
|
|
|
return;
|
2012-02-16 11:47:28 +08:00
|
|
|
}
|
|
|
|
|
2012-02-17 11:02:34 +08:00
|
|
|
EmitLambdaDelegatingInvokeBody(MD);
|
2012-02-16 09:37:33 +08:00
|
|
|
}
|