//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          SourceLocation CallLoc,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *ImplicitParam,
                                          QualType ImplicitParamTy,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                            : TCK_MemberCall,
                CallLoc, This, getContext().getRecordType(MD->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args, MD);
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
/// given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // When building with -fapple-kext, all calls must go through the vtable
  // since the kernel linker can do runtime patching of vtables.
  if (Context.getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); }
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore be devirtualized.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be overridden
  // and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
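  //
  // For example (illustrative):
  //
  //   struct A { virtual void f(); };
  //   struct S { A a; };
  //   void g(S *s) { s->a.f(); }  // can be a direct call to A::f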
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
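// For example (illustrative), MSVC accepts 'p->Type::Type(args)' as an
// explicit call to the constructor on already-allocated storage.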
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  const Expr *Base = ME->getBase();
  bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();

  const CXXMethodDecl *DevirtualizedMethod = NULL;
  if (CanUseVirtualCall &&
      canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = NULL;
    }
    // If the return types are not the same, this might be a case where more
    // code needs to run to compensate for it. For example, the derived
    // method might return a type that inherits from the return type of MD
    // and has a prefix.
    // For now we just avoid devirtualizing these covariant cases.
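    //
    // For example (illustrative):
    //
    //   struct B { virtual B *clone(); };
    //   struct D : Prefix, B { D *clone() override; };
    //
    // where devirtualizing d->clone() would require adjusting the returned
    // D* to a B* that may live at a non-zero offset within D.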
    if (DevirtualizedMethod &&
        DevirtualizedMethod->getResultType().getCanonicalType() !=
        MD->getResultType().getCanonicalType())
      DevirtualizedMethod = NULL;
  }

  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(Base);
  else
    This = EmitLValue(Base).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
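      // (That effect is a plain aggregate copy of the argument's bytes into
      // 'This' via EmitAggregateAssign below.)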
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctor are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = 0;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete);
  else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
                                                             Ctor_Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
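  //
  // For example (illustrative):
  //
  //   struct B { virtual void f(); };
  //   struct D : B { void f() override; };
  //   void g(D *d) { d->B::f(); }  // qualified: calls B::f directly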
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
  llvm::Value *Callee;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                CE->getExprLoc(), This);
    } else {
      if (getLangOpts().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else if (!DevirtualizedMethod)
        Callee = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete, FInfo, Ty);
      else {
        const CXXDestructorDecl *DDtor =
          cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
      }
      EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                        /*ImplicitParam=*/0, QualType(), 0, 0);
    }
    return RValue::get(0);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getLangOpts().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else if (!DevirtualizedMethod)
      Callee = CGM.GetAddrOfFunction(MD, Ty);
    else {
      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
    }
  }

  return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/0, QualType(),
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
    cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
                QualType(MPT->getClass(), 0));

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      MD->isTrivial()) {
    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
    QualType Ty = E->getType();
    EmitAggregateAssign(This, Src, Ty);
    return RValue::get(This);
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/0, QualType(),
                           E->arg_begin() + 1, E->arg_end());
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlign();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
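  //
  // (For example, in the Itanium C++ ABI a null pointer to data member is
  // represented as -1, so zero-filling a class with such a member would
  // leave it pointing at offset 0 rather than null.)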
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
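  //
  // For example (illustrative), value-initializing 'struct X { int a; };'
  // with 'X()' requires the storage to be zeroed before any constructor
  // would run.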
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this.
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
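  // For example (illustrative), 'new (buf) T[n]' uses the reserved
  // 'operator new[](size_t, void*)'; no usual deallocation function will
  // ever be called on the result, so no element count needs to be stored.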
|
|
|
|
if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
|
2010-08-23 09:17:59 +08:00
|
|
|
return CharUnits::Zero();
|
|
|
|
|
2011-01-27 17:37:56 +08:00
|
|
|
return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
|
2009-09-24 00:07:23 +08:00
|
|
|
}
|
|
|
|
|
2011-05-15 15:14:44 +08:00
|
|
|
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
|
|
|
|
const CXXNewExpr *e,
|
2012-02-23 01:37:52 +08:00
|
|
|
unsigned minElements,
|
2011-05-15 15:14:44 +08:00
|
|
|
llvm::Value *&numElements,
|
|
|
|
llvm::Value *&sizeWithoutCookie) {
|
|
|
|
QualType type = e->getAllocatedType();
|
|
|
|
|
|
|
|
if (!e->isArray()) {
|
|
|
|
CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
|
|
|
|
sizeWithoutCookie
|
|
|
|
= llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
|
|
|
|
return sizeWithoutCookie;
|
2010-07-21 09:10:17 +08:00
|
|
|
}
|
2009-09-24 00:07:23 +08:00
|
|
|
|
2011-05-15 15:14:44 +08:00
|
|
|
// The width of size_t.
|
|
|
|
unsigned sizeWidth = CGF.SizeTy->getBitWidth();
|
|
|
|
|
2010-09-02 17:58:18 +08:00
|
|
|
// Figure out the cookie size.
|
2011-05-15 15:14:44 +08:00
|
|
|
llvm::APInt cookieSize(sizeWidth,
|
|
|
|
CalculateCookiePadding(CGF, e).getQuantity());
|
2010-09-02 17:58:18 +08:00
|
|
|
|
2009-09-24 00:07:23 +08:00
|
|
|
// Emit the array size expression.
|
2010-08-26 23:23:38 +08:00
|
|
|
// We multiply the size of all dimensions for NumElements.
|
|
|
|
// e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
|
2011-05-15 15:14:44 +08:00
|
|
|
numElements = CGF.EmitScalarExpr(e->getArraySize());
|
|
|
|
assert(isa<llvm::IntegerType>(numElements->getType()));
|
|
|
|
|
|
|
|
// The number of elements can be have an arbitrary integer type;
|
|
|
|
// essentially, we need to multiply it by a constant factor, add a
|
|
|
|
// cookie size, and verify that the result is representable as a
|
|
|
|
// size_t. That's just a gloss, though, and it's wrong in one
|
|
|
|
// important way: if the count is negative, it's an error even if
|
|
|
|
// the cookie size would bring the total size >= 0.
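  //
  // For example (illustrative): with a 32-bit size_t, a count of -4 becomes
  // 0xFFFFFFFC when treated as unsigned, and adding a small cookie would
  // wrap around to a tiny, seemingly valid allocation size.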
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that.
    // 4) we need to compute
    //    sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //    size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, llvm::Value *NewPtr) {

  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                   Alignment),
                       false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                           Alignment),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType elementType,
                                         llvm::Value *beginPtr,
                                         llvm::Value *numElements) {
  if (!E->hasInitializer())
    return; // We have a POD type.

  llvm::Value *explicitPtr = beginPtr;
  // Find the end of the array, hoisted out of the loop.
  llvm::Value *endPtr =
    Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");

  unsigned initializerElements = 0;

  const Expr *Init = E->getInitializer();
  llvm::AllocaInst *endOfInit = 0;
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    initializerElements = ILE->getNumInits();

    // Enter a partial-destruction cleanup if necessary.
    if (needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
      cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
      pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
                                       getDestroyer(dtorKind));
      cleanup = EHStack.stable_begin();
    }

    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
      explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1,
                                               "array.exp.next");
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();
  }

  // Create the continuation block.
  llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
    // If all elements have already been initialized, skip the whole loop.
    if (constNum->getZExtValue() <= initializerElements) {
      // If there was a cleanup, deactivate it.
      if (cleanupDominator)
        DeactivateCleanupBlock(cleanup, cleanupDominator);
      return;
    }
  } else {
    llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
    llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
                                                "array.isempty");
    Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
    EmitBlock(nonEmptyBB);
  }

  // Enter the loop.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("new.loop");

  EmitBlock(loopBB);

  // Set up the current-element phi.
  llvm::PHINode *curPtr =
    Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
  curPtr->addIncoming(explicitPtr, entryBB);

  // Store the new cleanup position for irregular cleanups.
  if (endOfInit) Builder.CreateStore(curPtr, endOfInit);

  // Enter a partial-destruction cleanup if necessary.
  if (!cleanupDominator && needsEHCleanup(dtorKind)) {
    pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
                                   getDestroyer(dtorKind));
    cleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);

  // Leave the cleanup if we entered one.
  if (cleanupDominator) {
    DeactivateCleanupBlock(cleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  // Advance to the next element.
  llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
  Builder.CreateCondBr(isEnd, contBB, loopBB);
  curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());

  EmitBlock(contBB);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  const Expr *Init = E->getInitializer();
  if (E->isArray()) {
    if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
      CXXConstructorDecl *Ctor = CCE->getConstructor();
      if (Ctor->isTrivial()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
          return;
        }
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     CCE->arg_begin(), CCE->arg_end(),
                                     CCE->requiresZeroInitialization());
      return;
    } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
               CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
      return;
    }
    CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
    return;
  }

  if (!Init)
    return;

  StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *Callee,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::Instruction *CallOrInvoke;
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(Args, CalleeType),
                   CGF.CGM.GetAddrOfFunction(Callee), ReturnValueSlot(), Args,
                   Callee, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
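  ///
  /// For example (illustrative), marking the calls as builtin is what lets
  /// LLVM fold away the allocation in 'delete new int;' entirely.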
  if (Callee->isReplaceableGlobalAllocationFunction()) {
    // FIXME: Add addAttribute to CallSite.
    if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
      CI->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
      II->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else
      llvm_unreachable("unexpected kind of call instruction");
  }

  return RV;
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;
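
    // The saved placement args live in trailing storage allocated directly
    // after this object; getExtraSize tells the EHScopeStack how much extra
    // space to reserve for them.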
|
|
|
|
|
|
|
|
RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
|
|
|
|
|
|
|
|
public:
|
|
|
|
static size_t getExtraSize(size_t NumPlacementArgs) {
|
|
|
|
return NumPlacementArgs * sizeof(RValue);
|
|
|
|
}
|
|
|
|
|
|
|
|
CallDeleteDuringNew(size_t NumPlacementArgs,
|
|
|
|
const FunctionDecl *OperatorDelete,
|
|
|
|
llvm::Value *Ptr,
|
|
|
|
llvm::Value *AllocSize)
|
|
|
|
: NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
|
|
|
|
Ptr(Ptr), AllocSize(AllocSize) {}
|
|
|
|
|
|
|
|
void setPlacementArg(unsigned I, RValue Arg) {
|
|
|
|
assert(I < NumPlacementArgs && "index out of range");
|
|
|
|
getPlacementArgs()[I] = Arg;
|
|
|
|
}
|
|
|
|
|
2011-07-13 04:27:29 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) {
|
2010-09-14 15:57:04 +08:00
|
|
|
const FunctionProtoType *FPT
|
|
|
|
= OperatorDelete->getType()->getAs<FunctionProtoType>();
|
|
|
|
assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
|
2010-09-15 05:45:42 +08:00
|
|
|
(FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
|
2010-09-14 15:57:04 +08:00
|
|
|
|
|
|
|
CallArgList DeleteArgs;
|
|
|
|
|
|
|
|
// The first argument is always a void*.
|
|
|
|
FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(RValue::get(Ptr), *AI++);
|
2010-09-14 15:57:04 +08:00
|
|
|
|
|
|
|
// A member 'operator delete' can take an extra 'size_t' argument.
|
|
|
|
if (FPT->getNumArgs() == NumPlacementArgs + 2)
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(RValue::get(AllocSize), *AI++);
|
2010-09-14 15:57:04 +08:00
|
|
|
|
|
|
|
// Pass the rest of the arguments, which must match exactly.
|
|
|
|
for (unsigned I = 0; I != NumPlacementArgs; ++I)
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(getPlacementArgs()[I], *AI++);
|
2010-09-14 15:57:04 +08:00
|
|
|
|
|
|
|
// Call 'operator delete'.
|
2013-07-22 07:12:18 +08:00
|
|
|
EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
|
2010-09-14 15:57:04 +08:00
|
|
|
}
|
|
|
|
};
|
2010-09-17 08:50:28 +08:00
|
|
|
|
|
|
|
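  // Illustrative sketch (added commentary, not in the original source; the
  // 'Arena' type and operators below are hypothetical): this cleanup fires
  // for code such as
  //
  //   struct Arena { /* ... */ };
  //   void *operator new(std::size_t, Arena &);
  //   void operator delete(void *, Arena &);
  //   T *t = new (arena) T();  // if T::T() throws, the matching placement
  //                            // 'operator delete(void*, Arena&)' is called
  //
  // with the placement argument 'arena' replayed from the RValues saved in
  // the cleanup's trailing storage.
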
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                                   DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}

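// Illustrative sketch (added commentary, not in the original source): the
// conditional variant is needed when the new-expression itself is only
// conditionally evaluated, e.g.
//
//   T *t = cond ? new (arena) T() : 0;
//
// The raw llvm::Values for the pointer and placement arguments need not
// dominate the point where the cleanup is finally emitted, so they are
// captured as DominatingValue saved_types and restored in Emit() instead.
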
/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
                                                  E->getNumPlacementArgs(),
                                                  E->getOperatorDelete(),
                                                  SavedNewPtr,
                                                  SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.initFullExprCleanup();
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    (!allocType.isPODType(getContext()) || E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS = allocation->getType()->getPointerAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}

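// Illustrative sketch (added commentary, not in the original source): the
// null check emitted above corresponds to source like
//
//   T *t = new (std::nothrow) T();
//
// where the non-throwing allocation function may return null, so the
// initializer runs only on the new.notnull path and the final pointer is
// merged back through the phi node at new.cont.
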
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}

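// Illustrative sketch (added commentary, not in the original source): the
// optional size argument above serves two-parameter deallocation functions
// such as
//
//   struct T { static void operator delete(void *, std::size_t); };
//
// for which 'delete t' passes sizeof(T) as the second argument.
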
namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.

          // Derive the complete-object pointer, which is what we need
          // to pass to the deallocation function.
          llvm::Value *completePtr =
            CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);

          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    completePtr, OperatorDelete,
                                                    ElementType);
        }

        // FIXME: Provide a source location here.
        CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
        CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
                                                      SourceLocation(), Ptr);

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr);
  else if (CGF.getLangOpts().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue =
        CGF.Builder.CreateLoad(Ptr, ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

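// Illustrative sketch (added commentary, not in the original source): the
// virtual path above handles
//
//   struct Base { virtual ~Base(); };
//   Base *b = get();
//   delete b;   // virtual call to the ABI's deleting destructor
//
// so that the most-derived destructor and its matching operator delete run
// even when the static type of the pointer is a base class.
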
namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), size_t);
      }

      // Emit the call to delete.
      EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
    }
  };
}

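// Illustrative sketch (added commentary, not in the original source): for a
// two-argument array deallocation function, the size recomputed above is
//
//   sizeof(element) * numElements + cookieSize
//
// i.e. the original request made by the corresponding array new.
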
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

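// Illustrative sketch (added commentary, not in the original source): for
//
//   struct T { ~T(); };
//   T *p = new T[n];
//   delete [] p;
//
// the element count 'n' is not recoverable from the pointer alone, so it is
// read back from the array cookie the ABI stored in front of the allocation.
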
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}

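// Illustrative sketch (added commentary, not in the original source): the
// GEP loop above handles deleting a pointer to a multidimensional array, e.g.
//
//   A (*p)[3][7] = new A[n][3][7];
//   delete [] p;   // GEPs from [3 x [7 x %A]]* down to the first %A
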
static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

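// Illustrative sketch (added commentary, not in the original source): in the
// Itanium ABI the std::type_info pointer sits one slot before the vtable's
// address point, so for
//
//   Base *b = get();
//   const std::type_info &ti = typeid(*b);  // polymorphic operand
//
// the code above loads vtable[-1] instead of consulting the static type.
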
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(
      CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

/// \brief Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
       I != E; ++I) {
    if (I->Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (CXXBasePath::iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (J->Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(J->Class);
      Offset += L.getBaseClassOffset(J->Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}

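// Illustrative sketch (added commentary, not in the original source): the
// hint values follow Itanium C++ ABI [2.9.7], e.g.
//
//   struct A {};  struct B : A {};     // hint = offset of A within B (>= 0)
//   struct C : virtual A {};           // virtual base on the path: hint = -1
//   struct D {};                       // A is not a base of D: hint = -2
//   struct X : A {}; struct Y : A {};
//   struct Z : X, Y {};                // A reachable via two public paths: -3
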
static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint =
    llvm::ConstantInt::get(PtrDiffLTy,
                           computeOffsetHint(CGF.getContext(), SrcDecl,
                                             DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = { Value, SrcRTTI, DestRTTI, OffsetHint };
  Value = CGF.EmitNounwindRuntimeCall(getDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

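// Illustrative sketch (added commentary, not in the original source): the
// void* fast path above implements
//
//   Base *b = get();
//   void *mostDerived = dynamic_cast<void *>(b);
//
// by adding the vtable's offset-to-top entry (slot -2) to the pointer, with
// no runtime call to __dynamic_cast at all.
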
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}

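// Illustrative sketch (added commentary, not in the original source): the
// null propagation above distinguishes the pointer and reference forms:
//
//   Derived *d = dynamic_cast<Derived *>(b);   // null in, null out
//   Derived &r = dynamic_cast<Derived &>(*b);  // failure throws std::bad_cast
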
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
                                 Slot.getAlignment());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}