//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
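
// Shared helper for member and operator calls: pushes the 'this' pointer
// (and an implicit parameter such as the VTT, if any), then emits the
// remaining arguments from CE, and reports which arguments are required by
// the prototype.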
static RequiredArgs commonEmitCXXMemberOrOperatorCall(
    CodeGenFunction &CGF, const CXXMethodDecl *MD, llvm::Value *Callee,
    ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam,
    QualType ImplicitParamTy, const CallExpr *CE, CallArgList &Args) {
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  SourceLocation CallLoc;
  if (CE)
    CallLoc = CE->getExprLoc();
  CGF.EmitTypeCheck(
      isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
                                  : CodeGenFunction::TCK_MemberCall,
      CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
    CGF.EmitCallArgs(Args, FPT, CE->arg_begin() + ArgsToSkip, CE->arg_end(),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return required;
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
      *this, MD, Callee, ReturnValue, This, ImplicitParam, ImplicitParamTy, CE,
      Args);
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args, MD);
}

RValue CodeGenFunction::EmitCXXStructorCall(
    const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, StructorType Type) {
  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, MD, Callee, ReturnValue, This,
                                    ImplicitParam, ImplicitParamTy, CE, Args);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(MD, Type),
                  Callee, ReturnValue, Args, MD);
}
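
// Return the CXXRecordDecl of E's type, looking through a pointer type if
// necessary.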
static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
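// For example, under -fms-extensions:
//   A a;
//   a.A::A();   // explicit call that re-runs A's constructor on 'a'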
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return type of MD
      // and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }
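
  // Note: a covariant override such as
  //   struct Base { virtual Base *clone(); };
  //   struct Derived final : Base { Derived *clone() override; };
  // is deliberately not devirtualized above, since the call may need a
  // return-value adjustment.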

  llvm::Value *This;
  if (IsArrow)
    This = EmitScalarExpr(Base);
  else
    This = EmitLValue(Base).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(nullptr);
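
    // e.g. a trivial 'a = b' is lowered below to a direct aggregate copy of
    // the object's bytes instead of a call to the trivial operator=.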
    if (!MD->getParent()->mayInsertExtraPadding()) {
      if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
        // We don't like to generate the trivial copy/move assignment operator
        // when it isn't necessary; just produce the proper effect here.
        // Special case: skip first argument of CXXOperatorCall (it is "this").
        unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
        llvm::Value *RHS =
            EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
        EmitAggregateAssign(This, RHS, CE->getType());
        return RValue::get(This);
      }

      if (isa<CXXConstructorDecl>(MD) &&
          cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
        // Trivial move and copy ctor are the same.
        assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
        llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
        EmitAggregateCopy(This, RHS, CE->arg_begin()->getType());
        return RValue::get(This);
      }
      llvm_unreachable("unknown trivial member function");
    }
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Dtor, StructorType::Complete);
  else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Ctor, StructorType::Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
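  // (e.g. 'p->Base::f()' always calls Base::f directly, even if f is
  // virtual: the qualifier suppresses CanUseVirtualCall above.)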
  llvm::Value *Callee;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(
          *this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
    } else {
      if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
      else {
        const CXXDestructorDecl *DDtor =
            cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
      }
      EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
                                  /*ImplicitParam=*/nullptr, QualType(), CE);
    }
    return RValue::get(nullptr);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty);
  } else {
    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee = CGM.GetAddrOfFunction(MD, Ty);
    else {
      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
    }
  }

  if (MD->isVirtual()) {
    This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
        *this, MD, This, UseVirtualCall);
  }

  return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
                                     /*ImplicitParam=*/nullptr, QualType(), CE);
}
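
// Lowers a call through a pointer to member function: '(obj->*pmf)(...)'
// (BO_PtrMemI) or '(obj.*pmf)(...)' (BO_PtrMemD).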
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
      MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
                QualType(MPT->getClass(), 0));

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getDirectCallee());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}
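
// Zero-initialize just the non-virtual portion of a base-class subobject.
// This can't always be a plain memset: e.g. under the Itanium ABI a null
// pointer to data member is represented as -1, not 0, so such bases are
// copied from a null constant instead.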
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlignment();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
        new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                                 /*isConstant=*/true,
                                 llvm::GlobalVariable::PrivateLinkage,
                                 NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}
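
// Emit a CXXConstructExpr into the given slot, handling zero-initialization,
// trivial-constructor elision, copy elision from temporaries, and array
// construction (e.g. 'S arr[4];' constructs each element in turn).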
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(), E);
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
                           E);
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}
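
// Compute the size of the header ("cookie") that an array new-expression
// needs in front of the elements. The cookie typically records the element
// count so that delete[] can run the right number of destructors; e.g. the
// Itanium ABI prepends a size_t for element types with non-trivial
// destructors.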
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
           = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;
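
  // Illustrative example: for 'new T[n][3]' with sizeof(T) == 4,
  // arraySizeMultiplier is 3 and typeSizeMultiplier is 12, so the allocation
  // size is n * 12, plus the cookie size if the ABI requires one, with each
  // step checked for overflow in size_t.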

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that.
    // 4) we need to compute
    //    sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //    size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}
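
// Emit a single element's initializer into NewPtr, dispatching on how the
// type is evaluated: scalars get a scalar store, _Complex types a pair of
// stores, and aggregates go through the aggregate-emission path.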
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, llvm::Value *NewPtr) {
  // FIXME: Refactor with EmitExprAsInit.
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType, Alignment), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                           Alignment),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType ElementType,
                                         llvm::Value *BeginPtr,
                                         llvm::Value *NumElements,
                                         llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  llvm::Value *CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  llvm::AllocaInst *EndOfInit = nullptr;
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      unsigned AS = CurPtr->getType()->getPointerAddressSpace();
      llvm::Type *AllocPtrTy = ConvertTypeForMem(AllocType)->getPointerTo(AS);
      CurPtr = Builder.CreateBitCast(CurPtr, AllocPtrTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }
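
    // e.g. for 'new int[n][3]{{1, 2, 3}}' the single braced sub-list
    // initializes one 'int[3]' element, so InitListElements becomes 3.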
|
|
|
|
|
2014-06-03 14:58:52 +08:00
|
|
|
// Enter a partial-destruction Cleanup if necessary.
|
|
|
|
if (needsEHCleanup(DtorKind)) {
|
|
|
|
// In principle we could tell the cleanup where we are more
|
2012-02-24 08:13:55 +08:00
|
|
|
// directly, but the control flow can get so varied here that it
|
|
|
|
// would actually be quite complex. Therefore we go through an
|
|
|
|
// alloca.
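// (Added note, hedged: the partial-destruction cleanup reloads the end
// pointer from this alloca when it fires, so the stores into EndOfInit
// below record how far destruction must run if an initializer throws.)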
|
2014-06-03 14:58:52 +08:00
|
|
|
EndOfInit = CreateTempAlloca(BeginPtr->getType(), "array.init.end");
|
|
|
|
CleanupDominator = Builder.CreateStore(BeginPtr, EndOfInit);
|
|
|
|
pushIrregularPartialArrayCleanup(BeginPtr, EndOfInit, ElementType,
|
|
|
|
getDestroyer(DtorKind));
|
|
|
|
Cleanup = EHStack.stable_begin();
|
2012-02-24 08:13:55 +08:00
|
|
|
}
|
|
|
|
|
2012-02-23 01:37:52 +08:00
|
|
|
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
|
2012-02-24 08:13:55 +08:00
|
|
|
// Tell the cleanup that it needs to destroy up to this
|
|
|
|
// element. TODO: some of these stores can be trivially
|
|
|
|
// observed to be unnecessary.
|
2014-06-03 14:58:52 +08:00
|
|
|
if (EndOfInit)
|
|
|
|
Builder.CreateStore(Builder.CreateBitCast(CurPtr, BeginPtr->getType()),
|
|
|
|
EndOfInit);
|
|
|
|
// FIXME: If the last initializer is an incomplete initializer list for
|
|
|
|
// an array, and we have an array filler, we can fold together the two
|
|
|
|
// initialization loops.
|
2013-12-11 09:40:16 +08:00
|
|
|
StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
|
2014-06-03 14:58:52 +08:00
|
|
|
ILE->getInit(i)->getType(), CurPtr);
|
|
|
|
CurPtr = Builder.CreateConstInBoundsGEP1_32(CurPtr, 1, "array.exp.next");
|
2012-02-23 01:37:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// The remaining elements are filled with the array filler expression.
|
|
|
|
Init = ILE->getArrayFiller();
|
2013-12-11 09:40:16 +08:00
|
|
|
|
2014-06-03 14:58:52 +08:00
|
|
|
// Extract the initializer for the individual array elements by pulling
|
|
|
|
// out the array filler from all the nested initializer lists. This avoids
|
|
|
|
// generating a nested loop for the initialization.
|
|
|
|
while (Init && Init->getType()->isConstantArrayType()) {
|
|
|
|
auto *SubILE = dyn_cast<InitListExpr>(Init);
|
|
|
|
if (!SubILE)
|
|
|
|
break;
|
|
|
|
assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
|
|
|
|
Init = SubILE->getArrayFiller();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Switch back to initializing one base element at a time.
|
|
|
|
CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr->getType());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Attempt to perform zero-initialization using memset.
|
|
|
|
auto TryMemsetInitialization = [&]() -> bool {
|
|
|
|
// FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
|
|
|
|
// we can initialize with a memset to -1.
|
|
|
|
if (!CGM.getTypes().isZeroInitializable(ElementType))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Optimization: since zero initialization will just set the memory
|
|
|
|
// to all zeroes, generate a single memset to do it in one shot.
|
|
|
|
|
|
|
|
// Subtract out the size of any elements we've already initialized.
|
|
|
|
auto *RemainingSize = AllocSizeWithoutCookie;
|
|
|
|
if (InitListElements) {
|
|
|
|
// We know this can't overflow; we check this when doing the allocation.
|
|
|
|
auto *InitializedSize = llvm::ConstantInt::get(
|
|
|
|
RemainingSize->getType(),
|
|
|
|
getContext().getTypeSizeInChars(ElementType).getQuantity() *
|
|
|
|
InitListElements);
|
|
|
|
RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the memset.
|
|
|
|
CharUnits Alignment = getContext().getTypeAlignInChars(ElementType);
|
|
|
|
Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize,
|
|
|
|
Alignment.getQuantity(), false);
|
|
|
|
return true;
|
|
|
|
};
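// (A sketch of the lambda's effect, assuming 4-byte int: 'new int[n]()'
// lowers to the allocation call followed by a single
//   call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 %allocsize, ...)
// rather than a per-element store loop.)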
|
|
|
|
|
2014-06-03 16:26:00 +08:00
|
|
|
// If all elements have already been initialized, skip any further
|
|
|
|
// initialization.
|
|
|
|
llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
|
|
|
|
if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
|
|
|
|
// If there was a cleanup, deactivate it.
|
|
|
|
if (CleanupDominator)
|
|
|
|
DeactivateCleanupBlock(Cleanup, CleanupDominator);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(Init && "have trailing elements to initialize but no initializer");
|
|
|
|
|
2014-06-03 14:58:52 +08:00
|
|
|
// If this is a constructor call, try to optimize it out, and failing that
|
|
|
|
// emit a single loop to initialize all remaining elements.
|
2014-06-03 16:26:00 +08:00
|
|
|
if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
|
2014-06-03 14:58:52 +08:00
|
|
|
CXXConstructorDecl *Ctor = CCE->getConstructor();
|
|
|
|
if (Ctor->isTrivial()) {
|
|
|
|
// If the new expression did not specify value-initialization, then there
|
|
|
|
// is no initialization.
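// (e.g. 'new X[n]' with a trivial default constructor emits nothing,
// while 'new X[n]()' requests zero-initialization and falls through to
// the memset attempt below.)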
|
|
|
|
if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (TryMemsetInitialization())
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store the new cleanup position for irregular cleanups.
|
|
|
|
//
|
|
|
|
// FIXME: Share this cleanup with the constructor call emission rather than
|
|
|
|
// having it create a cleanup of its own.
|
|
|
|
if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
|
|
|
|
|
|
|
|
// Emit a constructor call loop to initialize the remaining elements.
|
|
|
|
if (InitListElements)
|
|
|
|
NumElements = Builder.CreateSub(
|
|
|
|
NumElements,
|
|
|
|
llvm::ConstantInt::get(NumElements->getType(), InitListElements));
|
2014-08-22 04:26:47 +08:00
|
|
|
EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
|
2014-06-03 14:58:52 +08:00
|
|
|
CCE->requiresZeroInitialization());
|
|
|
|
return;
|
2012-02-23 01:37:52 +08:00
|
|
|
}
|
|
|
|
|
2014-06-03 14:58:52 +08:00
|
|
|
// If this is value-initialization, we can usually use memset.
|
|
|
|
ImplicitValueInitExpr IVIE(ElementType);
|
2014-06-03 16:26:00 +08:00
|
|
|
if (isa<ImplicitValueInitExpr>(Init)) {
|
2014-06-03 14:58:52 +08:00
|
|
|
if (TryMemsetInitialization())
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Switch to an ImplicitValueInitExpr for the element type. This handles
|
|
|
|
// only one case: multidimensional array new of pointers to members. In
|
|
|
|
// all other cases, we already have an initializer for the array element.
|
|
|
|
Init = &IVIE;
|
|
|
|
}
|
|
|
|
|
|
|
|
// At this point we should have found an initializer for the individual
|
|
|
|
// elements of the array.
|
|
|
|
assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
|
|
|
|
"got wrong type of element to initialize");
|
|
|
|
|
2014-06-03 16:26:00 +08:00
|
|
|
// If we have an empty initializer list, we can usually use memset.
|
|
|
|
if (auto *ILE = dyn_cast<InitListExpr>(Init))
|
|
|
|
if (ILE->getNumInits() == 0 && TryMemsetInitialization())
|
|
|
|
return;
|
2014-05-03 17:16:57 +08:00
|
|
|
|
2014-06-03 14:58:52 +08:00
|
|
|
// Create the loop blocks.
|
|
|
|
llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
|
|
|
|
llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
|
|
|
|
llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
|
|
|
|
|
|
|
|
// Find the end of the array, hoisted out of the loop.
|
|
|
|
llvm::Value *EndPtr =
|
|
|
|
Builder.CreateInBoundsGEP(BeginPtr, NumElements, "array.end");
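// (A simplified sketch of the CFG built below, for a non-constant count;
// names follow the strings used in this function:
//   entry:         %array.isempty = icmp eq %cur, %array.end
//                  br i1 %array.isempty, %new.loop.end, %new.loop
//   new.loop:      %array.cur = phi [%cur, %entry], [%array.next, %new.loop]
//                  ...emit one element's initializer...
//                  %array.next = gep %array.cur, 1
//                  %array.atend = icmp eq %array.next, %array.end
//                  br i1 %array.atend, %new.loop.end, %new.loop
//   new.loop.end:  ...)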
|
2011-09-15 14:49:18 +08:00
|
|
|
|
2012-02-23 01:37:52 +08:00
|
|
|
// If the number of elements isn't constant, we now have to check whether there is
|
|
|
|
// anything left to initialize.
|
2014-06-03 14:58:52 +08:00
|
|
|
if (!ConstNum) {
|
|
|
|
llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr, EndPtr,
|
2011-09-15 14:49:18 +08:00
|
|
|
"array.isempty");
|
2014-06-03 14:58:52 +08:00
|
|
|
Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
|
2011-09-15 14:49:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Enter the loop.
|
2014-06-03 14:58:52 +08:00
|
|
|
EmitBlock(LoopBB);
|
2011-09-15 14:49:18 +08:00
|
|
|
|
|
|
|
// Set up the current-element phi.
|
2014-06-03 14:58:52 +08:00
|
|
|
llvm::PHINode *CurPtrPhi =
|
|
|
|
Builder.CreatePHI(CurPtr->getType(), 2, "array.cur");
|
|
|
|
CurPtrPhi->addIncoming(CurPtr, EntryBB);
|
|
|
|
CurPtr = CurPtrPhi;
|
|
|
|
|
|
|
|
// Store the new cleanup position for irregular cleanups.
|
|
|
|
if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
|
|
|
|
|
|
|
|
// Enter a partial-destruction cleanup if necessary.
|
|
|
|
if (!CleanupDominator && needsEHCleanup(DtorKind)) {
|
|
|
|
pushRegularPartialArrayCleanup(BeginPtr, CurPtr, ElementType,
|
|
|
|
getDestroyer(DtorKind));
|
|
|
|
Cleanup = EHStack.stable_begin();
|
|
|
|
CleanupDominator = Builder.CreateUnreachable();
|
2011-09-15 14:49:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Emit the initializer into this element.
|
2014-06-03 14:58:52 +08:00
|
|
|
StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr);
|
2011-09-15 14:49:18 +08:00
|
|
|
|
2014-06-03 14:58:52 +08:00
|
|
|
// Leave the cleanup if we entered one.
|
|
|
|
if (CleanupDominator) {
|
|
|
|
DeactivateCleanupBlock(Cleanup, CleanupDominator);
|
|
|
|
CleanupDominator->eraseFromParent();
|
2011-11-10 18:43:54 +08:00
|
|
|
}
|
2011-09-15 14:49:18 +08:00
|
|
|
|
2013-12-14 08:40:05 +08:00
|
|
|
// Advance to the next element by adjusting the pointer type as necessary.
|
2014-06-03 14:58:52 +08:00
|
|
|
llvm::Value *NextPtr =
|
|
|
|
Builder.CreateConstInBoundsGEP1_32(CurPtr, 1, "array.next");
|
|
|
|
|
2011-09-15 14:49:18 +08:00
|
|
|
// Check whether we've gotten to the end of the array and, if so,
|
|
|
|
// exit the loop.
|
2014-06-03 14:58:52 +08:00
|
|
|
llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
|
|
|
|
Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
|
|
|
|
CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());
|
2011-09-15 14:49:18 +08:00
|
|
|
|
2014-06-03 14:58:52 +08:00
|
|
|
EmitBlock(ContBB);
|
2010-06-26 02:26:07 +08:00
|
|
|
}
|
|
|
|
|
2009-09-24 00:07:23 +08:00
|
|
|
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
|
2011-09-15 14:49:18 +08:00
|
|
|
QualType ElementType,
|
2009-09-24 00:07:23 +08:00
|
|
|
llvm::Value *NewPtr,
|
2010-07-21 09:10:17 +08:00
|
|
|
llvm::Value *NumElements,
|
|
|
|
llvm::Value *AllocSizeWithoutCookie) {
|
2015-01-25 09:19:10 +08:00
|
|
|
ApplyDebugLocation DL(CGF, E);
|
2014-06-03 14:58:52 +08:00
|
|
|
if (E->isArray())
|
|
|
|
CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements,
|
|
|
|
AllocSizeWithoutCookie);
|
|
|
|
else if (const Expr *Init = E->getInitializer())
|
2015-01-14 15:38:27 +08:00
|
|
|
StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
|
2009-09-24 00:07:23 +08:00
|
|
|
}
|
|
|
|
|
2013-07-22 07:12:18 +08:00
|
|
|
/// Emit a call to an operator new or operator delete function, as implicitly
|
|
|
|
/// created by new-expressions and delete-expressions.
|
|
|
|
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
|
|
|
|
const FunctionDecl *Callee,
|
|
|
|
const FunctionProtoType *CalleeType,
|
|
|
|
const CallArgList &Args) {
|
|
|
|
llvm::Instruction *CallOrInvoke;
|
2013-07-30 04:14:16 +08:00
|
|
|
llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
|
2013-07-22 07:12:18 +08:00
|
|
|
RValue RV =
|
2014-12-13 07:41:25 +08:00
|
|
|
CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
|
|
|
|
Args, CalleeType, /*chainCall=*/false),
|
|
|
|
CalleeAddr, ReturnValueSlot(), Args, Callee, &CallOrInvoke);
|
2013-07-22 07:12:18 +08:00
|
|
|
|
|
|
|
/// C++1y [expr.new]p10:
|
|
|
|
/// [In a new-expression,] an implementation is allowed to omit a call
|
|
|
|
/// to a replaceable global allocation function.
|
|
|
|
///
|
|
|
|
/// We model such elidable calls with the 'builtin' attribute.
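// (Added context, hedged: Clang emits these declarations with 'nobuiltin',
// and adding 'builtin' on this particular call site re-enables builtin
// treatment, so LLVM may recognize the call as the standard allocator and
// elide an unused new/delete pair under [expr.new]p10.)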
|
2013-10-22 22:23:09 +08:00
|
|
|
llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
|
2013-07-30 04:14:16 +08:00
|
|
|
if (Callee->isReplaceableGlobalAllocationFunction() &&
|
2013-10-22 22:23:09 +08:00
|
|
|
Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
|
2013-07-22 07:12:18 +08:00
|
|
|
// FIXME: Add addAttribute to CallSite.
|
|
|
|
if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
|
|
|
|
CI->addAttribute(llvm::AttributeSet::FunctionIndex,
|
|
|
|
llvm::Attribute::Builtin);
|
|
|
|
else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
|
|
|
|
II->addAttribute(llvm::AttributeSet::FunctionIndex,
|
|
|
|
llvm::Attribute::Builtin);
|
|
|
|
else
|
|
|
|
llvm_unreachable("unexpected kind of call instruction");
|
|
|
|
}
|
|
|
|
|
|
|
|
return RV;
|
|
|
|
}
|
|
|
|
|
2014-06-04 07:27:44 +08:00
|
|
|
RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
|
|
|
|
const Expr *Arg,
|
|
|
|
bool IsDelete) {
|
|
|
|
CallArgList Args;
|
|
|
|
const Stmt *ArgS = Arg;
|
|
|
|
EmitCallArgs(Args, *Type->param_type_begin(),
|
|
|
|
ConstExprIterator(&ArgS), ConstExprIterator(&ArgS + 1));
|
|
|
|
// Find the allocation or deallocation function that we're calling.
|
|
|
|
ASTContext &Ctx = getContext();
|
|
|
|
DeclarationName Name = Ctx.DeclarationNames
|
|
|
|
.getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
|
|
|
|
for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
|
2014-06-05 08:43:02 +08:00
|
|
|
if (auto *FD = dyn_cast<FunctionDecl>(Decl))
|
|
|
|
if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
|
|
|
|
return EmitNewDeleteCall(*this, cast<FunctionDecl>(Decl), Type, Args);
|
2014-06-04 07:27:44 +08:00
|
|
|
llvm_unreachable("predeclared global operator new/delete is missing");
|
|
|
|
}
|
|
|
|
|
2010-09-14 15:57:04 +08:00
|
|
|
namespace {
|
|
|
|
/// A cleanup to call the given 'operator delete' function upon
|
|
|
|
/// abnormal exit from a new expression.
|
|
|
|
class CallDeleteDuringNew : public EHScopeStack::Cleanup {
|
|
|
|
size_t NumPlacementArgs;
|
|
|
|
const FunctionDecl *OperatorDelete;
|
|
|
|
llvm::Value *Ptr;
|
|
|
|
llvm::Value *AllocSize;
|
|
|
|
|
|
|
|
RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
|
|
|
|
|
|
|
|
public:
|
|
|
|
static size_t getExtraSize(size_t NumPlacementArgs) {
|
|
|
|
return NumPlacementArgs * sizeof(RValue);
|
|
|
|
}
|
|
|
|
|
|
|
|
CallDeleteDuringNew(size_t NumPlacementArgs,
|
|
|
|
const FunctionDecl *OperatorDelete,
|
|
|
|
llvm::Value *Ptr,
|
|
|
|
llvm::Value *AllocSize)
|
|
|
|
: NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
|
|
|
|
Ptr(Ptr), AllocSize(AllocSize) {}
|
|
|
|
|
|
|
|
void setPlacementArg(unsigned I, RValue Arg) {
|
|
|
|
assert(I < NumPlacementArgs && "index out of range");
|
|
|
|
getPlacementArgs()[I] = Arg;
|
|
|
|
}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2010-09-14 15:57:04 +08:00
|
|
|
const FunctionProtoType *FPT
|
|
|
|
= OperatorDelete->getType()->getAs<FunctionProtoType>();
|
2014-01-21 04:26:09 +08:00
|
|
|
assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
|
|
|
|
(FPT->getNumParams() == 2 && NumPlacementArgs == 0));
|
2010-09-14 15:57:04 +08:00
|
|
|
|
|
|
|
CallArgList DeleteArgs;
|
|
|
|
|
|
|
|
// The first argument is always a void*.
|
2014-01-21 04:26:09 +08:00
|
|
|
FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(RValue::get(Ptr), *AI++);
|
2010-09-14 15:57:04 +08:00
|
|
|
|
|
|
|
// A member 'operator delete' can take an extra 'size_t' argument.
|
2014-01-21 04:26:09 +08:00
|
|
|
if (FPT->getNumParams() == NumPlacementArgs + 2)
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(RValue::get(AllocSize), *AI++);
|
2010-09-14 15:57:04 +08:00
|
|
|
|
|
|
|
// Pass the rest of the arguments, which must match exactly.
|
|
|
|
for (unsigned I = 0; I != NumPlacementArgs; ++I)
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(getPlacementArgs()[I], *AI++);
|
2010-09-14 15:57:04 +08:00
|
|
|
|
|
|
|
// Call 'operator delete'.
|
2013-07-22 07:12:18 +08:00
|
|
|
EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
|
2010-09-14 15:57:04 +08:00
|
|
|
}
|
|
|
|
};
|
2010-09-17 08:50:28 +08:00
|
|
|
|
|
|
|
/// A cleanup to call the given 'operator delete' function upon
|
|
|
|
/// abnormal exit from a new expression when the new expression is
|
|
|
|
/// conditional.
|
|
|
|
class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
|
|
|
|
size_t NumPlacementArgs;
|
|
|
|
const FunctionDecl *OperatorDelete;
|
2011-01-28 18:53:53 +08:00
|
|
|
DominatingValue<RValue>::saved_type Ptr;
|
|
|
|
DominatingValue<RValue>::saved_type AllocSize;
|
2010-09-17 08:50:28 +08:00
|
|
|
|
2011-01-28 18:53:53 +08:00
|
|
|
DominatingValue<RValue>::saved_type *getPlacementArgs() {
|
|
|
|
return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
|
2010-09-17 08:50:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
public:
|
|
|
|
static size_t getExtraSize(size_t NumPlacementArgs) {
|
2011-01-28 18:53:53 +08:00
|
|
|
return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
|
2010-09-17 08:50:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
|
|
|
|
const FunctionDecl *OperatorDelete,
|
2011-01-28 18:53:53 +08:00
|
|
|
DominatingValue<RValue>::saved_type Ptr,
|
|
|
|
DominatingValue<RValue>::saved_type AllocSize)
|
2010-09-17 08:50:28 +08:00
|
|
|
: NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
|
|
|
|
Ptr(Ptr), AllocSize(AllocSize) {}
|
|
|
|
|
2011-01-28 18:53:53 +08:00
|
|
|
void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
|
2010-09-17 08:50:28 +08:00
|
|
|
assert(I < NumPlacementArgs && "index out of range");
|
|
|
|
getPlacementArgs()[I] = Arg;
|
|
|
|
}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2010-09-17 08:50:28 +08:00
|
|
|
const FunctionProtoType *FPT
|
|
|
|
= OperatorDelete->getType()->getAs<FunctionProtoType>();
|
2014-01-21 04:26:09 +08:00
|
|
|
assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
|
|
|
|
(FPT->getNumParams() == 2 && NumPlacementArgs == 0));
|
2010-09-17 08:50:28 +08:00
|
|
|
|
|
|
|
CallArgList DeleteArgs;
|
|
|
|
|
|
|
|
// The first argument is always a void*.
|
2014-01-21 04:26:09 +08:00
|
|
|
FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(Ptr.restore(CGF), *AI++);
|
2010-09-17 08:50:28 +08:00
|
|
|
|
|
|
|
// A member 'operator delete' can take an extra 'size_t' argument.
|
2014-01-21 04:26:09 +08:00
|
|
|
if (FPT->getNumParams() == NumPlacementArgs + 2) {
|
2011-01-28 18:53:53 +08:00
|
|
|
RValue RV = AllocSize.restore(CGF);
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(RV, *AI++);
|
2010-09-17 08:50:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Pass the rest of the arguments, which must match exactly.
|
|
|
|
for (unsigned I = 0; I != NumPlacementArgs; ++I) {
|
2011-01-28 18:53:53 +08:00
|
|
|
RValue RV = getPlacementArgs()[I].restore(CGF);
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(RV, *AI++);
|
2010-09-17 08:50:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Call 'operator delete'.
|
2013-07-22 07:12:18 +08:00
|
|
|
EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
|
2010-09-17 08:50:28 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Enter a cleanup to call 'operator delete' if the initializer in a
|
|
|
|
/// new-expression throws.
|
|
|
|
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
|
|
|
|
const CXXNewExpr *E,
|
|
|
|
llvm::Value *NewPtr,
|
|
|
|
llvm::Value *AllocSize,
|
|
|
|
const CallArgList &NewArgs) {
|
|
|
|
// If we're not inside a conditional branch, then the cleanup will
|
|
|
|
// dominate and we can do the easier (and more efficient) thing.
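// (Illustration: in 'b ? new X : nullptr' the allocation is emitted on
// only one branch of a conditional, so the cleanup's operands must be
// saved as dominating values instead of being used directly.)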
|
|
|
|
if (!CGF.isInConditionalBranch()) {
|
|
|
|
CallDeleteDuringNew *Cleanup = CGF.EHStack
|
|
|
|
.pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
|
|
|
|
E->getNumPlacementArgs(),
|
|
|
|
E->getOperatorDelete(),
|
|
|
|
NewPtr, AllocSize);
|
|
|
|
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
|
2011-05-03 02:05:27 +08:00
|
|
|
Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
|
2010-09-17 08:50:28 +08:00
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we need to save all this stuff.
|
2011-01-28 18:53:53 +08:00
|
|
|
DominatingValue<RValue>::saved_type SavedNewPtr =
|
|
|
|
DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
|
|
|
|
DominatingValue<RValue>::saved_type SavedAllocSize =
|
|
|
|
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
|
2010-09-17 08:50:28 +08:00
|
|
|
|
|
|
|
CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
|
2011-11-10 18:43:54 +08:00
|
|
|
.pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
|
2010-09-17 08:50:28 +08:00
|
|
|
E->getNumPlacementArgs(),
|
|
|
|
E->getOperatorDelete(),
|
|
|
|
SavedNewPtr,
|
|
|
|
SavedAllocSize);
|
|
|
|
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
|
2011-01-28 18:53:53 +08:00
|
|
|
Cleanup->setPlacementArg(I,
|
2011-05-03 02:05:27 +08:00
|
|
|
DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
|
2010-09-17 08:50:28 +08:00
|
|
|
|
2011-11-10 18:43:54 +08:00
|
|
|
CGF.initFullExprCleanup();
|
2010-09-14 15:57:04 +08:00
|
|
|
}
|
|
|
|
|
2009-09-23 06:53:17 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
|
2011-03-07 11:12:35 +08:00
|
|
|
// The element type being allocated.
|
|
|
|
QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
|
2010-09-02 17:58:18 +08:00
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
// 1. Build a call to the allocation function.
|
|
|
|
FunctionDecl *allocator = E->getOperatorNew();
|
|
|
|
const FunctionProtoType *allocatorType =
|
|
|
|
allocator->getType()->castAs<FunctionProtoType>();
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
CallArgList allocatorArgs;
|
2009-09-23 06:53:17 +08:00
|
|
|
|
|
|
|
// The allocation size is the first argument.
|
2011-03-07 11:12:35 +08:00
|
|
|
QualType sizeType = getContext().getSizeType();
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2012-02-23 01:37:52 +08:00
|
|
|
// If there is a brace-initializer, we cannot allocate fewer elements than inits.
|
|
|
|
unsigned minElements = 0;
|
|
|
|
if (E->isArray() && E->hasInitializer()) {
|
|
|
|
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
|
|
|
|
minElements = ILE->getNumInits();
|
|
|
|
}
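// (Example: 'new int[n]{1,2,3}' must allocate at least three elements, so
// minElements becomes 3 and the size computation below can guarantee room
// for the explicit inits.)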
|
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Value *numElements = nullptr;
|
|
|
|
llvm::Value *allocSizeWithoutCookie = nullptr;
|
2011-03-07 11:12:35 +08:00
|
|
|
llvm::Value *allocSize =
|
2012-02-23 01:37:52 +08:00
|
|
|
EmitCXXNewAllocSize(*this, E, minElements, numElements,
|
|
|
|
allocSizeWithoutCookie);
|
2014-08-28 08:22:11 +08:00
|
|
|
|
2011-05-03 01:57:46 +08:00
|
|
|
allocatorArgs.add(RValue::get(allocSize), sizeType);
|
2009-09-23 06:53:17 +08:00
|
|
|
|
|
|
|
// We start at 1 here because the first argument (the allocation size)
|
|
|
|
// has already been emitted.
|
2014-08-28 08:22:11 +08:00
|
|
|
EmitCallArgs(allocatorArgs, allocatorType, E->placement_arg_begin(),
|
2014-09-09 01:22:45 +08:00
|
|
|
E->placement_arg_end(), /* CalleeDecl */ nullptr,
|
|
|
|
/*ParamsToSkip*/ 1);
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2011-05-16 09:05:12 +08:00
|
|
|
// Emit the allocation call. If the allocator is a global placement
|
|
|
|
// operator, just "inline" it directly.
|
|
|
|
RValue RV;
|
|
|
|
if (allocator->isReservedGlobalPlacementOperator()) {
|
|
|
|
assert(allocatorArgs.size() == 2);
|
|
|
|
RV = allocatorArgs[1].RV;
|
|
|
|
// TODO: kill any unnecessary computations done for the size
|
|
|
|
// argument.
|
|
|
|
} else {
|
2013-07-22 07:12:18 +08:00
|
|
|
RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
|
2011-05-16 09:05:12 +08:00
|
|
|
}
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
// Emit a null check on the allocation result if the allocation
|
|
|
|
// function is allowed to return null (because it has a non-throwing
|
2015-02-14 09:52:20 +08:00
|
|
|
// exception spec or is the reserved placement new) and we have an
|
2011-03-07 11:12:35 +08:00
|
|
|
// interesting initializer.
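// (e.g. 'new (std::nothrow) X(args)': operator new may return null, so
// the constructor call is emitted on a not-null branch and the results
// are merged with a phi below.)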
|
2015-02-14 09:52:20 +08:00
|
|
|
bool nullCheck = E->shouldNullCheckAllocation(getContext()) &&
|
2012-02-16 20:22:20 +08:00
|
|
|
(!allocType.isPODType(getContext()) || E->hasInitializer());
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::BasicBlock *nullCheckBB = nullptr;
|
|
|
|
llvm::BasicBlock *contBB = nullptr;
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
llvm::Value *allocation = RV.getScalarVal();
|
2012-10-25 23:39:14 +08:00
|
|
|
unsigned AS = allocation->getType()->getPointerAddressSpace();
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2011-03-07 09:52:56 +08:00
|
|
|
// The null-check means that the initializer is conditionally
|
|
|
|
// evaluated.
|
|
|
|
ConditionalEvaluation conditional(*this);
|
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
if (nullCheck) {
|
|
|
|
conditional.begin(*this);
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
nullCheckBB = Builder.GetInsertBlock();
|
|
|
|
llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
|
|
|
|
contBB = createBasicBlock("new.cont");
|
2011-03-07 09:52:56 +08:00
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
|
|
|
|
Builder.CreateCondBr(isNull, contBB, notNullBB);
|
|
|
|
EmitBlock(notNullBB);
|
2009-09-23 06:53:17 +08:00
|
|
|
}
|
2010-09-02 17:58:18 +08:00
|
|
|
|
2010-09-14 15:57:04 +08:00
|
|
|
// If there's an operator delete, enter a cleanup to call it if an
|
|
|
|
// exception is thrown.
|
2011-03-07 11:12:35 +08:00
|
|
|
EHScopeStack::stable_iterator operatorDeleteCleanup;
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Instruction *cleanupDominator = nullptr;
|
2011-05-16 09:05:12 +08:00
|
|
|
if (E->getOperatorDelete() &&
|
|
|
|
!E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
|
2011-03-07 11:12:35 +08:00
|
|
|
EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
|
|
|
|
operatorDeleteCleanup = EHStack.stable_begin();
|
2011-11-10 18:43:54 +08:00
|
|
|
cleanupDominator = Builder.CreateUnreachable();
|
2010-09-14 15:57:04 +08:00
|
|
|
}
|
|
|
|
|
2011-09-07 02:53:03 +08:00
|
|
|
assert((allocSize == allocSizeWithoutCookie) ==
|
|
|
|
CalculateCookiePadding(*this, E).isZero());
|
|
|
|
if (allocSize != allocSizeWithoutCookie) {
|
|
|
|
assert(E->isArray());
|
|
|
|
allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
|
|
|
|
numElements,
|
|
|
|
E, allocType);
|
|
|
|
}
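// (Background, hedged: in the Itanium C++ ABI the cookie stores the
// element count immediately before the array, typically sizeof(size_t)
// bytes padded to the element alignment, so that array delete can later
// recover the count.)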
|
|
|
|
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *elementPtrTy
|
2011-03-07 11:12:35 +08:00
|
|
|
= ConvertTypeForMem(allocType)->getPointerTo(AS);
|
|
|
|
llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
|
2010-09-14 15:57:04 +08:00
|
|
|
|
2011-09-15 14:49:18 +08:00
|
|
|
EmitNewInitializer(*this, E, allocType, result, numElements,
|
|
|
|
allocSizeWithoutCookie);
|
2010-09-02 17:58:18 +08:00
|
|
|
if (E->isArray()) {
|
|
|
|
// NewPtr is a pointer to the base element type. If we're
|
|
|
|
// allocating an array of arrays, we'll need to cast back to the
|
|
|
|
// array pointer type.
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *resultType = ConvertTypeForMem(E->getType());
|
2011-03-07 11:12:35 +08:00
|
|
|
if (result->getType() != resultType)
|
|
|
|
result = Builder.CreateBitCast(result, resultType);
|
2010-03-25 00:57:01 +08:00
|
|
|
}
|
2010-09-14 15:57:04 +08:00
|
|
|
|
|
|
|
// Deactivate the 'operator delete' cleanup if we finished
|
|
|
|
// initialization.
|
2011-11-10 18:43:54 +08:00
|
|
|
if (operatorDeleteCleanup.isValid()) {
|
|
|
|
DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
|
|
|
|
cleanupDominator->eraseFromParent();
|
|
|
|
}
|
2012-02-16 20:22:20 +08:00
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
if (nullCheck) {
|
2011-03-07 09:52:56 +08:00
|
|
|
conditional.end(*this);
|
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
|
|
|
|
EmitBlock(contBB);
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2011-03-30 19:28:58 +08:00
|
|
|
llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
|
2011-03-07 11:12:35 +08:00
|
|
|
PHI->addIncoming(result, notNullBB);
|
|
|
|
PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
|
|
|
|
nullCheckBB);
|
2009-09-23 06:53:17 +08:00
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
result = PHI;
|
2009-09-23 06:53:17 +08:00
|
|
|
}
|
2010-09-02 17:58:18 +08:00
|
|
|
|
2011-03-07 11:12:35 +08:00
|
|
|
return result;
|
2009-09-23 06:53:17 +08:00
|
|
|
}
|
|
|
|
|
2009-11-18 08:50:08 +08:00
|
|
|
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
|
|
|
|
llvm::Value *Ptr,
|
|
|
|
QualType DeleteTy) {
|
2010-09-02 17:58:18 +08:00
|
|
|
assert(DeleteFD->getOverloadedOperator() == OO_Delete);
|
|
|
|
|
2009-11-18 08:50:08 +08:00
|
|
|
const FunctionProtoType *DeleteFTy =
|
|
|
|
DeleteFD->getType()->getAs<FunctionProtoType>();
|
|
|
|
|
|
|
|
CallArgList DeleteArgs;
|
|
|
|
|
2009-12-14 04:04:38 +08:00
|
|
|
// Check if we need to pass the size to the delete operator.
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Value *Size = nullptr;
|
2009-12-14 04:04:38 +08:00
|
|
|
QualType SizeTy;
|
2014-01-21 04:26:09 +08:00
|
|
|
if (DeleteFTy->getNumParams() == 2) {
|
|
|
|
SizeTy = DeleteFTy->getParamType(1);
|
2010-01-27 03:59:28 +08:00
|
|
|
CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
|
|
|
|
Size = llvm::ConstantInt::get(ConvertType(SizeTy),
|
|
|
|
DeleteTypeSize.getQuantity());
|
2009-12-14 04:04:38 +08:00
|
|
|
}
|
2014-01-21 04:26:09 +08:00
|
|
|
|
|
|
|
QualType ArgTy = DeleteFTy->getParamType(0);
|
2009-12-14 04:04:38 +08:00
|
|
|
llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
|
2009-12-14 04:04:38 +08:00
|
|
|
|
|
|
|
if (Size)
|
2011-05-03 01:57:46 +08:00
|
|
|
DeleteArgs.add(RValue::get(Size), SizeTy);
|
2009-11-18 08:50:08 +08:00
|
|
|
|
|
|
|
// Emit the call to delete.
|
2013-07-22 07:12:18 +08:00
|
|
|
EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
|
2009-11-18 08:50:08 +08:00
|
|
|
}
|
|
|
|
|
2010-09-02 17:58:18 +08:00
|
|
|
namespace {
|
|
|
|
/// Calls the given 'operator delete' on a single object.
|
|
|
|
struct CallObjectDelete : EHScopeStack::Cleanup {
|
|
|
|
llvm::Value *Ptr;
|
|
|
|
const FunctionDecl *OperatorDelete;
|
|
|
|
QualType ElementType;
|
|
|
|
|
|
|
|
CallObjectDelete(llvm::Value *Ptr,
|
|
|
|
const FunctionDecl *OperatorDelete,
|
|
|
|
QualType ElementType)
|
|
|
|
: Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2010-09-02 17:58:18 +08:00
|
|
|
CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2014-11-01 04:09:12 +08:00
|
|
|
void
|
|
|
|
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
|
|
|
|
llvm::Value *CompletePtr,
|
|
|
|
QualType ElementType) {
|
|
|
|
EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
|
|
|
|
OperatorDelete, ElementType);
|
|
|
|
}
|
|
|
|
|
2015-02-26 07:48:43 +08:00
|
|
|
static void EmitDelete(CodeGenFunction &CGF,
|
|
|
|
const CXXDeleteExpr *DE,
|
|
|
|
llvm::Value *Ptr,
|
|
|
|
QualType ElementType);
|
|
|
|
|
|
|
|
static void EmitSizedDelete(CodeGenFunction &CGF,
|
|
|
|
const CXXDeleteExpr *DE,
|
|
|
|
llvm::Value *Ptr,
|
|
|
|
QualType ElementType,
|
|
|
|
FunctionDecl *UnsizedDealloc) {
|
|
|
|
|
|
|
|
if (CGF.getLangOpts().DefineSizedDeallocation) {
|
|
|
|
// The delete operator in use is fixed, so simply emit the delete expr.
|
|
|
|
EmitDelete(CGF, DE, Ptr, ElementType);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(UnsizedDealloc && "We must be emitting a 'sized' delete expr");
|
|
|
|
|
|
|
|
// Branch on whether a sized operator delete is available:
|
|
|
|
// use the sized form if available, and fall back to the unsized form otherwise.
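// (A sketch of the emitted shape, hedged: the address of the sized
// 'operator delete(void*, size_t)', declared as a weak symbol, is tested
// for non-null ('sized.del.exists') to choose 'if.then' (the sized call)
// or 'if.else' (the unsized call), with both joining at 'if.end'.)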
|
|
|
|
llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("if.then");
|
|
|
|
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("if.end");
|
|
|
|
llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("if.else");
|
|
|
|
|
|
|
|
// Emit the condition.
|
|
|
|
const FunctionDecl *OpDelFD = DE->getOperatorDelete();
|
|
|
|
llvm::Value *OpDelAddr = CGF.CGM.GetAddrOfFunction(OpDelFD);
|
|
|
|
|
|
|
|
llvm::Value *SDE = CGF.Builder.CreateIsNotNull(OpDelAddr, "sized.del.exists");
|
|
|
|
CGF.Builder.CreateCondBr(SDE, ThenBlock, ElseBlock);
|
|
|
|
|
|
|
|
// Emit the 'then' code.
|
|
|
|
CGF.EmitBlock(ThenBlock);
|
|
|
|
EmitDelete(CGF, DE, Ptr, ElementType);
|
|
|
|
CGF.EmitBranch(ContBlock);
|
|
|
|
|
|
|
|
// Compute the 'unsized' delete expr.
|
|
|
|
CXXDeleteExpr *E = const_cast<CXXDeleteExpr *>(DE);
|
|
|
|
CXXDeleteExpr *UnsizedDE =
|
|
|
|
new (CGF.getContext()) CXXDeleteExpr(CGF.getContext().VoidTy,
|
|
|
|
E->isGlobalDelete(),
|
|
|
|
E->isArrayForm(),
|
|
|
|
E->isArrayFormAsWritten(),
|
|
|
|
E->doesUsualArrayDeleteWantSize(),
|
|
|
|
UnsizedDealloc,
|
|
|
|
E->getArgument(),
|
|
|
|
E->getLocStart());
|
|
|
|
// Emit the 'else' code.
|
|
|
|
{
|
|
|
|
// There is no need to emit a line number for an unconditional branch.
|
|
|
|
auto NL = ApplyDebugLocation::CreateEmpty(CGF);
|
|
|
|
CGF.EmitBlock(ElseBlock);
|
|
|
|
}
|
|
|
|
EmitDelete(CGF, UnsizedDE, Ptr, ElementType);
|
|
|
|
{
|
|
|
|
// There is no need to emit a line number for an unconditional branch.
|
|
|
|
auto NL = ApplyDebugLocation::CreateEmpty(CGF);
|
|
|
|
CGF.EmitBranch(ContBlock);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Emit the continuation block for code after the if.
|
|
|
|
CGF.EmitBlock(ContBlock, true);
|
|
|
|
}
|
|
|
|
|
2010-09-02 17:58:18 +08:00
|
|
|
/// Emit the code for deleting a single object.
|
|
|
|
static void EmitObjectDelete(CodeGenFunction &CGF,
|
2014-11-01 15:37:17 +08:00
|
|
|
const CXXDeleteExpr *DE,
|
2010-09-02 17:58:18 +08:00
|
|
|
llvm::Value *Ptr,
|
2014-11-01 15:37:17 +08:00
|
|
|
QualType ElementType) {
|
2010-09-02 17:58:18 +08:00
|
|
|
// Find the destructor for the type, if applicable. If the
|
|
|
|
// destructor is virtual, we'll just emit the vcall and return.
|
2014-05-21 13:09:00 +08:00
|
|
|
const CXXDestructorDecl *Dtor = nullptr;
|
2010-09-02 17:58:18 +08:00
|
|
|
if (const RecordType *RT = ElementType->getAs<RecordType>()) {
|
|
|
|
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
|
2011-08-03 02:05:30 +08:00
|
|
|
if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
|
2010-09-02 17:58:18 +08:00
|
|
|
Dtor = RD->getDestructor();
|
|
|
|
|
|
|
|
if (Dtor->isVirtual()) {
|
2014-11-01 15:37:17 +08:00
|
|
|
CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
|
|
|
|
Dtor);
|
2010-09-02 17:58:18 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure that we call delete even if the dtor throws.
|
2011-01-28 16:37:24 +08:00
|
|
|
// This doesn't have to be a conditional cleanup because we're going
|
|
|
|
// to pop it off in a second.
|
2014-11-01 15:37:17 +08:00
|
|
|
const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
|
2010-09-02 17:58:18 +08:00
|
|
|
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
|
|
|
|
Ptr, OperatorDelete, ElementType);
|
|
|
|
|
|
|
|
if (Dtor)
|
|
|
|
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
|
2013-01-31 13:50:40 +08:00
|
|
|
/*ForVirtualBase=*/false,
|
|
|
|
/*Delegating=*/false,
|
|
|
|
Ptr);
|
2012-03-11 15:00:24 +08:00
|
|
|
else if (CGF.getLangOpts().ObjCAutoRefCount &&
|
2011-06-16 07:02:42 +08:00
|
|
|
ElementType->isObjCLifetimeType()) {
|
|
|
|
switch (ElementType.getObjCLifetime()) {
|
|
|
|
case Qualifiers::OCL_None:
|
|
|
|
case Qualifiers::OCL_ExplicitNone:
|
|
|
|
case Qualifiers::OCL_Autoreleasing:
|
|
|
|
break;
|
2010-09-02 17:58:18 +08:00
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
case Qualifiers::OCL_Strong: {
|
|
|
|
// Load the pointer value.
|
|
|
|
llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
|
|
|
|
ElementType.isVolatileQualified());
|
|
|
|
|
2013-03-13 11:10:54 +08:00
|
|
|
CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
|
2011-06-16 07:02:42 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case Qualifiers::OCL_Weak:
|
|
|
|
CGF.EmitARCDestroyWeak(Ptr);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-09-02 17:58:18 +08:00
|
|
|
CGF.PopCleanupBlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
/// Calls the given 'operator delete' on an array of objects.
|
|
|
|
struct CallArrayDelete : EHScopeStack::Cleanup {
|
|
|
|
llvm::Value *Ptr;
|
|
|
|
const FunctionDecl *OperatorDelete;
|
|
|
|
llvm::Value *NumElements;
|
|
|
|
QualType ElementType;
|
|
|
|
CharUnits CookieSize;
|
|
|
|
|
|
|
|
CallArrayDelete(llvm::Value *Ptr,
|
|
|
|
const FunctionDecl *OperatorDelete,
|
|
|
|
llvm::Value *NumElements,
|
|
|
|
QualType ElementType,
|
|
|
|
CharUnits CookieSize)
|
|
|
|
: Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
|
|
|
|
ElementType(ElementType), CookieSize(CookieSize) {}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2010-09-02 17:58:18 +08:00
|
|
|
const FunctionProtoType *DeleteFTy =
|
|
|
|
OperatorDelete->getType()->getAs<FunctionProtoType>();
|
2014-01-21 04:26:09 +08:00
|
|
|
assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);
|
2010-09-02 17:58:18 +08:00
|
|
|
|
|
|
|
CallArgList Args;
|
|
|
|
|
|
|
|
// Pass the pointer as the first argument.
|
2014-01-21 04:26:09 +08:00
|
|
|
QualType VoidPtrTy = DeleteFTy->getParamType(0);
|
2010-09-02 17:58:18 +08:00
|
|
|
llvm::Value *DeletePtr
|
|
|
|
= CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
|
2011-05-03 01:57:46 +08:00
|
|
|
Args.add(RValue::get(DeletePtr), VoidPtrTy);
|
2010-09-02 17:58:18 +08:00
|
|
|
|
|
|
|
// Pass the original requested size as the second argument.
|
2014-01-21 04:26:09 +08:00
|
|
|
if (DeleteFTy->getNumParams() == 2) {
|
|
|
|
QualType size_t = DeleteFTy->getParamType(1);
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::IntegerType *SizeTy
|
2010-09-02 17:58:18 +08:00
|
|
|
= cast<llvm::IntegerType>(CGF.ConvertType(size_t));
|
|
|
|
|
|
|
|
CharUnits ElementTypeSize =
|
|
|
|
CGF.CGM.getContext().getTypeSizeInChars(ElementType);
|
|
|
|
|
|
|
|
// The size of an element, multiplied by the number of elements.
|
|
|
|
llvm::Value *Size
|
|
|
|
= llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
|
|
|
|
Size = CGF.Builder.CreateMul(Size, NumElements);
|
|
|
|
|
|
|
|
// Plus the size of the cookie if applicable.
|
|
|
|
if (!CookieSize.isZero()) {
|
|
|
|
llvm::Value *CookieSizeV
|
|
|
|
= llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
|
|
|
|
Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
|
|
|
|
}
|
|
|
|
|
2011-05-03 01:57:46 +08:00
|
|
|
Args.add(RValue::get(Size), size_t);
|
2010-09-02 17:58:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Emit the call to delete.
|
2013-07-22 07:12:18 +08:00
|
|
|
EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
|
2010-09-02 17:58:18 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Emit the code for deleting an array of objects.
|
|
|
|
static void EmitArrayDelete(CodeGenFunction &CGF,
|
2011-01-27 17:37:56 +08:00
|
|
|
const CXXDeleteExpr *E,
|
2011-07-13 09:41:37 +08:00
|
|
|
llvm::Value *deletedPtr,
|
|
|
|
QualType elementType) {
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Value *numElements = nullptr;
|
|
|
|
llvm::Value *allocatedPtr = nullptr;
|
2011-07-13 09:41:37 +08:00
|
|
|
CharUnits cookieSize;
|
|
|
|
CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
|
|
|
|
numElements, allocatedPtr, cookieSize);
|
2010-09-02 17:58:18 +08:00
|
|
|
|
2011-07-13 09:41:37 +08:00
|
|
|
assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
|
2010-09-02 17:58:18 +08:00
|
|
|
|
|
|
|
// Make sure that we call delete even if one of the dtors throws.
|
2011-07-13 09:41:37 +08:00
|
|
|
const FunctionDecl *operatorDelete = E->getOperatorDelete();
|
2010-09-02 17:58:18 +08:00
|
|
|
CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
|
2011-07-13 09:41:37 +08:00
|
|
|
allocatedPtr, operatorDelete,
|
|
|
|
numElements, elementType,
|
|
|
|
cookieSize);
|
|
|
|
|
|
|
|
// Destroy the elements.
|
|
|
|
if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
|
|
|
|
assert(numElements && "no element count for a type with a destructor!");
|
|
|
|
|
|
|
|
llvm::Value *arrayEnd =
|
|
|
|
CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
|
2011-07-13 16:09:46 +08:00
|
|
|
|
|
|
|
// Note that it is legal to allocate a zero-length array, and we
|
|
|
|
// can never fold the check away because the length should always
|
|
|
|
// come from a cookie.
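// (e.g. 'new X[0]' is legal; on delete the cookie yields a count of 0,
// so the destroy loop must still start with the zero-length check.)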
|
2011-07-13 09:41:37 +08:00
|
|
|
CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
|
|
|
|
CGF.getDestroyer(dtorKind),
|
2011-07-13 16:09:46 +08:00
|
|
|
/*checkZeroLength*/ true,
|
2011-07-13 09:41:37 +08:00
|
|
|
CGF.needsEHCleanup(dtorKind));
|
2010-09-02 17:58:18 +08:00
|
|
|
}
|
|
|
|
|
2011-07-13 09:41:37 +08:00
|
|
|
// Pop the cleanup block.
|
2010-09-02 17:58:18 +08:00
|
|
|
CGF.PopCleanupBlock();
|
|
|
|
}
|
|
|
|
|
2015-02-26 07:48:43 +08:00
|
|
|
static void EmitDelete(CodeGenFunction &CGF,
|
|
|
|
const CXXDeleteExpr *DE,
|
|
|
|
llvm::Value *Ptr,
|
|
|
|
QualType ElementType) {
|
|
|
|
if (DE->isArrayForm()) {
|
|
|
|
EmitArrayDelete(CGF, DE, Ptr, ElementType);
|
|
|
|
} else {
|
|
|
|
EmitObjectDelete(CGF, DE, Ptr, ElementType);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-09-23 06:53:17 +08:00
|
|
|
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
|
2009-09-30 02:16:17 +08:00
|
|
|
const Expr *Arg = E->getArgument();
|
|
|
|
llvm::Value *Ptr = EmitScalarExpr(Arg);
|
2009-09-23 06:53:17 +08:00
|
|
|
|
|
|
|
// Null check the pointer.
|
|
|
|
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
|
|
|
|
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
|
|
|
|
|
2011-04-11 08:30:07 +08:00
|
|
|
llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
|
2009-09-23 06:53:17 +08:00
|
|
|
|
|
|
|
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
|
|
|
|
EmitBlock(DeleteNotNull);
|
2010-09-02 17:58:18 +08:00
|
|
|
|
|
|
|
// We might be deleting a pointer to array. If so, GEP down to the
|
|
|
|
// first non-array element.
|
|
|
|
// (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
|
|
|
|
QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
|
|
|
|
if (DeleteTy->isConstantArrayType()) {
|
|
|
|
llvm::Value *Zero = Builder.getInt32(0);
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<llvm::Value*,8> GEP;
|
2010-09-02 17:58:18 +08:00
|
|
|
|
|
|
|
GEP.push_back(Zero); // point at the outermost array
|
|
|
|
|
|
|
|
// For each layer of array type we're pointing at:
|
|
|
|
while (const ConstantArrayType *Arr
|
|
|
|
= getContext().getAsConstantArrayType(DeleteTy)) {
|
|
|
|
// 1. Unpeel the array type.
|
|
|
|
DeleteTy = Arr->getElementType();
|
|
|
|
|
|
|
|
// 2. GEP to the first element of the array.
|
|
|
|
GEP.push_back(Zero);
|
2009-09-23 06:53:17 +08:00
|
|
|
}
|
2010-09-02 17:58:18 +08:00
|
|
|
|
2011-07-22 16:16:57 +08:00
|
|
|
Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
|
2009-09-23 06:53:17 +08:00
|
|
|
}
|
|
|
|
|
2010-09-03 01:38:50 +08:00
|
|
|
assert(ConvertTypeForMem(DeleteTy) ==
|
|
|
|
cast<llvm::PointerType>(Ptr->getType())->getElementType());
|
2010-09-02 17:58:18 +08:00
|
|
|
|
2015-02-26 07:48:43 +08:00
|
|
|
const FunctionDecl *Dealloc = E->getOperatorDelete();
|
|
|
|
if (FunctionDecl *UnsizedDealloc =
|
|
|
|
Dealloc->getCorrespondingUnsizedGlobalDeallocationFunction())
|
|
|
|
EmitSizedDelete(*this, E, Ptr, DeleteTy, UnsizedDealloc);
|
|
|
|
else
|
|
|
|
EmitDelete(*this, E, Ptr, DeleteTy);
|
2009-09-23 06:53:17 +08:00
|
|
|
|
|
|
|
EmitBlock(DeleteEnd);
|
|
|
|
}
|
2009-11-15 16:09:41 +08:00
|
|
|
|
2014-07-19 08:17:06 +08:00
|
|
|
static bool isGLValueFromPointerDeref(const Expr *E) {
|
|
|
|
E = E->IgnoreParens();
|
|
|
|
|
|
|
|
if (const auto *CE = dyn_cast<CastExpr>(E)) {
|
|
|
|
if (!CE->getSubExpr()->isGLValue())
|
|
|
|
return false;
|
|
|
|
return isGLValueFromPointerDeref(CE->getSubExpr());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
|
|
|
|
return isGLValueFromPointerDeref(OVE->getSourceExpr());
|
|
|
|
|
|
|
|
if (const auto *BO = dyn_cast<BinaryOperator>(E))
|
|
|
|
if (BO->getOpcode() == BO_Comma)
|
|
|
|
return isGLValueFromPointerDeref(BO->getRHS());
|
|
|
|
|
|
|
|
if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
|
|
|
|
return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
|
|
|
|
isGLValueFromPointerDeref(ACO->getFalseExpr());
|
|
|
|
|
|
|
|
// C++11 [expr.sub]p1:
|
|
|
|
// The expression E1[E2] is identical (by definition) to *((E1)+(E2))
|
|
|
|
if (isa<ArraySubscriptExpr>(E))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (const auto *UO = dyn_cast<UnaryOperator>(E))
|
|
|
|
if (UO->getOpcode() == UO_Deref)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-06-19 05:15:55 +08:00
|
|
|
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *StdTypeInfoPtrTy) {
|
2011-04-18 08:57:03 +08:00
|
|
|
// Get the vtable pointer.
|
|
|
|
llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
|
|
|
|
|
|
|
|
// C++ [expr.typeid]p2:
|
|
|
|
// If the glvalue expression is obtained by applying the unary * operator to
|
|
|
|
// a pointer and the pointer is a null pointer value, the typeid expression
|
|
|
|
// throws the std::bad_typeid exception.
|
2014-07-19 08:17:06 +08:00
|
|
|
//
|
|
|
|
// However, this paragraph's intent is not clear. We choose a very generous
|
|
|
|
// interpretation which implores us to consider comma operators, conditional
|
|
|
|
// operators, parentheses and other such constructs.
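// (Examples that get the null check under this interpretation:
// 'typeid(*p)', 'typeid(p[0])', and 'typeid(*(cond ? p : q))' for a
// pointer 'p' to a polymorphic class type.)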
|
2014-06-23 03:05:33 +08:00
|
|
|
QualType SrcRecordTy = E->getType();
|
2014-07-19 08:17:06 +08:00
|
|
|
if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
|
|
|
|
isGLValueFromPointerDeref(E), SrcRecordTy)) {
|
2014-06-23 03:05:33 +08:00
|
|
|
llvm::BasicBlock *BadTypeidBlock =
|
2011-04-18 08:57:03 +08:00
|
|
|
CGF.createBasicBlock("typeid.bad_typeid");
|
2014-06-23 03:05:33 +08:00
|
|
|
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
|
2011-04-18 08:57:03 +08:00
|
|
|
|
2014-06-23 03:05:33 +08:00
|
|
|
llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
|
|
|
|
CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
|
2011-04-18 08:57:03 +08:00
|
|
|
|
2014-06-23 03:05:33 +08:00
|
|
|
CGF.EmitBlock(BadTypeidBlock);
|
|
|
|
CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
|
|
|
|
CGF.EmitBlock(EndBlock);
|
2011-04-18 08:57:03 +08:00
|
|
|
}
|
|
|
|
|
2014-06-23 03:05:33 +08:00
|
|
|
return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
|
|
|
|
StdTypeInfoPtrTy);
|
2011-04-18 08:57:03 +08:00
|
|
|
}
|
|
|
|
|
2011-01-28 16:37:24 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *StdTypeInfoPtrTy =
|
2011-04-18 08:57:03 +08:00
|
|
|
ConvertType(E->getType())->getPointerTo();
|
2009-12-11 10:46:30 +08:00
|
|
|
|
2009-12-17 15:09:17 +08:00
|
|
|
if (E->isTypeOperand()) {
|
2013-09-27 15:04:31 +08:00
|
|
|
llvm::Constant *TypeInfo =
|
|
|
|
CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
|
2011-04-18 08:57:03 +08:00
|
|
|
return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
|
2009-12-17 15:09:17 +08:00
|
|
|
}
|
2011-04-11 22:13:40 +08:00
|
|
|
|
2011-04-18 08:57:03 +08:00
|
|
|
// C++ [expr.typeid]p2:
|
|
|
|
// When typeid is applied to a glvalue expression whose type is a
|
|
|
|
// polymorphic class type, the result refers to a std::type_info object
|
|
|
|
// representing the type of the most derived object (that is, the dynamic
|
|
|
|
// type) to which the glvalue refers.
|
2012-08-14 04:08:14 +08:00
|
|
|
if (E->isPotentiallyEvaluated())
|
|
|
|
return EmitTypeidFromVTable(*this, E->getExprOperand(),
|
|
|
|
StdTypeInfoPtrTy);
|
2011-04-18 08:57:03 +08:00
|
|
|
|
|
|
|
QualType OperandTy = E->getExprOperand()->getType();
|
|
|
|
return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
|
|
|
|
StdTypeInfoPtrTy);
|
2009-11-15 16:09:41 +08:00
|
|
|
}
|
2009-11-16 14:50:58 +08:00
|
|
|
|
2011-04-11 09:45:29 +08:00
|
|
|
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
|
|
|
|
QualType DestTy) {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
|
2011-04-11 09:45:29 +08:00
|
|
|
if (DestTy->isPointerType())
|
|
|
|
return llvm::Constant::getNullValue(DestLTy);
|
|
|
|
|
|
|
|
/// C++ [expr.dynamic.cast]p9:
|
|
|
|
/// A failed cast to reference type throws std::bad_cast
|
2014-06-23 03:05:33 +08:00
|
|
|
if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
|
|
|
|
return nullptr;
|
2011-04-11 09:45:29 +08:00
|
|
|
|
|
|
|
CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
|
|
|
|
return llvm::UndefValue::get(DestLTy);
|
|
|
|
}
|
|
|
|
|
2011-04-11 08:46:40 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
|
|
|
|
const CXXDynamicCastExpr *DCE) {
|
|
|
|
QualType DestTy = DCE->getTypeAsWritten();
|
2009-11-16 14:50:58 +08:00
|
|
|
|
2011-04-11 09:45:29 +08:00
|
|
|
if (DCE->isAlwaysNull())
|
2014-06-23 03:05:33 +08:00
|
|
|
if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
|
|
|
|
return T;
|
2011-04-11 09:45:29 +08:00
|
|
|
|
|
|
|
QualType SrcTy = DCE->getSubExpr()->getType();
|
|
|
|
|
2014-06-23 03:05:33 +08:00
|
|
|
// C++ [expr.dynamic.cast]p7:
|
|
|
|
// If T is "pointer to cv void," then the result is a pointer to the most
|
|
|
|
// derived object pointed to by v.
|
|
|
|
const PointerType *DestPTy = DestTy->getAs<PointerType>();
|
|
|
|
|
|
|
|
bool isDynamicCastToVoid;
|
|
|
|
QualType SrcRecordTy;
|
|
|
|
QualType DestRecordTy;
|
|
|
|
if (DestPTy) {
|
|
|
|
isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
|
|
|
|
SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
|
|
|
|
DestRecordTy = DestPTy->getPointeeType();
|
|
|
|
} else {
|
|
|
|
isDynamicCastToVoid = false;
|
|
|
|
SrcRecordTy = SrcTy;
|
|
|
|
DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
|
|
|
|
|
2011-04-11 08:46:40 +08:00
|
|
|
// C++ [expr.dynamic.cast]p4:
|
|
|
|
// If the value of v is a null pointer value in the pointer case, the result
|
|
|
|
// is the null pointer value of type T.
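// (i.e. 'dynamic_cast<Derived*>(p)' with a null 'p' simply yields a null
// 'Derived*' without entering the runtime call; the check applies only to
// the pointer form of the cast.)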
|
2014-06-23 03:05:33 +08:00
|
|
|
bool ShouldNullCheckSrcValue =
|
|
|
|
CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
|
|
|
|
SrcRecordTy);
|
2014-05-21 13:09:00 +08:00
|
|
|
|
|
|
|
llvm::BasicBlock *CastNull = nullptr;
|
|
|
|
llvm::BasicBlock *CastNotNull = nullptr;
|
2011-04-11 08:46:40 +08:00
|
|
|
llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
|
|
|
|
|
|
|
|
if (ShouldNullCheckSrcValue) {
|
|
|
|
CastNull = createBasicBlock("dynamic_cast.null");
|
|
|
|
CastNotNull = createBasicBlock("dynamic_cast.notnull");
|
2009-11-16 14:50:58 +08:00
|
|
|
|
2011-04-11 08:46:40 +08:00
|
|
|
llvm::Value *IsNull = Builder.CreateIsNull(Value);
|
|
|
|
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
|
|
|
|
EmitBlock(CastNotNull);
|
2009-11-16 14:50:58 +08:00
|
|
|
}
|
2011-04-11 08:46:40 +08:00
|
|
|
|
2014-06-23 03:05:33 +08:00
|
|
|
if (isDynamicCastToVoid) {
|
|
|
|
Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, Value, SrcRecordTy,
|
|
|
|
DestTy);
|
|
|
|
} else {
|
|
|
|
assert(DestRecordTy->isRecordType() &&
|
|
|
|
"destination type must be a record type!");
|
|
|
|
Value = CGM.getCXXABI().EmitDynamicCastCall(*this, Value, SrcRecordTy,
|
|
|
|
DestTy, DestRecordTy, CastEnd);
|
|
|
|
}
|
2011-04-11 08:46:40 +08:00
|
|
|
|
|
|
|
if (ShouldNullCheckSrcValue) {
|
|
|
|
EmitBranch(CastEnd);
|
|
|
|
|
|
|
|
EmitBlock(CastNull);
|
|
|
|
EmitBranch(CastEnd);
|
2009-11-16 14:50:58 +08:00
|
|
|
}
|
2011-04-11 08:46:40 +08:00
|
|
|
|
|
|
|
EmitBlock(CastEnd);
|
|
|
|
|
|
|
|
if (ShouldNullCheckSrcValue) {
|
|
|
|
llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
|
|
|
|
PHI->addIncoming(Value, CastNotNull);
|
|
|
|
PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
|
|
|
|
|
|
|
|
Value = PHI;
|
2009-11-16 14:50:58 +08:00
|
|
|
}
|
|
|
|
|
2011-04-11 08:46:40 +08:00
|
|
|
return Value;
|
2009-11-16 14:50:58 +08:00
|
|
|
}
|
2012-02-09 11:32:31 +08:00
|
|
|
|
|
|
|
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
|
2012-02-09 11:47:20 +08:00
|
|
|
RunCleanupsScope Scope(*this);
|
2014-08-28 12:28:19 +08:00
|
|
|
LValue SlotLV =
|
|
|
|
MakeAddrLValue(Slot.getAddr(), E->getType(), Slot.getAlignment());
|
2012-02-09 11:47:20 +08:00
|
|
|
|
2012-02-09 11:32:31 +08:00
|
|
|
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
|
|
|
|
for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
|
|
|
|
e = E->capture_init_end();
|
2012-02-29 11:25:18 +08:00
|
|
|
i != e; ++i, ++CurField) {
|
2012-02-09 11:32:31 +08:00
|
|
|
// Emit initialization
|
2012-06-07 04:45:41 +08:00
|
|
|
LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
|
2014-08-28 12:28:19 +08:00
|
|
|
if (CurField->hasCapturedVLAType()) {
|
|
|
|
auto VAT = CurField->getCapturedVLAType();
|
|
|
|
EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
|
|
|
|
} else {
|
|
|
|
ArrayRef<VarDecl *> ArrayIndexes;
|
|
|
|
if (CurField->getType()->isArrayType())
|
|
|
|
ArrayIndexes = E->getCaptureInitIndexVars(i);
|
|
|
|
EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
|
|
|
|
}
|
2012-02-09 11:32:31 +08:00
|
|
|
}
|
|
|
|
}
|