//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "ConstantEmitter.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");
  ASTContext &C = CGF.getContext();

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
  Args.add(RValue::get(This),
           RD ? C.getPointerType(C.getTypeDeclType(RD)) : C.VoidPtrTy);

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD);
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
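    // (For example, a call written `a + b` that resolves to a member
    //  operator+ arrives here as operator+(a, b); `a` was already pushed as
    //  the `this` argument above, so only `b` remains to be emitted.)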
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, CallArgList *RtlArgs) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
                  CE ? CE->getExprLoc() : SourceLocation());
}

RValue CodeGenFunction::EmitCXXDestructorCall(
    const CXXDestructorDecl *DD, const CGCallee &Callee, llvm::Value *This,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
    StructorType Type) {
  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, DD, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(DD, Type),
                  Callee, ReturnValueSlot(), Args);
}

RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
    const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress();
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
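    // (For example, given `get()->~T()` where T is a scalar type, only
    //  `get()` is evaluated; no destructor actually runs.)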
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
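// (The extension allows writing e.g. `ptr->Type::Type(args)` to invoke a
// constructor explicitly.)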
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD), MD);
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return type of MD
      // and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
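      // (For example, if Base::clone() returns Base* and Derived::clone()
      //  returns Derived*, the devirtualized call would also need a
      //  return-value adjustment that is not emitted here.)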
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
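  // (For example, for `a = make()` with a user-declared copy assignment
  //  operator, `make()` must be evaluated before `a`.)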
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      RtlArgs = &RtlArgStorage;
      EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                   drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                   /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(nullptr);

    if (!MD->getParent()->mayInsertExtraPadding()) {
      if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
        // We don't like to generate the trivial copy/move assignment operator
        // when it isn't necessary; just produce the proper effect here.
        LValue RHS = isa<CXXOperatorCallExpr>(CE)
                         ? MakeNaturalAlignAddrLValue(
                               (*RtlArgs)[0].getRValue(*this).getScalarVal(),
                               (*(CE->arg_begin() + 1))->getType())
                         : EmitLValue(*CE->arg_begin());
        EmitAggregateAssign(This, RHS, CE->getType());
        return RValue::get(This.getPointer());
      }

      if (isa<CXXConstructorDecl>(MD) &&
          cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
        // Trivial move and copy ctor are the same.
        assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
        const Expr *Arg = *CE->arg_begin();
        LValue RHS = EmitLValue(Arg);
        LValue Dest = MakeAddrLValue(This.getAddress(), Arg->getType());
        // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
        // constructing a new complete object of type Ctor.
        EmitAggregateCopy(Dest, RHS, Arg->getType(),
                          AggValueSlot::DoesNotOverlap);
        return RValue::get(This.getPointer());
      }
      llvm_unreachable("unknown trivial member function");
    }
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Dtor, StructorType::Complete);
  else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Ctor, StructorType::Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
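  // (For example, invoking a member function through a null pointer or a
  //  pointer to an unrelated type is undefined; the type check emitted below
  //  can diagnose such calls when UBSan checks are enabled.)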
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }
  EmitTypeCheck(
      isa<CXXConstructorDecl>(CalleeDecl) ? CodeGenFunction::TCK_ConstructorCall
                                          : CodeGenFunction::TCK_MemberCall,
      CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()),
      /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  //        'CalleeDecl' instead.

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
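  // (For example, `x.Base::f()` calls Base::f directly even if f is virtual
  //  and overridden in x's dynamic type.)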
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(
          *this, Dtor, Dtor_Complete, This.getAddress(),
          cast<CXXMemberCallExpr>(CE));
    } else {
      CGCallee Callee;
      if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee = CGCallee::forDirect(
            CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty),
            Dtor);
      else {
        const CXXDestructorDecl *DDtor =
            cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGCallee::forDirect(
            CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
            DDtor);
      }
      EmitCXXMemberOrOperatorCall(
          CalleeDecl, Callee, ReturnValue, This.getPointer(),
          /*ImplicitParam=*/nullptr, QualType(), CE, nullptr);
    }
    return RValue::get(nullptr);
  }

  CGCallee Callee;
  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGCallee::forDirect(
        CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
        Ctor);
  } else if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) =
          CGM.getCXXABI().LoadVTablePtr(*this, This.getAddress(),
                                        MD->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getLocStart());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), MD);
    else {
      Callee = CGCallee::forDirect(
          CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
          DevirtualizedMethod);
    }
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}
|
|
|
|
|
|
|
|
|
|
RValue
|
|
|
|
|
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
|
|
|
|
|
ReturnValueSlot ReturnValue) {
|
|
|
|
|
const BinaryOperator *BO =
|
|
|
|
|
cast<BinaryOperator>(E->getCallee()->IgnoreParens());
|
|
|
|
|
const Expr *BaseExpr = BO->getLHS();
|
|
|
|
|
const Expr *MemFnExpr = BO->getRHS();
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
|
|
|
|
const MemberPointerType *MPT =
|
2011-04-27 04:42:42 +08:00
|
|
|
|
MemFnExpr->getType()->castAs<MemberPointerType>();
|
2010-08-22 08:05:51 +08:00
|
|
|
|
|
2018-07-31 03:24:48 +08:00
|
|
|
|
const FunctionProtoType *FPT =
|
2011-04-27 04:42:42 +08:00
|
|
|
|
MPT->getPointeeType()->castAs<FunctionProtoType>();
|
2018-07-31 03:24:48 +08:00
|
|
|
|
const CXXRecordDecl *RD =
|
2010-01-02 04:29:01 +08:00
|
|
|
|
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
|
|
|
|
|
|
|
|
|
|
// Emit the 'this' pointer.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
|
Address This = Address::invalid();
|
2010-08-25 19:45:40 +08:00
|
|
|
|
if (BO->getOpcode() == BO_PtrMemI)
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
|
This = EmitPointerWithAlignment(BaseExpr);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
else
|
2010-01-02 04:29:01 +08:00
|
|
|
|
This = EmitLValue(BaseExpr).getAddress();
|
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
|
EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
|
2012-10-10 03:52:38 +08:00
|
|
|
|
QualType(MPT->getClass(), 0));
|
2012-08-24 08:54:33 +08:00
|
|
|
|
|
2016-09-27 07:56:57 +08:00
|
|
|
|
// Get the member function pointer.
|
|
|
|
|
llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
|
|
|
|
|
|
2010-08-22 08:05:51 +08:00
|
|
|
|
// Ask the ABI to load the callee. Note that This is modified.
|
2015-09-08 16:05:57 +08:00
|
|
|
|
llvm::Value *ThisPtrForCall = nullptr;
|
2016-10-27 07:46:34 +08:00
|
|
|
|
CGCallee Callee =
|
2015-09-08 16:05:57 +08:00
|
|
|
|
CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
|
|
|
|
|
ThisPtrForCall, MemFnPtr, MPT);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2010-01-02 04:29:01 +08:00
|
|
|
|
CallArgList Args;
|
|
|
|
|
|
2018-07-31 03:24:48 +08:00
|
|
|
|
QualType ThisType =
|
2010-01-02 04:29:01 +08:00
|
|
|
|
getContext().getPointerType(getContext().getTagDeclType(RD));
|
|
|
|
|
|
|
|
|
|
// Push the this ptr.
|
2015-09-08 16:05:57 +08:00
|
|
|
|
Args.add(RValue::get(ThisPtrForCall), ThisType);
|
2012-07-07 14:41:13 +08:00
|
|
|
|
|
2016-06-17 07:06:04 +08:00
|
|
|
|
RequiredArgs required =
|
|
|
|
|
RequiredArgs::forPrototypePlus(FPT, 1, /*FD=*/nullptr);
|
|
|
|
|
|
2010-01-02 04:29:01 +08:00
|
|
|
|
// And the rest of the call args
|
2016-06-17 07:06:04 +08:00
|
|
|
|
EmitCallArgs(Args, FPT, E->arguments());
|
2017-02-24 06:07:35 +08:00
|
|
|
|
return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
|
|
|
|
|
/*PrefixSize=*/0),
|
2017-12-21 08:10:25 +08:00
|
|
|
|
Callee, ReturnValue, Args, nullptr, E->getExprLoc());
|
2010-01-02 04:29:01 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
RValue
|
|
|
|
|
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
|
|
|
|
|
const CXXMethodDecl *MD,
|
|
|
|
|
ReturnValueSlot ReturnValue) {
|
|
|
|
|
assert(MD->isInstance() &&
|
|
|
|
|
"Trying to emit a member call expr on a static method!");
|
Fix incorrect codegen for devirtualized calls to virtual overloaded operators.
Consider this program:
struct A {
  virtual void operator-() { printf("base\n"); }
};
struct B final : public A {
  virtual void operator-() override { printf("derived\n"); }
};
int main() {
  B* b = new B;
  -static_cast<A&>(*b);
}
Before this patch, clang saw the virtual call to A::operator-(), figured out
that it can be devirtualized, and then just called A::operator-() directly,
without going through the vtable. Instead, it should've looked up which
operator-() the call devirtualizes to and should've called that.
For regular virtual member calls, clang gets all this right already. So
instead of giving EmitCXXOperatorMemberCallee() all the logic that
EmitCXXMemberCallExpr() already has, cut the latter function into two pieces,
call the second piece EmitCXXMemberOrOperatorMemberCallExpr(), and use it also
to generate code for calls to virtual member operators.
This way, virtual overloaded operators automatically don't get devirtualized
if they have covariant returns (like it was done for regular calls in r218602),
etc.
This also happens to fix (or at least improve) codegen for explicit constructor
calls (`A a; a.A::A()`) in MS mode with -fsanitize-address-field-padding=1.
(This adjustment for virtual operator calls still seems wrong with the MS ABI.)
llvm-svn: 223185
2014-12-03 09:21:41 +08:00
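(Illustrative sketch, not quoted from the patch: once the dynamic type of the
object expression is known, the callee has to be resolved to its final
overrider before a direct call is emitted, roughly

  // Hypothetical sketch; BestDynamicDecl stands for the statically known
  // most-derived class of the object expression.
  if (const CXXMethodDecl *Devirt =
          MD->getCorrespondingMethodInClass(BestDynamicDecl))
    MD = Devirt;  // direct call to B::operator-(), which prints "derived"

so the example program above prints "derived" rather than "base".)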
|
|
|
|
return EmitCXXMemberOrOperatorMemberCallExpr(
|
|
|
|
|
E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
|
|
|
|
|
/*IsArrow=*/false, E->getArg(0));
|
2010-01-02 04:29:01 +08:00
|
|
|
|
}
|
|
|
|
|
|
2011-10-07 02:29:37 +08:00
|
|
|
|
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
|
|
|
|
|
ReturnValueSlot ReturnValue) {
|
|
|
|
|
return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-14 10:27:24 +08:00
|
|
|
|
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
|
2015-09-08 16:05:57 +08:00
|
|
|
|
Address DestPtr,
|
2011-10-14 10:27:24 +08:00
|
|
|
|
const CXXRecordDecl *Base) {
|
|
|
|
|
if (Base->isEmpty())
|
|
|
|
|
return;
|
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
|
DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
|
2011-10-14 10:27:24 +08:00
|
|
|
|
|
|
|
|
|
const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
|
2015-11-02 17:01:44 +08:00
|
|
|
|
CharUnits NVSize = Layout.getNonVirtualSize();
|
|
|
|
|
|
|
|
|
|
// We cannot simply zero-initialize the entire base sub-object if vbptrs are
|
|
|
|
|
// present; they are initialized by the most derived class before calling the
|
|
|
|
|
// constructor.
|
|
|
|
|
SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
|
|
|
|
|
Stores.emplace_back(CharUnits::Zero(), NVSize);
|
|
|
|
|
|
|
|
|
|
// Each store is split by the existence of a vbptr.
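// For example (illustrative layout): with an 8-byte vbptr at offset 8 and a
// non-virtual size of 24 bytes, the initial store (offset 0, size 24) is
// split into (0, 8) and (16, 8), leaving the vbptr bytes [8, 16) untouched.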
|
|
|
|
|
CharUnits VBPtrWidth = CGF.getPointerSize();
|
|
|
|
|
std::vector<CharUnits> VBPtrOffsets =
|
|
|
|
|
CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
|
|
|
|
|
for (CharUnits VBPtrOffset : VBPtrOffsets) {
|
2016-05-12 11:51:52 +08:00
|
|
|
|
// Stop before we hit any virtual base pointers located in virtual bases.
|
|
|
|
|
if (VBPtrOffset >= NVSize)
|
|
|
|
|
break;
|
2015-11-02 17:01:44 +08:00
|
|
|
|
std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
|
|
|
|
|
CharUnits LastStoreOffset = LastStore.first;
|
|
|
|
|
CharUnits LastStoreSize = LastStore.second;
|
|
|
|
|
|
|
|
|
|
CharUnits SplitBeforeOffset = LastStoreOffset;
|
|
|
|
|
CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
|
|
|
|
|
assert(!SplitBeforeSize.isNegative() && "negative store size!");
|
|
|
|
|
if (!SplitBeforeSize.isZero())
|
|
|
|
|
Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);
|
|
|
|
|
|
|
|
|
|
CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
|
|
|
|
|
CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
|
|
|
|
|
assert(!SplitAfterSize.isNegative() && "negative store size!");
|
|
|
|
|
if (!SplitAfterSize.isZero())
|
|
|
|
|
Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
|
|
|
|
|
}
|
2011-10-14 10:27:24 +08:00
|
|
|
|
|
|
|
|
|
// If the type contains a pointer to data member, we can't memset it to zero.
|
|
|
|
|
// Instead, create a null constant and copy it to the destination.
|
|
|
|
|
// TODO: there are other patterns besides zero that we can usefully memset,
|
|
|
|
|
// like -1, which happens to be the pattern used by member-pointers.
|
|
|
|
|
// TODO: isZeroInitializable can be over-conservative in the case where a
|
|
|
|
|
// virtual base contains a member pointer.
|
2015-11-02 17:01:44 +08:00
|
|
|
|
llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
|
|
|
|
|
if (!NullConstantForBase->isNullValue()) {
|
|
|
|
|
llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
|
|
|
|
|
CGF.CGM.getModule(), NullConstantForBase->getType(),
|
|
|
|
|
/*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
|
|
|
|
|
NullConstantForBase, Twine());
|
2015-09-08 16:05:57 +08:00
|
|
|
|
|
|
|
|
|
CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
|
|
|
|
|
DestPtr.getAlignment());
|
2011-10-14 10:27:24 +08:00
|
|
|
|
NullVariable->setAlignment(Align.getQuantity());
|
2015-09-08 16:05:57 +08:00
|
|
|
|
|
|
|
|
|
Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);
|
2011-10-14 10:27:24 +08:00
|
|
|
|
|
|
|
|
|
// Get and call the appropriate llvm.memcpy overload.
|
2015-11-02 17:01:44 +08:00
|
|
|
|
for (std::pair<CharUnits, CharUnits> Store : Stores) {
|
|
|
|
|
CharUnits StoreOffset = Store.first;
|
|
|
|
|
CharUnits StoreSize = Store.second;
|
|
|
|
|
llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
|
|
|
|
|
CGF.Builder.CreateMemCpy(
|
|
|
|
|
CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
|
|
|
|
|
CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
|
|
|
|
|
StoreSizeVal);
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-14 10:27:24 +08:00
|
|
|
|
// Otherwise, just memset the whole thing to zero. This is legal
|
|
|
|
|
// because in LLVM, all default initializers (other than the ones we just
|
|
|
|
|
// handled above) are guaranteed to have a bit pattern of all zeros.
|
2015-11-02 17:01:44 +08:00
|
|
|
|
} else {
|
|
|
|
|
for (std::pair<CharUnits, CharUnits> Store : Stores) {
|
|
|
|
|
CharUnits StoreOffset = Store.first;
|
|
|
|
|
CharUnits StoreSize = Store.second;
|
|
|
|
|
llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
|
|
|
|
|
CGF.Builder.CreateMemSet(
|
|
|
|
|
CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
|
|
|
|
|
CGF.Builder.getInt8(0), StoreSizeVal);
|
|
|
|
|
}
|
|
|
|
|
}
|
2011-10-14 10:27:24 +08:00
|
|
|
|
}
|
|
|
|
|
|
2010-01-02 04:29:01 +08:00
|
|
|
|
void
|
2010-09-15 18:14:12 +08:00
|
|
|
|
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
|
|
|
|
|
AggValueSlot Dest) {
|
|
|
|
|
assert(!Dest.isIgnored() && "Must have a destination!");
|
2010-01-02 04:29:01 +08:00
|
|
|
|
const CXXConstructorDecl *CD = E->getConstructor();
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2010-08-23 00:15:35 +08:00
|
|
|
|
// If we require zero initialization before (or instead of) calling the
|
|
|
|
|
// constructor, as can be the case with a non-user-provided default
|
2011-04-29 06:57:55 +08:00
|
|
|
|
// constructor, emit the zero initialization now, unless the destination is
|
|
|
|
|
// already zeroed.
|
2011-10-14 10:27:24 +08:00
|
|
|
|
if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
|
|
|
|
|
switch (E->getConstructionKind()) {
|
|
|
|
|
case CXXConstructExpr::CK_Delegating:
|
|
|
|
|
case CXXConstructExpr::CK_Complete:
|
2015-09-08 16:05:57 +08:00
|
|
|
|
EmitNullInitialization(Dest.getAddress(), E->getType());
|
2011-10-14 10:27:24 +08:00
|
|
|
|
break;
|
|
|
|
|
case CXXConstructExpr::CK_VirtualBase:
|
|
|
|
|
case CXXConstructExpr::CK_NonVirtualBase:
|
2015-09-08 16:05:57 +08:00
|
|
|
|
EmitNullBaseClassInitialization(*this, Dest.getAddress(),
|
|
|
|
|
CD->getParent());
|
2011-10-14 10:27:24 +08:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2010-08-23 00:15:35 +08:00
|
|
|
|
// If this is a call to a trivial default constructor, do nothing.
|
|
|
|
|
if (CD->isTrivial() && CD->isDefaultConstructor())
|
|
|
|
|
return;
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2010-09-18 08:58:34 +08:00
|
|
|
|
// Elide the constructor if we're constructing from a temporary.
|
|
|
|
|
// The temporary check is required because Sema sets this on NRVO
|
|
|
|
|
// returns.
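// For example (illustrative): for 'X x = X(args);' the elidable copy from
// the temporary is skipped and the temporary's initializer is emitted
// directly into the destination slot.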
|
2012-11-02 06:30:59 +08:00
|
|
|
|
if (getLangOpts().ElideConstructors && E->isElidable()) {
|
2010-09-18 08:58:34 +08:00
|
|
|
|
assert(getContext().hasSameUnqualifiedType(E->getType(),
|
|
|
|
|
E->getArg(0)->getType()));
|
2010-09-15 18:14:12 +08:00
|
|
|
|
if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
|
|
|
|
|
EmitAggExpr(E->getArg(0), Dest);
|
2010-05-15 08:13:29 +08:00
|
|
|
|
return;
|
|
|
|
|
}
|
2010-01-02 04:29:01 +08:00
|
|
|
|
}
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2016-04-29 17:39:50 +08:00
|
|
|
|
if (const ArrayType *arrayType
|
|
|
|
|
= getContext().getAsArrayType(E->getType())) {
|
2018-07-28 23:33:03 +08:00
|
|
|
|
EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
|
|
|
|
|
Dest.isSanitizerChecked());
|
2011-07-13 14:10:41 +08:00
|
|
|
|
} else {
|
2011-05-07 05:28:42 +08:00
|
|
|
|
CXXCtorType Type = Ctor_Complete;
|
2011-05-04 04:19:28 +08:00
|
|
|
|
bool ForVirtualBase = false;
|
2013-01-31 13:50:40 +08:00
|
|
|
|
bool Delegating = false;
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2011-05-04 04:19:28 +08:00
|
|
|
|
switch (E->getConstructionKind()) {
|
|
|
|
|
case CXXConstructExpr::CK_Delegating:
|
2011-05-01 15:04:31 +08:00
|
|
|
|
// We should be emitting a constructor; GlobalDecl will assert this
|
|
|
|
|
Type = CurGD.getCtorType();
|
2013-01-31 13:50:40 +08:00
|
|
|
|
Delegating = true;
|
2011-05-04 04:19:28 +08:00
|
|
|
|
break;
|
2011-05-01 15:04:31 +08:00
|
|
|
|
|
2011-05-04 04:19:28 +08:00
|
|
|
|
case CXXConstructExpr::CK_Complete:
|
|
|
|
|
Type = Ctor_Complete;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case CXXConstructExpr::CK_VirtualBase:
|
|
|
|
|
ForVirtualBase = true;
|
2017-12-20 06:06:11 +08:00
|
|
|
|
LLVM_FALLTHROUGH;
|
2011-05-04 04:19:28 +08:00
|
|
|
|
|
|
|
|
|
case CXXConstructExpr::CK_NonVirtualBase:
|
|
|
|
|
Type = Ctor_Base;
|
|
|
|
|
}
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2010-01-02 04:29:01 +08:00
|
|
|
|
// Call the constructor.
|
2015-09-08 16:05:57 +08:00
|
|
|
|
EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating,
|
2018-07-28 23:33:03 +08:00
|
|
|
|
Dest.getAddress(), E, Dest.mayOverlap(),
|
|
|
|
|
Dest.isSanitizerChecked());
|
2010-05-03 07:20:53 +08:00
|
|
|
|
}
|
2010-01-02 04:29:01 +08:00
|
|
|
|
}
|
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
|
void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
|
|
|
|
|
const Expr *Exp) {
|
2010-12-06 16:20:24 +08:00
|
|
|
|
if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
|
2010-11-14 05:53:34 +08:00
|
|
|
|
Exp = E->getSubExpr();
|
2018-07-31 03:24:48 +08:00
|
|
|
|
assert(isa<CXXConstructExpr>(Exp) &&
|
2010-11-14 05:53:34 +08:00
|
|
|
|
"EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
|
|
|
|
|
const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
|
|
|
|
|
const CXXConstructorDecl *CD = E->getConstructor();
|
|
|
|
|
RunCleanupsScope Scope(*this);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2010-11-14 05:53:34 +08:00
|
|
|
|
// If we require zero initialization before (or instead of) calling the
|
|
|
|
|
// constructor, as can be the case with a non-user-provided default
|
|
|
|
|
// constructor, emit the zero initialization now.
|
|
|
|
|
// FIXME: Do I still need this for a copy ctor synthesis?
|
|
|
|
|
if (E->requiresZeroInitialization())
|
|
|
|
|
EmitNullInitialization(Dest, E->getType());
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2010-11-15 21:54:43 +08:00
|
|
|
|
assert(!getContext().getAsConstantArrayType(E->getType())
|
|
|
|
|
&& "EmitSynthesizedCXXCopyCtor - Copied-in Array");
|
2014-08-26 05:58:56 +08:00
|
|
|
|
EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
|
2010-11-14 05:53:34 +08:00
|
|
|
|
}
|
|
|
|
|
|
2010-09-02 17:58:18 +08:00
|
|
|
|
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
|
|
|
|
|
const CXXNewExpr *E) {
|
2009-12-14 04:04:38 +08:00
|
|
|
|
if (!E->isArray())
|
2010-01-27 03:44:24 +08:00
|
|
|
|
return CharUnits::Zero();
|
2009-12-14 04:04:38 +08:00
|
|
|
|
|
2011-05-16 09:05:12 +08:00
|
|
|
|
// No cookie is required if the operator new[] being used is the
|
|
|
|
|
// reserved placement operator new[].
|
|
|
|
|
if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
|
2010-08-23 09:17:59 +08:00
|
|
|
|
return CharUnits::Zero();
|
|
|
|
|
|
2011-01-27 17:37:56 +08:00
|
|
|
|
return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
|
2009-09-24 00:07:23 +08:00
|
|
|
|
}
|
|
|
|
|
|
2011-05-15 15:14:44 +08:00
|
|
|
|
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
|
|
|
|
|
const CXXNewExpr *e,
|
2012-02-23 01:37:52 +08:00
|
|
|
|
unsigned minElements,
|
2011-05-15 15:14:44 +08:00
|
|
|
|
llvm::Value *&numElements,
|
|
|
|
|
llvm::Value *&sizeWithoutCookie) {
|
|
|
|
|
QualType type = e->getAllocatedType();
|
|
|
|
|
|
|
|
|
|
if (!e->isArray()) {
|
|
|
|
|
CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
|
|
|
|
|
sizeWithoutCookie
|
|
|
|
|
= llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
|
|
|
|
|
return sizeWithoutCookie;
|
2010-07-21 09:10:17 +08:00
|
|
|
|
}
|
2009-09-24 00:07:23 +08:00
|
|
|
|
|
2011-05-15 15:14:44 +08:00
|
|
|
|
// The width of size_t.
|
|
|
|
|
unsigned sizeWidth = CGF.SizeTy->getBitWidth();
|
|
|
|
|
|
2010-09-02 17:58:18 +08:00
|
|
|
|
// Figure out the cookie size.
|
2011-05-15 15:14:44 +08:00
|
|
|
|
llvm::APInt cookieSize(sizeWidth,
|
|
|
|
|
CalculateCookiePadding(CGF, e).getQuantity());
|
2010-09-02 17:58:18 +08:00
|
|
|
|
|
2009-09-24 00:07:23 +08:00
|
|
|
|
// Emit the array size expression.
|
2010-08-26 23:23:38 +08:00
|
|
|
|
// We multiply the size of all dimensions for NumElements.
|
|
|
|
|
// e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
|
2017-08-16 05:42:52 +08:00
|
|
|
|
numElements =
|
|
|
|
|
ConstantEmitter(CGF).tryEmitAbstract(e->getArraySize(), e->getType());
|
2017-02-14 07:49:55 +08:00
|
|
|
|
if (!numElements)
|
|
|
|
|
numElements = CGF.EmitScalarExpr(e->getArraySize());
|
2011-05-15 15:14:44 +08:00
|
|
|
|
assert(isa<llvm::IntegerType>(numElements->getType()));
|
|
|
|
|
|
|
|
|
|
// The number of elements can have an arbitrary integer type;
|
|
|
|
|
// essentially, we need to multiply it by a constant factor, add a
|
|
|
|
|
// cookie size, and verify that the result is representable as a
|
|
|
|
|
// size_t. That's just a gloss, though, and it's wrong in one
|
|
|
|
|
// important way: if the count is negative, it's an error even if
|
|
|
|
|
// the cookie size would bring the total size >= 0.
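// (Illustrative: for 'new X[n]' with n == -1, sizeof(X) == 1, and an 8-byte
// cookie, SIZE_MAX + 8 wraps around to a small positive value, yet the
// allocation must still fail because the count itself is negative.)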
|
2018-07-31 03:24:48 +08:00
|
|
|
|
bool isSigned
|
2011-05-21 00:38:50 +08:00
|
|
|
|
= e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
|
2011-07-18 12:24:23 +08:00
|
|
|
|
llvm::IntegerType *numElementsType
|
2011-05-15 15:14:44 +08:00
|
|
|
|
= cast<llvm::IntegerType>(numElements->getType());
|
|
|
|
|
unsigned numElementsWidth = numElementsType->getBitWidth();
|
|
|
|
|
|
|
|
|
|
// Compute the constant factor.
|
|
|
|
|
llvm::APInt arraySizeMultiplier(sizeWidth, 1);
|
2010-08-26 23:23:38 +08:00
|
|
|
|
while (const ConstantArrayType *CAT
|
2011-05-15 15:14:44 +08:00
|
|
|
|
= CGF.getContext().getAsConstantArrayType(type)) {
|
|
|
|
|
type = CAT->getElementType();
|
|
|
|
|
arraySizeMultiplier *= CAT->getSize();
|
2010-08-26 23:23:38 +08:00
|
|
|
|
}
|
|
|
|
|
|
2011-05-15 15:14:44 +08:00
|
|
|
|
CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
|
|
|
|
|
llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
|
|
|
|
|
typeSizeMultiplier *= arraySizeMultiplier;
|
|
|
|
|
|
|
|
|
|
// This will be a size_t.
|
|
|
|
|
llvm::Value *size;
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2010-07-21 05:55:52 +08:00
|
|
|
|
// If someone is doing 'new int[42]', there is no need to do a dynamic check.
|
|
|
|
|
// Don't bloat the -O0 code.
|
2011-05-15 15:14:44 +08:00
|
|
|
|
if (llvm::ConstantInt *numElementsC =
|
|
|
|
|
dyn_cast<llvm::ConstantInt>(numElements)) {
|
|
|
|
|
const llvm::APInt &count = numElementsC->getValue();
|
|
|
|
|
|
|
|
|
|
bool hasAnyOverflow = false;
|
|
|
|
|
|
|
|
|
|
// If 'count' was a negative number, it's an overflow.
|
|
|
|
|
if (isSigned && count.isNegative())
|
|
|
|
|
hasAnyOverflow = true;
|
|
|
|
|
|
|
|
|
|
// We want to do all this arithmetic in size_t. If numElements is
|
|
|
|
|
// wider than that, check whether it's already too big, and if so,
|
|
|
|
|
// overflow.
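// (Illustrative: on a 32-bit target, 'new char[5000000000ULL]' has a 64-bit
// constant count that needs 33 bits, so it cannot be represented in size_t
// and is folded to an overflow here.)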
|
|
|
|
|
else if (numElementsWidth > sizeWidth &&
|
|
|
|
|
numElementsWidth - sizeWidth > count.countLeadingZeros())
|
|
|
|
|
hasAnyOverflow = true;
|
|
|
|
|
|
|
|
|
|
// Okay, compute a count at the right width.
|
|
|
|
|
llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
|
|
|
|
|
|
2012-02-23 01:37:52 +08:00
|
|
|
|
// If there is a brace-initializer, we cannot allocate fewer elements than
|
|
|
|
|
// there are initializers. If we do, that's treated like an overflow.
|
|
|
|
|
if (adjustedCount.ult(minElements))
|
|
|
|
|
hasAnyOverflow = true;
|
|
|
|
|
|
2011-05-15 15:14:44 +08:00
|
|
|
|
// Scale numElements by that. This might overflow, but we don't
|
|
|
|
|
// care because it only overflows if allocationSize does, too, and
|
|
|
|
|
// if that overflows then we shouldn't use this.
|
|
|
|
|
numElements = llvm::ConstantInt::get(CGF.SizeTy,
|
|
|
|
|
adjustedCount * arraySizeMultiplier);
|
|
|
|
|
|
|
|
|
|
// Compute the size before cookie, and track whether it overflowed.
|
|
|
|
|
bool overflow;
|
|
|
|
|
llvm::APInt allocationSize
|
|
|
|
|
= adjustedCount.umul_ov(typeSizeMultiplier, overflow);
|
|
|
|
|
hasAnyOverflow |= overflow;
|
|
|
|
|
|
|
|
|
|
// Add in the cookie, and check whether it's overflowed.
|
|
|
|
|
if (cookieSize != 0) {
|
|
|
|
|
// Save the current size without a cookie. This shouldn't be
|
|
|
|
|
// used if there was overflow.
|
|
|
|
|
sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
|
|
|
|
|
|
|
|
|
|
allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
|
|
|
|
|
hasAnyOverflow |= overflow;
|
2010-09-02 17:58:18 +08:00
|
|
|
|
}
|
2011-05-15 15:14:44 +08:00
|
|
|
|
|
|
|
|
|
// On overflow, produce a -1 so operator new will fail.
|
2014-08-29 01:24:14 +08:00
|
|
|
|
if (hasAnyOverflow) {
|
|
|
|
|
size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
|
|
|
|
|
} else {
|
2011-05-15 15:14:44 +08:00
|
|
|
|
size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
|
2014-08-29 01:24:14 +08:00
|
|
|
|
}
|
2010-09-02 17:58:18 +08:00
|
|
|
|
|
2011-05-15 15:14:44 +08:00
|
|
|
|
// Otherwise, we might need to use the overflow intrinsics.
|
|
|
|
|
} else {
|
2012-02-23 01:37:52 +08:00
|
|
|
|
// There are up to five conditions we need to test for:
|
2011-05-15 15:14:44 +08:00
|
|
|
|
// 1) if isSigned, we need to check whether numElements is negative;
|
|
|
|
|
// 2) if numElementsWidth > sizeWidth, we need to check whether
|
|
|
|
|
// numElements is larger than something representable in size_t;
|
2012-02-23 01:37:52 +08:00
|
|
|
|
// 3) if minElements > 0, we need to check whether numElements is smaller
|
|
|
|
|
// than that.
|
|
|
|
|
// 4) we need to compute
|
2011-05-15 15:14:44 +08:00
|
|
|
|
// sizeWithoutCookie := numElements * typeSizeMultiplier
|
|
|
|
|
// and check whether it overflows; and
|
2012-02-23 01:37:52 +08:00
|
|
|
|
// 5) if we need a cookie, we need to compute
|
2011-05-15 15:14:44 +08:00
|
|
|
|
// size := sizeWithoutCookie + cookieSize
|
|
|
|
|
// and check whether it overflows.
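// (Illustrative walk-through: for 'new int[n]' with a signed 32-bit 'n' on a
// 64-bit target, n is sign-extended to i64 and multiplied by 4 with
// llvm.umul.with.overflow.i64; a negative n becomes a huge unsigned value,
// the multiply overflows, and 'size' is forced to all-ones so operator new
// fails.)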
|
|
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
|
llvm::Value *hasOverflow = nullptr;
|
2011-05-15 15:14:44 +08:00
|
|
|
|
|
|
|
|
    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow. Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                            llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                            llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                        CGF.Builder.CreateICmpULT(numElements,
                            llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}
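
// For illustration only (a sketch, not taken from the original file): on a
// target where size_t is 64 bits, 'new int[n]' with a signed 32-bit 'n' is
// expected to lower roughly to
//
//   %n64   = sext i32 %n to i64
//   %res   = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %n64, i64 4)
//   %size  = extractvalue { i64, i1 } %res, 0
//   %ovf   = extractvalue { i64, i1 } %res, 1
//   %size1 = select i1 %ovf, i64 -1, i64 %size
//
// A negative or too-large element count makes the multiply overflow, so the
// requested size collapses to -1 and the called operator new is expected to
// fail (typically by throwing std::bad_alloc).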

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
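
// Example (illustrative, not taken from a test): 'new int(5)' initializes the
// allocated unit through the TEK_Scalar path above, while 'new Point{1, 2}'
// for a hypothetical aggregate 'struct Point { int x, y; };' goes through the
// TEK_Aggregate path via an AggValueSlot built on NewPtr.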

void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };
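
  // Example (illustrative): for 'new int[n]()' every element is
  // zero-initialized, so the lambda above can emit one memset over the whole
  // remaining allocation instead of an element-by-element store loop.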

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if (ILE->isStringLiteralInit()) {
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE->getInit(0), Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
      CurPtr =
          Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                            Builder.getSize(InitListElements),
                                            "string.init.end"),
                  CurPtr.getAlignment().alignmentAtOffset(InitListElements *
                                                          ElementSize));

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }
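
    // Example (illustrative): for 'new char[n]{"abc"}' with a non-constant
    // 'n', the single init-list element is the string literal; it fills the
    // leading characters (including the terminating NUL) and any remaining
    // elements are zeroed by the TryMemsetInitialization call above.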

    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto FinishedPtr =
          Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
  }
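
  // Example (illustrative): for 'new int[n]{1, 2, 3}' the loop above stores
  // the three explicit elements one unit at a time; the remaining 'n - 3'
  // elements are then handled below through the array filler, which for a
  // trivial element type normally ends up as a single memset of zero.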

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/true,
                               CCE->requiresZeroInitialization());
    return;
  }
|
|
|
|
|
|
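
  // For example (illustrative), for 'new X[n]{x0, x1}' the two brace elements
  // were initialized earlier; the call above constructs only the remaining
  // n - 2 elements.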

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitfield())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }
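
  // (Illustrative) This catches element initializers such as 'S{}' for a
  // struct S: when every field in the element's init list ends up
  // value-initialized, the whole element can be zero-filled with a single
  // memset instead of field-by-field stores.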

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
    Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");

  // If the number of elements isn't constant, we now have to check whether
  // there is anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
    Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);

  CurPtr = Address(CurPtrPhi, ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
                                   ElementType, ElementAlign,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }
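
  // (Illustrative) If constructing element i throws, the elements already
  // constructed in [BeginPtr, CurPtr) must be destroyed while the exception
  // propagates; that is what the partial-array EH cleanup pushed above does.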

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
                          AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
    Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
                                       "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
                            AggValueSlot::DoesNotOverlap);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::Instruction *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, CalleeDecl);
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*chainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
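  /// For example (illustrative), this is what lets the optimizer remove the
  /// allocation in code like 'delete new int;' or fold it into surrounding
  /// allocations, subject to the usual restrictions on replaceable global
  /// allocation functions.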
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    // FIXME: Add addAttribute to CallSite.
    if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
      CI->addAttribute(llvm::AttributeList::FunctionIndex,
                       llvm::Attribute::Builtin);
    else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
      II->addAttribute(llvm::AttributeList::FunctionIndex,
                       llvm::Attribute::Builtin);
    else
      llvm_unreachable("unexpected kind of call instruction");
  }

  return RV;
}

RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const CallExpr *TheCall,
                                                 bool IsDelete) {
  CallArgList Args;
  EmitCallArgs(Args, Type->getParamTypes(), TheCall->arguments());
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);

  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, FD, Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}
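
// For example (illustrative), Clang's __builtin_operator_new(size) and
// __builtin_operator_delete(ptr) are lowered through this helper: they call
// the usual global 'operator new'/'operator delete' found by the lookup
// above.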

namespace {
/// The parameters to pass to a usual operator delete.
struct UsualDeleteParams {
  bool DestroyingDelete = false;
  bool Size = false;
  bool Alignment = false;
};
}
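
// (Illustrative) The "usual" deallocation signatures this covers include:
//   void operator delete(void *);
//   void operator delete(void *, std::size_t);
//   void operator delete(void *, std::align_val_t);
//   void operator delete(void *, std::size_t, std::align_val_t);
// and, for a destroying operator delete, an extra std::destroying_delete_t
// parameter after the pointer.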
static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
  UsualDeleteParams Params;

  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();

  // The first argument is always a void*.
  ++AI;

  // The next parameter may be a std::destroying_delete_t.
  if (FD->isDestroyingOperatorDelete()) {
    Params.DestroyingDelete = true;
    assert(AI != AE);
    ++AI;
  }

  // Figure out what other parameters we should be implicitly passing.
  if (AI != AE && (*AI)->isIntegerType()) {
    Params.Size = true;
    ++AI;
  }

  if (AI != AE && (*AI)->isAlignValT()) {
    Params.Alignment = true;
    ++AI;
  }

  assert(AI == AE && "unexpected usual deallocation function parameter");
  return Params;
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon abnormal
  /// exit from a new expression. Templated on a traits type that deals with
  /// ensuring that the arguments dominate the cleanup if necessary.
  template<typename Traits>
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
    /// Type used to hold llvm::Value*s.
    typedef typename Traits::ValueTy ValueTy;
    /// Type used to hold RValues.
    typedef typename Traits::RValueTy RValueTy;
    struct PlacementArg {
      RValueTy ArgValue;
      QualType ArgType;
    };

    unsigned NumPlacementArgs : 31;
    unsigned PassAlignmentToPlacementDelete : 1;
    const FunctionDecl *OperatorDelete;
    ValueTy Ptr;
    ValueTy AllocSize;
    CharUnits AllocAlign;

    PlacementArg *getPlacementArgs() {
      return reinterpret_cast<PlacementArg *>(this + 1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(PlacementArg);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
                        CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
        AllocAlign(AllocAlign) {}

    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = {Arg, Type};
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const FunctionProtoType *FPT =
          OperatorDelete->getType()->getAs<FunctionProtoType>();
      CallArgList DeleteArgs;

      // The first argument is always a void* (or C* for a destroying operator
      // delete for class type C).
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));

      // Figure out what other parameters we should be implicitly passing.
      UsualDeleteParams Params;
      if (NumPlacementArgs) {
        // A placement deallocation function is implicitly passed an alignment
        // if the placement allocation function was, but is never passed a size.
        Params.Alignment = PassAlignmentToPlacementDelete;
      } else {
        // For a non-placement new-expression, 'operator delete' can take a
        // size and/or an alignment if it has the right parameters.
        Params = getUsualDeleteParams(OperatorDelete);
      }

      assert(!Params.DestroyingDelete &&
             "should not call destroying delete in a new-expression");

      // The second argument can be a std::size_t (for non-placement delete).
      if (Params.Size)
        DeleteArgs.add(Traits::get(CGF, AllocSize),
                       CGF.getContext().getSizeType());

      // The next (second or third) argument can be a std::align_val_t, which
      // is an enum whose underlying type is std::size_t.
      // FIXME: Use the right type as the parameter type. Note that in a call
      // to operator delete(size_t, ...), we may not have it available.
      if (Params.Alignment)
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
                           CGF.SizeTy, AllocAlign.getQuantity())),
                       CGF.getContext().getSizeType());

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        auto Arg = getPlacementArgs()[I];
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}
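
// (Illustrative) For 'new (a, b) T(...)', if T's constructor throws, the
// matching placement deallocation function, e.g.
//   void operator delete(void *p, A a, B b);
// is called with the same placement arguments before the exception continues
// to propagate.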

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  Address NewPtr,
                                  llvm::Value *AllocSize,
                                  CharUnits AllocAlign,
                                  const CallArgList &NewArgs) {
  unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;

  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    struct DirectCleanupTraits {
      typedef llvm::Value *ValueTy;
      typedef RValue RValueTy;
      static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
      static RValue get(CodeGenFunction &, RValueTy V) { return V; }
    };

    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;

    DirectCleanup *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
                                           E->getNumPlacementArgs(),
                                           E->getOperatorDelete(),
                                           NewPtr.getPointer(),
                                           AllocSize,
                                           E->passAlignment(),
                                           AllocAlign);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
      Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
    }

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  struct ConditionalCleanupTraits {
    typedef DominatingValue<RValue>::saved_type ValueTy;
    typedef DominatingValue<RValue>::saved_type RValueTy;
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
      return V.restore(CGF);
    }
  };
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;

  ConditionalCleanup *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
                                              E->getNumPlacementArgs(),
                                              E->getOperatorDelete(),
                                              SavedNewPtr,
                                              SavedAllocSize,
                                              E->passAlignment(),
                                              AllocAlign);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
    Cleanup->setPlacementArg(
        I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
  }

  CGF.initFullExprCleanup();
}
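
// (Illustrative) The conditional path above matters for new-expressions that
// are only conditionally evaluated, e.g. 'cond ? new T(x) : nullptr': the
// values the cleanup needs must be saved so they remain available to the
// cleanup even though the allocation itself may not have executed.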

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
    if (ILE && ILE->isStringLiteralInit())
      minElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
    else if (ILE)
      minElements = ILE->getNumInits();
  }
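
  // For example (illustrative), for 'new int[n]{1, 2, 3}' minElements is 3,
  // and for 'new char[n]{"abc"}' it is 4 (the literal plus its terminating
  // NUL); EmitCXXNewAllocSize below takes this minimum into account when
  // computing the allocation size.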

  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);

  // Emit the allocation call. If the allocator is a global placement
  // operator, just "inline" it directly.
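  // (Illustrative) For the reserved placement form 'new (ptr) T(...)' there is
  // no real allocation: the placement pointer is used directly as the address
  // of the object, so no call to 'operator new' is emitted.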
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
|
Address allocation = Address::invalid();
|
|
|
|
|
CallArgList allocatorArgs;
|
2011-05-16 09:05:12 +08:00
|
|
|
|
if (allocator->isReservedGlobalPlacementOperator()) {
|
2015-09-30 07:55:17 +08:00
|
|
|
|
assert(E->getNumPlacementArgs() == 1);
|
|
|
|
|
const Expr *arg = *E->placement_arguments().begin();
|
|
|
|
|
|
2017-05-19 01:07:11 +08:00
|
|
|
|
LValueBaseInfo BaseInfo;
|
|
|
|
|
allocation = EmitPointerWithAlignment(arg, &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation = Address(allocation.getPointer(), allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
    }

  } else {
    const FunctionProtoType *allocatorType =
      allocator->getType()->castAs<FunctionProtoType>();
    unsigned ParamsToSkip = 0;

    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(RValue::get(allocSize), sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(allocAlign, cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (E->passAlignment()) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > 1) {
        AlignValT = allocatorType->getParamType(1);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
                   sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
          AlignValT);
    }
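    // (E->passAlignment() is set when the selected allocation function takes
    // an alignment argument, as with C++17 aligned new for over-aligned types,
    // e.g. 'operator new(std::size_t, std::align_val_t)'.)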

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);

    RValue RV =
      EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);

    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
      allocationAlign = std::max(
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
    }

    allocation = Address(RV.getScalarVal(), allocationAlign);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we have an
  // interesting initializer.
  bool nullCheck = E->shouldNullCheckAllocation(getContext()) &&
    (!allocType.isPODType(getContext()) || E->hasInitializer());
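  // (For example, 'new (std::nothrow) T' uses a non-throwing allocation
  // function that may return null, so the initializer must be guarded.)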

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull =
      Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
                          allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
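    // Note: the 'unreachable' created here is only a placeholder marking the
    // point that dominates this conditional cleanup; it is erased below once
    // the cleanup is deactivated after initialization completes.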
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementTy = ConvertTypeForMem(allocType);
  Address result = Builder.CreateElementBitCast(allocation, elementTy);

  // Pass the pointer through launder.invariant.group to avoid propagation of
  // vptr information that may be included in the previous type.
  // To avoid breaking LTO across different optimization levels, we do this
  // regardless of optimization level.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      allocator->isReservedGlobalPlacementOperator())
    result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
                     result.getAlignment());

  // Emit sanitizer checks for the pointer value now, so that in the case of an
  // array it is checked only once and not at each constructor call.
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
                result.getPointer(), allocType);

  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type. If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result.getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  llvm::Value *resultPtr = result.getPointer();
  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
    PHI->addIncoming(resultPtr, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
                     nullCheckBB);

    resultPtr = PHI;
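    // The PHI merges the two paths: the initialized pointer from the not-null
    // path, and a null pointer when the allocation function returned null and
    // initialization was skipped.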
  }

  return resultPtr;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr, QualType DeleteTy,
                                     llvm::Value *NumElements,
                                     CharUnits CookieSize) {
  assert((!NumElements && CookieSize.isZero()) ||
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  auto Params = getUsualDeleteParams(DeleteFD);
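  // Params records which optional parameters this usual deallocation function
  // takes: the std::destroying_delete tag, the size, and/or the alignment,
  // e.g. 'void operator delete(void *, std::size_t, std::align_val_t)'.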
  auto ParamTypeIt = DeleteFTy->param_type_begin();

  // Pass the pointer itself.
  QualType ArgTy = *ParamTypeIt++;
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  // Pass the std::destroying_delete tag if present.
  if (Params.DestroyingDelete) {
    QualType DDTag = *ParamTypeIt++;
    // Just pass an 'undef'. We expect the tag type to be an empty struct.
    auto *V = llvm::UndefValue::get(getTypes().ConvertType(DDTag));
    DeleteArgs.add(RValue::get(V), DDTag);
  }

  // Pass the size if the delete function has a size_t parameter.
  if (Params.Size) {
    QualType SizeType = *ParamTypeIt++;
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
                                               DeleteTypeSize.getQuantity());

    // For array new, multiply by the number of elements.
    if (NumElements)
      Size = Builder.CreateMul(Size, NumElements);

    // If there is a cookie, add the cookie size.
    if (!CookieSize.isZero())
      Size = Builder.CreateAdd(
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));

    DeleteArgs.add(RValue::get(Size), SizeType);
  }
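  // (So for sized array delete, the size argument works out to
  //  sizeof(element) * NumElements plus any cookie size.)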

  // Pass the alignment if the delete function has an align_val_t parameter.
  if (Params.Alignment) {
    QualType AlignValType = *ParamTypeIt++;
    CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
        getContext().getTypeAlignIfKnown(DeleteTy));
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
                                                DeleteTypeAlign.getQuantity());
    DeleteArgs.add(RValue::get(Align), AlignValType);
  }

  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
         "unknown parameter to usual delete function");

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}

/// Emit the code for deleting a single object with a destroying operator
/// delete. If the element type has a non-virtual destructor, Ptr has already
/// been converted to the type of the parameter of 'operator delete'. Otherwise
/// Ptr points to an object of the static type.
static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
                                       const CXXDeleteExpr *DE, Address Ptr,
                                       QualType ElementType) {
  auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
  if (Dtor && Dtor->isVirtual())
    CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                Dtor);
  else
    CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
                    DE->getExprLoc(), Ptr.getPointer(),
                    ElementType);

  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  assert(!OperatorDelete->isDestroyingOperatorDelete());

  // Find the destructor for the type, if applicable. If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                    Dtor);
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
|
2010-09-02 17:58:18 +08:00
|
|
|
|
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
|
Ptr.getPointer(),
|
|
|
|
|
OperatorDelete, ElementType);
|
2010-09-02 17:58:18 +08:00
|
|
|
|
|
|
|
|
|
if (Dtor)
|
|
|
|
|
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
|
2013-01-31 13:50:40 +08:00
|
|
|
|
/*ForVirtualBase=*/false,
|
|
|
|
|
/*Delegating=*/false,
|
|
|
|
|
Ptr);
|
Define weak and __weak to mean ARC-style weak references, even in MRC.
Previously, __weak was silently accepted and ignored in MRC mode.
That makes this a potentially source-breaking change that we have to
roll out cautiously. Accordingly, for the time being, actual support
for __weak references in MRC is experimental, and the compiler will
reject attempts to actually form such references. The intent is to
eventually enable the feature by default in all non-GC modes.
(It is, of course, incompatible with ObjC GC's interpretation of
__weak.)
If you like, you can enable this feature with
-Xclang -fobjc-weak
but like any -Xclang option, this option may be removed at any point,
e.g. if/when it is eventually enabled by default.
This patch also enables the use of the ARC __unsafe_unretained qualifier
in MRC. Unlike __weak, this is being enabled immediately. Since
variables are essentially __unsafe_unretained by default in MRC,
the only practical uses are (1) communication and (2) changing the
default behavior of by-value block capture.
As an implementation matter, this means that the ObjC ownership
qualifiers may appear in any ObjC language mode, and so this patch
removes a number of checks for getLangOpts().ObjCAutoRefCount
that were guarding the processing of these qualifiers. I don't
expect this to be a significant drain on performance; it may even
be faster to just check for these qualifiers directly on a type
(since it's probably in a register anyway) than to do N dependent
loads to grab the LangOptions.
rdar://9674298
llvm-svn: 251041
2015-10-23 02:38:17 +08:00
|
|
|
|
else if (auto Lifetime = ElementType.getObjCLifetime()) {
|
|
|
|
|
switch (Lifetime) {
|
2011-06-16 07:02:42 +08:00
|
|
|
|
case Qualifiers::OCL_None:
|
|
|
|
|
case Qualifiers::OCL_ExplicitNone:
|
|
|
|
|
case Qualifiers::OCL_Autoreleasing:
|
|
|
|
|
break;
|
2010-09-02 17:58:18 +08:00
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
|
case Qualifiers::OCL_Strong:
|
|
|
|
|
CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
break;
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
|
case Qualifiers::OCL_Weak:
|
|
|
|
|
CGF.EmitARCDestroyWeak(Ptr);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
2010-09-02 17:58:18 +08:00
|
|
|
|
CGF.PopCleanupBlock();
|
|
|
|
|
}
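
// For illustration (hypothetical types, not from this file):
//   struct Base { virtual ~Base(); };
//   struct Derived : Base { /* ... */ };
//   Base *b = new Derived;
//   delete b;      // virtual destructor: dispatched through the ABI's
//                  // virtual-object-delete path above
//   Widget *w = new Widget;
//   delete w;      // non-virtual case: destructor call followed by
//                  // 'operator delete', guarded by the cleanup pushed above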

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
                         CookieSize);
    }
  };
}
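
// For illustration (hypothetical element type, not from this file): for
//   Widget *p = new Widget[n];   // allocation may be prefixed by an array cookie
//   delete [] p;
// this cleanup is what ultimately calls the array deallocation function;
// NumElements and CookieSize are forwarded so that EmitDeleteCall can compute
// the size of the entire original allocation when a sized 'operator delete[]'
// is selected.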

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            Address deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    CharUnits elementAlign =
      deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.getPointer();
    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
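
// For illustration (hypothetical type, not from this file): when the element
// type has a non-trivial destructor, typical C++ ABIs store the element count
// in a cookie placed in front of the array, which is what ReadArrayCookie
// recovers above.
//   Widget *p = new Widget[5];   // allocation layout: [cookie][Widget x 5]
//   delete [] p;                 // cookie -> numElements = 5; allocatedPtr
//                                // points at the start of the cookie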

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  QualType DeleteTy = E->getDestroyedType();

  // A destroying operator delete overrides the entire operation of the
  // delete expression.
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
    EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
    return;
  }

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
                  Ptr.getAlignment());
  }

  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E, Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
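
// For illustration: the null check emitted at the top of EmitCXXDeleteExpr
// makes deleting a null pointer a no-op in the generated code.  Assuming a
// hypothetical type 'Widget' (not from this file):
//   Widget *w = nullptr;
//   delete w;    // branches straight to delete.end; no destructor call and
//                // no 'operator delete' call is emitted on this path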

static bool isGLValueFromPointerDeref(const Expr *E) {
  E = E->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(E)) {
    if (!CE->getSubExpr()->isGLValue())
      return false;
    return isGLValueFromPointerDeref(CE->getSubExpr());
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return isGLValueFromPointerDeref(OVE->getSourceExpr());

  if (const auto *BO = dyn_cast<BinaryOperator>(E))
    if (BO->getOpcode() == BO_Comma)
      return isGLValueFromPointerDeref(BO->getRHS());

  if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
    return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
           isGLValueFromPointerDeref(ACO->getFalseExpr());

  // C++11 [expr.sub]p1:
  //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
  if (isa<ArraySubscriptExpr>(E))
    return true;

  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_Deref)
      return true;

  return false;
}
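
// For illustration, expressions this predicate classifies (hypothetical
// pointers 'p' and 'q' to a polymorphic type, not from this file):
//   typeid(*p)            // true: unary * applied to a pointer
//   typeid(p[0])          // true: subscript is defined as *((p)+(0))
//   typeid(b ? *p : *q)   // true: either arm comes from a dereference
//   typeid(obj)           // false: the glvalue is not derived from a deref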

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress();

  QualType SrcRecordTy = E->getType();

  // C++ [class.cdtor]p4:
  //   If the operand of typeid refers to the object under construction or
  //   destruction and the static type of the operand is neither the
  //   constructor or destructor's class nor one of its bases, the behavior is
  //   undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
                    ThisPtr.getPointer(), SrcRecordTy);

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator
  //   to a pointer and the pointer is a null pointer value, the typeid
  //   expression throws the std::bad_typeid exception.
  //
  // However, this paragraph's intent is not clear.  We choose a very generous
  // interpretation which implores us to consider comma operators, conditional
  // operators, parentheses and other such constructs.
  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
          isGLValueFromPointerDeref(E), SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }

  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}
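
// For illustration (hypothetical polymorphic hierarchy Base/Derived, not
// from this file):
//   Base *p = nullptr;
//   typeid(*p);   // the null check above routes to the ABI's bad_typeid
//                 // handler, i.e. std::bad_typeid is thrown
//   Base *q = new Derived;
//   typeid(*q);   // vtable lookup yields the type_info for Derived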

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}
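
// For illustration (hypothetical operands, not from this file):
//   typeid(int)       // type operand: a static std::type_info descriptor
//   typeid(*base)     // potentially evaluated glvalue of polymorphic type:
//                     // resolved through the vtable at run time (above)
//   typeid(nonPoly)   // otherwise: descriptor for the operand's static type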

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}
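
// For illustration (hypothetical Base/Derived hierarchy, not from this file):
//   Derived *d = dynamic_cast<Derived *>(basePtr);  // failure yields nullptr
//   Derived &r = dynamic_cast<Derived &>(baseRef);  // failure throws std::bad_cast
// This helper emits those "known to fail" results directly: a null pointer
// constant for the pointer form, and a call to the ABI's bad_cast handler for
// the reference form.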
|
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
|
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
|
2011-04-11 08:46:40 +08:00
|
|
|
|
const CXXDynamicCastExpr *DCE) {
|
2015-10-20 12:24:12 +08:00
|
|
|
|
CGM.EmitExplicitCastExprType(DCE, this);
|
2011-04-11 08:46:40 +08:00
|
|
|
|
QualType DestTy = DCE->getTypeAsWritten();
|
2009-11-16 14:50:58 +08:00
|
|
|
|
|
2011-04-11 09:45:29 +08:00
|
|
|
|
QualType SrcTy = DCE->getSubExpr()->getType();
|
|
|
|
|
|
2014-06-23 03:05:33 +08:00
|
|
|
|
// C++ [expr.dynamic.cast]p7:
|
|
|
|
|
// If T is "pointer to cv void," then the result is a pointer to the most
|
|
|
|
|
// derived object pointed to by v.
|
|
|
|
|
const PointerType *DestPTy = DestTy->getAs<PointerType>();
|
|
|
|
|
|
|
|
|
|
bool isDynamicCastToVoid;
|
|
|
|
|
QualType SrcRecordTy;
|
|
|
|
|
QualType DestRecordTy;
|
|
|
|
|
if (DestPTy) {
|
|
|
|
|
isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
|
|
|
|
|
SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
|
|
|
|
|
DestRecordTy = DestPTy->getPointeeType();
|
|
|
|
|
} else {
|
|
|
|
|
isDynamicCastToVoid = false;
|
|
|
|
|
SrcRecordTy = SrcTy;
|
|
|
|
|
DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-28 20:45:41 +08:00
|
|
|
|
// C++ [class.cdtor]p5:
|
|
|
|
|
// If the operand of the dynamic_cast refers to the object under
|
|
|
|
|
// construction or destruction and the static type of the operand is not a
|
|
|
|
|
// pointer to or object of the constructor or destructor’s own class or one
|
|
|
|
|
// of its bases, the dynamic_cast results in undefined behavior.
|
|
|
|
|
EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
|
|
|
|
|
SrcRecordTy);
|
|
|
|
|
|
|
|
|
|
if (DCE->isAlwaysNull())
|
|
|
|
|
if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
|
|
|
|
|
return T;
|
|
|
|
|
|
2014-06-23 03:05:33 +08:00
|
|
|
|
assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
|
|
|
|
|
|
2018-07-31 03:24:48 +08:00
|
|
|
|
// C++ [expr.dynamic.cast]p4:
|
2011-04-11 08:46:40 +08:00
|
|
|
|
// If the value of v is a null pointer value in the pointer case, the result
|
|
|
|
|
// is the null pointer value of type T.
|
2014-06-23 03:05:33 +08:00
|
|
|
|
bool ShouldNullCheckSrcValue =
|
|
|
|
|
CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
|
|
|
|
|
SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }
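
  // If a null check was emitted, the insertion point is now the
  // "dynamic_cast.notnull" block and the code below emits the non-null path;
  // otherwise emission simply continues in the current block.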

  llvm::Value *Value;
  if (isDynamicCastToVoid) {
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
                                                  DestTy);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
    CastNotNull = Builder.GetInsertBlock();
  }

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }
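
  // The PHI merges the two paths: the ABI's cast result flows in from the
  // not-null block, and a null constant of the same type flows in from the
  // null block, so a null source yields a null result without reaching the
  // runtime.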

  return Value;
}
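
// Schematic sketch (illustrative only, not generated verbatim) of the control
// flow emitted above for a null-checked pointer cast such as
// `dynamic_cast<Derived *>(p)`:
//
//     %isnull = icmp eq %class.Base* %p, null
//     br i1 %isnull, label %dynamic_cast.null, label %dynamic_cast.notnull
//   dynamic_cast.notnull:
//     ; ABI-specific cast, e.g. a call to __dynamic_cast under Itanium
//     br label %dynamic_cast.end
//   dynamic_cast.null:
//     br label %dynamic_cast.end
//   dynamic_cast.end:
//     %result = phi %class.Derived* [ %cast, %dynamic_cast.notnull ],
//                                   [ null, %dynamic_cast.null ]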

void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
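  // Sema models each capture as a field of the closure class; the loop below
  // walks the capture initializers and the corresponding fields in parallel
  // and emits each field's initialization.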
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      auto VAT = CurField->getCapturedVLAType();
      EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
    } else {
      EmitInitializerForField(*CurField, LV, *i);
    }
  }
}
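
// Illustrative note: a capture involving a variable length array (a Clang
// extension in C++) is the hasCapturedVLAType() case above; the closure
// carries an implicit field for the array bound, which is initialized here
// from VLASizeMap rather than from a written capture initializer.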