2015-10-07 07:40:43 +08:00
|
|
|
//===--- CGClass.cpp - Emit LLVM Code for C++ classes -----------*- C++ -*-===//
|
2009-09-12 12:27:24 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2009-09-12 12:27:24 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This contains code dealing with C++ code generation of classes
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2012-02-25 10:48:22 +08:00
|
|
|
#include "CGBlocks.h"
|
2014-01-07 19:51:46 +08:00
|
|
|
#include "CGCXXABI.h"
|
2010-08-12 05:04:37 +08:00
|
|
|
#include "CGDebugInfo.h"
|
2013-02-17 15:22:09 +08:00
|
|
|
#include "CGRecordLayout.h"
|
2009-09-12 12:27:24 +08:00
|
|
|
#include "CodeGenFunction.h"
|
2018-12-13 18:15:27 +08:00
|
|
|
#include "TargetInfo.h"
|
2009-10-07 06:43:30 +08:00
|
|
|
#include "clang/AST/CXXInheritance.h"
|
2013-09-29 16:45:24 +08:00
|
|
|
#include "clang/AST/DeclTemplate.h"
|
2010-09-17 10:31:44 +08:00
|
|
|
#include "clang/AST/EvaluatedExprVisitor.h"
|
2009-09-12 12:27:24 +08:00
|
|
|
#include "clang/AST/RecordLayout.h"
|
2010-02-19 17:25:03 +08:00
|
|
|
#include "clang/AST/StmtCXX.h"
|
2018-12-11 11:18:39 +08:00
|
|
|
#include "clang/Basic/CodeGenOptions.h"
|
2013-02-17 15:22:09 +08:00
|
|
|
#include "clang/Basic/TargetBuiltins.h"
|
2013-10-31 05:53:58 +08:00
|
|
|
#include "clang/CodeGen/CGFunctionInfo.h"
|
2015-02-21 04:30:56 +08:00
|
|
|
#include "llvm/IR/Intrinsics.h"
|
2015-09-16 05:46:55 +08:00
|
|
|
#include "llvm/IR/Metadata.h"
|
2016-01-16 08:31:22 +08:00
|
|
|
#include "llvm/Transforms/Utils/SanitizerStats.h"
|
2009-10-07 06:43:30 +08:00
|
|
|
|
2009-09-12 12:27:24 +08:00
|
|
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
/// Return the best known alignment for an unknown pointer to a
|
|
|
|
/// particular class.
|
|
|
|
CharUnits CodeGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) {
|
|
|
|
if (!RD->isCompleteDefinition())
|
|
|
|
return CharUnits::One(); // Hopefully won't be used anywhere.
|
|
|
|
|
|
|
|
auto &layout = getContext().getASTRecordLayout(RD);
|
|
|
|
|
|
|
|
// If the class is final, then we know that the pointer points to an
|
|
|
|
// object of that type and can use the full alignment.
|
|
|
|
if (RD->hasAttr<FinalAttr>()) {
|
|
|
|
return layout.getAlignment();
|
|
|
|
|
|
|
|
// Otherwise, we have to assume it could be a subclass.
|
|
|
|
} else {
|
|
|
|
return layout.getNonVirtualAlignment();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the best known alignment for a pointer to a virtual base,
|
|
|
|
/// given the alignment of a pointer to the derived class.
|
|
|
|
CharUnits CodeGenModule::getVBaseAlignment(CharUnits actualDerivedAlign,
|
|
|
|
const CXXRecordDecl *derivedClass,
|
|
|
|
const CXXRecordDecl *vbaseClass) {
|
|
|
|
// The basic idea here is that an underaligned derived pointer might
|
|
|
|
// indicate an underaligned base pointer.
|
|
|
|
|
|
|
|
assert(vbaseClass->isCompleteDefinition());
|
|
|
|
auto &baseLayout = getContext().getASTRecordLayout(vbaseClass);
|
|
|
|
CharUnits expectedVBaseAlign = baseLayout.getNonVirtualAlignment();
|
|
|
|
|
|
|
|
return getDynamicOffsetAlignment(actualDerivedAlign, derivedClass,
|
|
|
|
expectedVBaseAlign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Compute the alignment we may assume for an object located at a
/// dynamic offset from a pointer of the given base class, where the
/// target would normally expect \p expectedTargetAlign.
CharUnits
CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign,
                                         const CXXRecordDecl *baseDecl,
                                         CharUnits expectedTargetAlign) {
  // An incomplete base class (possible with member pointers, alas) has
  // no queryable layout; be pessimistic.
  if (!baseDecl->isCompleteDefinition())
    return std::min(actualBaseAlign, expectedTargetAlign);

  const ASTRecordLayout &BaseLayout = getContext().getASTRecordLayout(baseDecl);
  CharUnits ExpectedBaseAlign = BaseLayout.getNonVirtualAlignment();

  // If the base pointer is at least as aligned as its class expects,
  // assume anything at an offset from it is properly aligned too.
  //
  // Strictly speaking that is not always right: a complete object that
  // is only aligned for a base subobject would skew everything located
  // relative to it (this matters when the target alignment exceeds the
  // NV alignment of the derived class). But under-alignment handling
  // here can only ever be best effort — alignment is never propagated
  // through variables or parameters, and constructing a polymorphic
  // type at less than pointer alignment will generally trap in the
  // constructor anyway. The practical goal is just to let a user who
  // explicitly declares a pointer under-aligned still safely access
  // its fields and vtables.
  if (actualBaseAlign >= ExpectedBaseAlign)
    return expectedTargetAlign;

  // Otherwise the object may sit at an arbitrary multiple of the
  // actual alignment; the correct adjustment is the minimum of the
  // two alignments.
  return std::min(actualBaseAlign, expectedTargetAlign);
}
|
|
|
|
|
|
|
|
/// Load 'this' as an Address, lazily computing and caching the
/// alignment to use for it.
Address CodeGenFunction::LoadCXXThisAddress() {
  assert(CurFuncDecl && "loading 'this' without a func declaration?");
  assert(isa<CXXMethodDecl>(CurFuncDecl));

  // CXXThisAlignment is filled in on first use; zero means "unset".
  if (CXXThisAlignment.isZero()) {
    // Fall back to the best known alignment for the enclosing class.
    // TODO: while emitting a complete-object ctor/dtor we could always
    // use the complete-object alignment instead.
    const CXXRecordDecl *Parent =
        cast<CXXMethodDecl>(CurFuncDecl)->getParent();
    CXXThisAlignment = CGM.getClassPointerAlignment(Parent);
  }

  return Address(LoadCXXThis(), CXXThisAlignment);
}
|
|
|
|
|
|
|
|
/// Emit the address of a field using a member data pointer.
|
|
|
|
///
|
|
|
|
/// \param E Only used for emergency diagnostics
|
|
|
|
Address
|
|
|
|
CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
|
|
|
|
llvm::Value *memberPtr,
|
|
|
|
const MemberPointerType *memberPtrType,
|
2017-10-14 00:38:32 +08:00
|
|
|
LValueBaseInfo *BaseInfo,
|
|
|
|
TBAAAccessInfo *TBAAInfo) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
// Ask the ABI to compute the actual address.
|
|
|
|
llvm::Value *ptr =
|
|
|
|
CGM.getCXXABI().EmitMemberDataPointerAddress(*this, E, base,
|
|
|
|
memberPtr, memberPtrType);
|
|
|
|
|
|
|
|
QualType memberType = memberPtrType->getPointeeType();
|
2017-10-14 00:58:30 +08:00
|
|
|
CharUnits memberAlign = getNaturalTypeAlignment(memberType, BaseInfo,
|
|
|
|
TBAAInfo);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
memberAlign =
|
|
|
|
CGM.getDynamicOffsetAlignment(base.getAlignment(),
|
|
|
|
memberPtrType->getClass()->getAsCXXRecordDecl(),
|
|
|
|
memberAlign);
|
|
|
|
return Address(ptr, memberAlign);
|
|
|
|
}
|
|
|
|
|
2015-06-23 15:31:11 +08:00
|
|
|
/// Walk a non-virtual inheritance path and accumulate the total
/// offset of the final base class within the derived class.
CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
    const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start,
    CastExpr::path_const_iterator End) {
  const ASTContext &Ctx = getContext();

  CharUnits Total = CharUnits::Zero();
  const CXXRecordDecl *Current = DerivedClass;

  for (CastExpr::path_const_iterator It = Start; It != End; ++It) {
    const CXXBaseSpecifier *Spec = *It;
    assert(!Spec->isVirtual() && "Should not see virtual bases here!");

    // Resolve the record declaration for this step of the path.
    const CXXRecordDecl *BaseDecl =
        cast<CXXRecordDecl>(Spec->getType()->getAs<RecordType>()->getDecl());

    // Add this base's offset inside the current class, then descend
    // into it for the next step.
    const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(Current);
    Total += Layout.getBaseClassOffset(BaseDecl);

    Current = BaseDecl;
  }

  return Total;
}
|
2009-09-12 12:27:24 +08:00
|
|
|
|
2010-04-25 05:23:59 +08:00
|
|
|
/// Return the non-virtual base-class offset along the given path as an
/// LLVM integer constant of ptrdiff type, or null when the offset is
/// zero (no adjustment needed).
llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
      computeNonVirtualBaseClassOffset(ClassDecl, PathBegin, PathEnd);

  // A zero adjustment is reported as a null constant.
  if (Offset.isZero())
    return nullptr;

  // Materialize the offset as a ptrdiff_t-typed constant.
  llvm::Type *PtrDiffTy = Types.ConvertType(getContext().getPointerDiffType());
  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}
|
|
|
|
|
2010-04-25 07:01:49 +08:00
|
|
|
/// Gets the address of a direct base class within a complete object.
|
2010-02-16 12:15:37 +08:00
|
|
|
/// This should only be used for (1) non-virtual bases or (2) virtual bases
|
|
|
|
/// when the type is known to be complete (e.g. in complete destructors).
|
|
|
|
///
|
|
|
|
/// The object pointed to by 'This' is assumed to be non-null.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address
|
|
|
|
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(Address This,
|
2010-04-25 07:01:49 +08:00
|
|
|
const CXXRecordDecl *Derived,
|
|
|
|
const CXXRecordDecl *Base,
|
|
|
|
bool BaseIsVirtual) {
|
2010-02-16 12:15:37 +08:00
|
|
|
// 'this' must be a pointer (in some address space) to Derived.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
assert(This.getElementType() == ConvertType(Derived));
|
2010-02-16 12:15:37 +08:00
|
|
|
|
|
|
|
// Compute the offset of the virtual base.
|
2011-03-22 09:21:15 +08:00
|
|
|
CharUnits Offset;
|
2010-02-16 12:15:37 +08:00
|
|
|
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
|
2010-04-25 07:01:49 +08:00
|
|
|
if (BaseIsVirtual)
|
2011-03-22 09:21:15 +08:00
|
|
|
Offset = Layout.getVBaseClassOffset(Base);
|
2010-02-16 12:15:37 +08:00
|
|
|
else
|
2011-03-22 09:21:15 +08:00
|
|
|
Offset = Layout.getBaseClassOffset(Base);
|
2010-02-16 12:15:37 +08:00
|
|
|
|
|
|
|
// Shift and cast down to the base type.
|
|
|
|
// TODO: for complete types, this should be possible with a GEP.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address V = This;
|
|
|
|
if (!Offset.isZero()) {
|
|
|
|
V = Builder.CreateElementBitCast(V, Int8Ty);
|
|
|
|
V = Builder.CreateConstInBoundsByteGEP(V, Offset);
|
2010-02-16 12:15:37 +08:00
|
|
|
}
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
V = Builder.CreateElementBitCast(V, ConvertType(Base));
|
2010-02-16 12:15:37 +08:00
|
|
|
|
|
|
|
return V;
|
2010-03-29 03:40:00 +08:00
|
|
|
}
|
2010-02-16 12:15:37 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
static Address
|
|
|
|
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
|
2012-08-01 13:04:58 +08:00
|
|
|
CharUnits nonVirtualOffset,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *virtualOffset,
|
|
|
|
const CXXRecordDecl *derivedClass,
|
|
|
|
const CXXRecordDecl *nearestVBase) {
|
2012-08-01 13:04:58 +08:00
|
|
|
// Assert that we have something to do.
|
2014-05-21 13:09:00 +08:00
|
|
|
assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);
|
2012-08-01 13:04:58 +08:00
|
|
|
|
|
|
|
// Compute the offset from the static and dynamic components.
|
|
|
|
llvm::Value *baseOffset;
|
|
|
|
if (!nonVirtualOffset.isZero()) {
|
|
|
|
baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
|
|
|
|
nonVirtualOffset.getQuantity());
|
|
|
|
if (virtualOffset) {
|
|
|
|
baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
baseOffset = virtualOffset;
|
|
|
|
}
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2010-04-21 00:03:35 +08:00
|
|
|
// Apply the base offset.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *ptr = addr.getPointer();
|
2012-08-01 13:04:58 +08:00
|
|
|
ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
|
|
|
|
ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
|
|
|
|
// If we have a virtual component, the alignment of the result will
|
|
|
|
// be relative only to the known alignment of that vbase.
|
|
|
|
CharUnits alignment;
|
|
|
|
if (virtualOffset) {
|
|
|
|
assert(nearestVBase && "virtual offset without vbase?");
|
|
|
|
alignment = CGF.CGM.getVBaseAlignment(addr.getAlignment(),
|
|
|
|
derivedClass, nearestVBase);
|
|
|
|
} else {
|
|
|
|
alignment = addr.getAlignment();
|
|
|
|
}
|
|
|
|
alignment = alignment.alignmentAtOffset(nonVirtualOffset);
|
|
|
|
|
|
|
|
return Address(ptr, alignment);
|
2010-04-21 00:03:35 +08:00
|
|
|
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address CodeGenFunction::GetAddressOfBaseClass(
|
|
|
|
Address Value, const CXXRecordDecl *Derived,
|
2014-10-14 07:59:00 +08:00
|
|
|
CastExpr::path_const_iterator PathBegin,
|
|
|
|
CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
|
|
|
|
SourceLocation Loc) {
|
2010-08-07 14:22:56 +08:00
|
|
|
assert(PathBegin != PathEnd && "Base path should not be empty!");
|
2010-04-25 05:06:20 +08:00
|
|
|
|
2010-08-07 14:22:56 +08:00
|
|
|
CastExpr::path_const_iterator Start = PathBegin;
|
2014-05-21 13:09:00 +08:00
|
|
|
const CXXRecordDecl *VBase = nullptr;
|
|
|
|
|
2012-08-01 13:04:58 +08:00
|
|
|
// Sema has done some convenient canonicalization here: if the
|
|
|
|
// access path involved any virtual steps, the conversion path will
|
|
|
|
// *start* with a step down to the correct virtual base subobject,
|
|
|
|
// and hence will not require any further steps.
|
2010-04-25 05:06:20 +08:00
|
|
|
if ((*Start)->isVirtual()) {
|
2015-05-20 23:53:59 +08:00
|
|
|
VBase =
|
2010-04-25 05:06:20 +08:00
|
|
|
cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
|
|
|
|
++Start;
|
|
|
|
}
|
2012-08-01 13:04:58 +08:00
|
|
|
|
|
|
|
// Compute the static offset of the ultimate destination within its
|
|
|
|
// allocating subobject (the virtual base, if there is one, or else
|
|
|
|
// the "complete" object that we see).
|
2015-06-23 15:31:11 +08:00
|
|
|
CharUnits NonVirtualOffset = CGM.computeNonVirtualBaseClassOffset(
|
|
|
|
VBase ? VBase : Derived, Start, PathEnd);
|
2010-04-25 05:06:20 +08:00
|
|
|
|
2012-08-01 13:04:58 +08:00
|
|
|
// If there's a virtual step, we can sometimes "devirtualize" it.
|
|
|
|
// For now, that's limited to when the derived type is final.
|
|
|
|
// TODO: "devirtualize" this for accesses to known-complete objects.
|
|
|
|
if (VBase && Derived->hasAttr<FinalAttr>()) {
|
|
|
|
const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
|
|
|
|
CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
|
|
|
|
NonVirtualOffset += vBaseOffset;
|
2014-05-21 13:09:00 +08:00
|
|
|
VBase = nullptr; // we no longer have a virtual step
|
2012-08-01 13:04:58 +08:00
|
|
|
}
|
|
|
|
|
2010-04-25 05:06:20 +08:00
|
|
|
// Get the base pointer type.
|
2015-05-20 23:53:59 +08:00
|
|
|
llvm::Type *BasePtrTy =
|
2010-08-07 14:22:56 +08:00
|
|
|
ConvertType((PathEnd[-1])->getType())->getPointerTo();
|
2012-08-01 13:04:58 +08:00
|
|
|
|
2014-10-14 07:59:00 +08:00
|
|
|
QualType DerivedTy = getContext().getRecordType(Derived);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
|
2014-10-14 07:59:00 +08:00
|
|
|
|
2012-08-01 13:04:58 +08:00
|
|
|
// If the static offset is zero and we don't have a virtual step,
|
|
|
|
// just do a bitcast; null checks are unnecessary.
|
2011-03-22 08:53:26 +08:00
|
|
|
if (NonVirtualOffset.isZero() && !VBase) {
|
2014-10-14 07:59:00 +08:00
|
|
|
if (sanitizePerformTypeCheck()) {
|
2017-02-18 07:22:55 +08:00
|
|
|
SanitizerSet SkippedChecks;
|
|
|
|
SkippedChecks.set(SanitizerKind::Null, !NullCheckValue);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
|
2017-02-18 07:22:55 +08:00
|
|
|
DerivedTy, DerivedAlign, SkippedChecks);
|
2014-10-14 07:59:00 +08:00
|
|
|
}
|
2010-04-25 05:06:20 +08:00
|
|
|
return Builder.CreateBitCast(Value, BasePtrTy);
|
2014-05-21 13:09:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
llvm::BasicBlock *origBB = nullptr;
|
|
|
|
llvm::BasicBlock *endBB = nullptr;
|
2012-08-01 13:04:58 +08:00
|
|
|
|
|
|
|
// Skip over the offset (and the vtable load) if we're supposed to
|
|
|
|
// null-check the pointer.
|
2010-04-25 05:06:20 +08:00
|
|
|
if (NullCheckValue) {
|
2012-08-01 13:04:58 +08:00
|
|
|
origBB = Builder.GetInsertBlock();
|
|
|
|
llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
|
|
|
|
endBB = createBasicBlock("cast.end");
|
2015-05-20 23:53:59 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *isNull = Builder.CreateIsNull(Value.getPointer());
|
2012-08-01 13:04:58 +08:00
|
|
|
Builder.CreateCondBr(isNull, endBB, notNullBB);
|
|
|
|
EmitBlock(notNullBB);
|
2010-04-25 05:06:20 +08:00
|
|
|
}
|
|
|
|
|
2014-10-14 07:59:00 +08:00
|
|
|
if (sanitizePerformTypeCheck()) {
|
2017-02-18 07:22:55 +08:00
|
|
|
SanitizerSet SkippedChecks;
|
|
|
|
SkippedChecks.set(SanitizerKind::Null, true);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc,
|
2017-02-18 07:22:55 +08:00
|
|
|
Value.getPointer(), DerivedTy, DerivedAlign, SkippedChecks);
|
2014-10-14 07:59:00 +08:00
|
|
|
}
|
|
|
|
|
2012-08-01 13:04:58 +08:00
|
|
|
// Compute the virtual offset.
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Value *VirtualOffset = nullptr;
|
2011-01-29 11:18:56 +08:00
|
|
|
if (VBase) {
|
2013-05-30 02:02:47 +08:00
|
|
|
VirtualOffset =
|
|
|
|
CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
|
2011-01-29 11:18:56 +08:00
|
|
|
}
|
2010-04-25 05:06:20 +08:00
|
|
|
|
2012-08-01 13:04:58 +08:00
|
|
|
// Apply both offsets.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
|
|
|
|
VirtualOffset, Derived, VBase);
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2012-08-01 13:04:58 +08:00
|
|
|
// Cast to the destination type.
|
2010-04-25 05:06:20 +08:00
|
|
|
Value = Builder.CreateBitCast(Value, BasePtrTy);
|
2012-08-01 13:04:58 +08:00
|
|
|
|
|
|
|
// Build a phi if we needed a null check.
|
2010-04-25 05:06:20 +08:00
|
|
|
if (NullCheckValue) {
|
2012-08-01 13:04:58 +08:00
|
|
|
llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
|
|
|
|
Builder.CreateBr(endBB);
|
|
|
|
EmitBlock(endBB);
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2012-08-01 13:04:58 +08:00
|
|
|
llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
PHI->addIncoming(Value.getPointer(), notNullBB);
|
2012-08-01 13:04:58 +08:00
|
|
|
PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Value = Address(PHI, Value.getAlignment());
|
2010-04-25 05:06:20 +08:00
|
|
|
}
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2010-04-25 05:06:20 +08:00
|
|
|
return Value;
|
|
|
|
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address
|
|
|
|
CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
|
2010-04-25 07:01:49 +08:00
|
|
|
const CXXRecordDecl *Derived,
|
2010-08-07 14:22:56 +08:00
|
|
|
CastExpr::path_const_iterator PathBegin,
|
|
|
|
CastExpr::path_const_iterator PathEnd,
|
2009-11-24 01:57:54 +08:00
|
|
|
bool NullCheckValue) {
|
2010-08-07 14:22:56 +08:00
|
|
|
assert(PathBegin != PathEnd && "Base path should not be empty!");
|
2010-04-25 05:23:59 +08:00
|
|
|
|
2009-11-24 01:57:54 +08:00
|
|
|
QualType DerivedTy =
|
2010-04-25 07:01:49 +08:00
|
|
|
getContext().getCanonicalType(getContext().getTagDeclType(Derived));
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
|
2013-02-14 05:18:23 +08:00
|
|
|
|
2010-01-31 09:43:37 +08:00
|
|
|
llvm::Value *NonVirtualOffset =
|
2010-08-07 14:22:56 +08:00
|
|
|
CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2010-01-31 09:43:37 +08:00
|
|
|
if (!NonVirtualOffset) {
|
|
|
|
// No offset, we can just cast back.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return Builder.CreateBitCast(BaseAddr, DerivedPtrTy);
|
2010-01-31 09:43:37 +08:00
|
|
|
}
|
2014-05-21 13:09:00 +08:00
|
|
|
|
|
|
|
llvm::BasicBlock *CastNull = nullptr;
|
|
|
|
llvm::BasicBlock *CastNotNull = nullptr;
|
|
|
|
llvm::BasicBlock *CastEnd = nullptr;
|
|
|
|
|
2009-11-24 01:57:54 +08:00
|
|
|
if (NullCheckValue) {
|
|
|
|
CastNull = createBasicBlock("cast.null");
|
|
|
|
CastNotNull = createBasicBlock("cast.notnull");
|
|
|
|
CastEnd = createBasicBlock("cast.end");
|
2015-05-20 23:53:59 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr.getPointer());
|
2009-11-24 01:57:54 +08:00
|
|
|
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
|
|
|
|
EmitBlock(CastNotNull);
|
|
|
|
}
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2010-01-31 09:43:37 +08:00
|
|
|
// Apply the offset.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *Value = Builder.CreateBitCast(BaseAddr.getPointer(), Int8PtrTy);
|
2018-01-19 23:14:51 +08:00
|
|
|
Value = Builder.CreateInBoundsGEP(Value, Builder.CreateNeg(NonVirtualOffset),
|
|
|
|
"sub.ptr");
|
2010-01-31 09:43:37 +08:00
|
|
|
|
|
|
|
// Just cast.
|
|
|
|
Value = Builder.CreateBitCast(Value, DerivedPtrTy);
|
2009-11-24 01:57:54 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
// Produce a PHI if we had a null-check.
|
2009-11-24 01:57:54 +08:00
|
|
|
if (NullCheckValue) {
|
|
|
|
Builder.CreateBr(CastEnd);
|
|
|
|
EmitBlock(CastNull);
|
|
|
|
Builder.CreateBr(CastEnd);
|
|
|
|
EmitBlock(CastEnd);
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2011-03-30 19:28:58 +08:00
|
|
|
llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
|
2009-11-24 01:57:54 +08:00
|
|
|
PHI->addIncoming(Value, CastNotNull);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do an alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
|
2009-11-24 01:57:54 +08:00
|
|
|
Value = PHI;
|
2009-09-12 14:04:24 +08:00
|
|
|
}
|
2015-05-20 23:53:59 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return Address(Value, CGM.getClassPointerAlignment(Derived));
|
2009-09-12 12:27:24 +08:00
|
|
|
}
|
2013-02-27 21:46:31 +08:00
|
|
|
|
|
|
|
/// Compute the VTT (virtual-table table) argument to pass when calling the
/// base-subobject variant of a constructor or destructor, or return null if
/// the callee does not take a VTT parameter.
///
/// \param GD             the ctor/dtor being called.
/// \param ForVirtualBase true if the callee's class is a virtual base of the
///                       class currently being emitted.
/// \param Delegating     true if this is a delegating ctor call (target has
///                       the same class as the caller).
/// \returns a pointer into a VTT, or nullptr if no VTT parameter is needed.
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return nullptr;
  }

  // RD is the class being emitted; Base is the class of the callee.
  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    // Index 0 of the VTT is the primary virtual pointer for this class.
    SubVTTIndex = 0;
  } else {
    // Locate the sub-VTT for the base subobject at its offset within RD.
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}
|
|
|
|
|
2010-07-21 09:23:41 +08:00
|
|
|
namespace {
|
2010-07-21 13:30:47 +08:00
|
|
|
/// Call the destructor for a direct base class.
|
2015-08-19 06:40:54 +08:00
|
|
|
struct CallBaseDtor final : EHScopeStack::Cleanup {
|
2010-07-21 13:30:47 +08:00
|
|
|
const CXXRecordDecl *BaseClass;
|
|
|
|
bool BaseIsVirtual;
|
|
|
|
CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
|
|
|
|
: BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
|
2010-07-21 09:23:41 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2010-07-21 13:30:47 +08:00
|
|
|
const CXXRecordDecl *DerivedClass =
|
|
|
|
cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
|
|
|
|
|
|
|
|
const CXXDestructorDecl *D = BaseClass->getDestructor();
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Addr =
|
|
|
|
CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThisAddress(),
|
2010-07-21 13:30:47 +08:00
|
|
|
DerivedClass, BaseClass,
|
|
|
|
BaseIsVirtual);
|
2013-01-31 13:50:40 +08:00
|
|
|
CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
|
|
|
|
/*Delegating=*/false, Addr);
|
2010-07-21 09:23:41 +08:00
|
|
|
}
|
|
|
|
};
|
2010-09-17 10:31:44 +08:00
|
|
|
|
|
|
|
/// A visitor which checks whether an initializer uses 'this' in a
|
|
|
|
/// way which requires the vtable to be properly set.
|
2015-06-10 21:53:15 +08:00
|
|
|
struct DynamicThisUseChecker : ConstEvaluatedExprVisitor<DynamicThisUseChecker> {
|
|
|
|
typedef ConstEvaluatedExprVisitor<DynamicThisUseChecker> super;
|
2010-09-17 10:31:44 +08:00
|
|
|
|
|
|
|
bool UsesThis;
|
|
|
|
|
2015-06-10 21:53:15 +08:00
|
|
|
DynamicThisUseChecker(const ASTContext &C) : super(C), UsesThis(false) {}
|
2010-09-17 10:31:44 +08:00
|
|
|
|
|
|
|
// Black-list all explicit and implicit references to 'this'.
|
|
|
|
//
|
|
|
|
// Do we need to worry about external references to 'this' derived
|
|
|
|
// from arbitrary code? If so, then anything which runs arbitrary
|
|
|
|
// external code might potentially access the vtable.
|
2015-06-10 21:53:15 +08:00
|
|
|
void VisitCXXThisExpr(const CXXThisExpr *E) { UsesThis = true; }
|
2010-09-17 10:31:44 +08:00
|
|
|
};
|
2015-10-07 07:40:43 +08:00
|
|
|
} // end anonymous namespace
|
2010-09-17 10:31:44 +08:00
|
|
|
|
|
|
|
/// Determine whether a base-class initializer expression touches 'this'
/// (and therefore requires the vtable pointers to be installed before the
/// initializer is evaluated).
static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Walker(C);
  Walker.Visit(Init);
  return Walker.UsesThis;
}
|
|
|
|
|
2015-05-20 23:53:59 +08:00
|
|
|
/// Emit the initialization of one base-class subobject from a constructor's
/// mem-initializer, and register an EH cleanup to destroy it if a later
/// initializer throws.
///
/// \param ClassDecl the class whose constructor is being emitted.
/// \param BaseInit  the base-class mem-initializer to emit.
/// \param CtorType  which ctor variant is being emitted; base variants skip
///                  virtual bases (handled by the complete variant).
static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  Address ThisPtr = CGF.LoadCXXThisAddress();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  Address V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  // Evaluate the initializer directly into the base subobject; it is
  // destructed on exception from here on.
  AggValueSlot AggSlot =
      AggValueSlot::forAddr(
          V, Qualifiers(),
          AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          CGF.overlapForBaseInit(ClassDecl, BaseClassDecl, isBaseVirtual));

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  // Once the base is constructed, arrange for it to be destroyed if a
  // subsequent initializer throws (only needed with exceptions enabled
  // and a non-trivial base destructor).
  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}
|
|
|
|
|
2015-04-30 03:26:57 +08:00
|
|
|
static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) {
|
|
|
|
auto *CD = dyn_cast<CXXConstructorDecl>(D);
|
|
|
|
if (!(CD && CD->isCopyOrMoveConstructor()) &&
|
|
|
|
!D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// We can emit a memcpy for a trivial copy or move constructor/assignment.
|
|
|
|
if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// We *must* emit a memcpy for a defaulted union copy or move op.
|
|
|
|
if (D->getParent()->isUnion() && D->isDefaulted())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-07-14 15:55:48 +08:00
|
|
|
/// Adjust LHS (in place) to refer to the field named by the given member
/// initializer, drilling through anonymous structs/unions when the
/// initializer names an indirect member.
static void EmitLValueForAnyFieldInitialization(CodeGenFunction &CGF,
                                                CXXCtorInitializer *MemberInit,
                                                LValue &LHS) {
  if (!MemberInit->isIndirectMemberInitializer()) {
    // Ordinary member: project directly onto the named field.
    LHS = CGF.EmitLValueForFieldInitialization(LHS,
                                               MemberInit->getAnyMember());
    return;
  }

  // Anonymous union/struct member: walk the chain of containing fields
  // down to the field actually being initialized.
  for (const auto *Link : MemberInit->getIndirectMember()->chain())
    LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(Link));
}
|
|
|
|
|
2009-12-25 06:46:43 +08:00
|
|
|
/// Emit the initialization of one non-static data member from a
/// constructor's mem-initializer.  Arrays of memcpy-able elements in
/// defaulted copy/move constructors are special-cased into an aggregate
/// copy instead of walking the AST's per-element construction loop.
static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  ApplyDebugLocation Loc(CGF, MemberInit->getSourceLocation());
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS;

  // If a base constructor is being emitted, create an LValue that has the
  // non-virtual alignment.
  if (CGF.CurGD.getCtorType() == Ctor_Base)
    LHS = CGF.MakeNaturalAlignPointeeAddrLValue(ThisPtr, RecordTy);
  else
    LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  // Narrow LHS down to the member being initialized (possibly through
  // anonymous structs/unions).
  EmitLValueForAnyFieldInitialization(CGF, MemberInit, LHS);

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && isMemcpyEquivalentSpecialMember(CE->getConstructor()))) {
      // Locate the source object among the constructor's arguments
      // (its position is ABI-dependent).
      unsigned SrcArgIndex =
          CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args);
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS, Src, FieldType, CGF.overlapForFieldInit(Field),
                            LHS.isVolatileQualified());
      // Ensure that we destroy the objects if an exception is thrown later in
      // the constructor.
      QualType::DestructionKind dtorKind = FieldType.isDestructedType();
      if (CGF.needsEHCleanup(dtorKind))
        CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      return;
    }
  }

  // General case: evaluate the initializer expression into the field.
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit());
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
/// Emit the initializer for a single member or base of an object under
/// construction.  \p LHS is an lvalue designating \p Field inside the
/// object being constructed, and \p Init is the initializer expression
/// taken from the constructor's initializer list.
void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
                                              Expr *Init) {
  QualType FieldType = Field->getType();
  // Dispatch on how values of this type are evaluated in CodeGen.
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      // Non-simple lvalues (e.g. bit-fields) cannot be initialized in
      // place; evaluate the scalar and store it through the lvalue.
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    // Evaluate the aggregate initializer directly into the field's slot.
    // overlapForFieldInit decides whether tail padding of the field may
    // be reused (relevant under certain layout rules).
    AggValueSlot Slot =
        AggValueSlot::forLValue(
            LHS,
            AggValueSlot::IsDestructed,
            AggValueSlot::DoesNotNeedGCBarriers,
            AggValueSlot::IsNotAliased,
            overlapForFieldInit(Field),
            AggValueSlot::IsNotZeroed,
            // Checks are made by the code that calls constructor.
            AggValueSlot::IsSanitizerChecked);
    EmitAggExpr(Init, Slot);
    break;
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
/// Checks whether the given constructor is a valid subject for the
|
|
|
|
/// complete-to-base constructor delegation optimization, i.e.
|
|
|
|
/// emitting the complete constructor as a simple call to the base
|
|
|
|
/// constructor.
|
2017-02-24 09:15:19 +08:00
|
|
|
bool CodeGenFunction::IsConstructorDelegationValid(
|
|
|
|
const CXXConstructorDecl *Ctor) {
|
2010-02-23 08:48:20 +08:00
|
|
|
|
|
|
|
// Currently we disable the optimization for classes with virtual
|
|
|
|
// bases because (1) the addresses of parameter variables need to be
|
|
|
|
// consistent across all initializers but (2) the delegate function
|
|
|
|
// call necessarily creates a second copy of the parameter variable.
|
|
|
|
//
|
|
|
|
// The limiting example (purely theoretical AFAIK):
|
|
|
|
// struct A { A(int &c) { c++; } };
|
|
|
|
// struct B : virtual A {
|
|
|
|
// B(int count) : A(count) { printf("%d\n", count); }
|
|
|
|
// };
|
|
|
|
// ...although even this example could in principle be emitted as a
|
|
|
|
// delegation since the address of the parameter doesn't escape.
|
|
|
|
if (Ctor->getParent()->getNumVBases()) {
|
|
|
|
// TODO: white-list trivial vbase initializers. This case wouldn't
|
|
|
|
// be subject to the restrictions below.
|
|
|
|
|
|
|
|
// TODO: white-list cases where:
|
|
|
|
// - there are no non-reference parameters to the constructor
|
|
|
|
// - the initializers don't access any non-reference parameters
|
|
|
|
// - the initializers don't take the address of non-reference
|
|
|
|
// parameters
|
|
|
|
// - etc.
|
|
|
|
// If we ever add any of the above cases, remember that:
|
|
|
|
// - function-try-blocks will always blacklist this optimization
|
|
|
|
// - we need to perform the constructor prologue and cleanup in
|
|
|
|
// EmitConstructorBody.
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// We also disable the optimization for variadic functions because
|
|
|
|
// it's impossible to "re-pass" varargs.
|
|
|
|
if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
|
|
|
|
return false;
|
|
|
|
|
2011-05-01 15:04:31 +08:00
|
|
|
// FIXME: Decide if we can do a delegation of a delegating constructor.
|
|
|
|
if (Ctor->isDelegatingConstructor())
|
|
|
|
return false;
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-10-17 04:54:52 +08:00
|
|
|
// Emit code in ctor (Prologue==true) or dtor (Prologue==false)
// to poison the extra field paddings inserted under
// -fsanitize-address-field-padding=1|2.
void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
  ASTContext &Context = getContext();
  // The current function is either a constructor or a destructor; pick
  // the class accordingly.
  const CXXRecordDecl *ClassDecl =
      Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent()
               : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent();
  // Nothing to do unless the class was eligible for extra padding.
  if (!ClassDecl->mayInsertExtraPadding()) return;

  // Per-field size and offset, both in bytes (chars).
  struct SizeAndOffset {
    uint64_t Size;
    uint64_t Offset;
  };

  unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits();
  const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl);

  // Populate sizes and offsets of fields.
  SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount());
  for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i)
    SSV[i].Offset =
        Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity();

  // Sizes come from the field declarations themselves; bit-fields are
  // recorded with size 0 so they are never considered for poisoning.
  size_t NumFields = 0;
  for (const auto *Field : ClassDecl->fields()) {
    const FieldDecl *D = Field;
    std::pair<CharUnits, CharUnits> FieldInfo =
        Context.getTypeInfoInChars(D->getType());
    CharUnits FieldSize = FieldInfo.first;
    assert(NumFields < SSV.size());
    SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
    NumFields++;
  }
  assert(NumFields == SSV.size());
  // With at most one field there is no inter-field padding to poison.
  if (SSV.size() <= 1) return;

  // We will insert calls to __asan_* run-time functions.
  // LLVM AddressSanitizer pass may decide to inline them later.
  llvm::Type *Args[2] = {IntPtrTy, IntPtrTy};
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, Args, false);
  llvm::Constant *F = CGM.CreateRuntimeFunction(
      FTy, Prologue ? "__asan_poison_intra_object_redzone"
                    : "__asan_unpoison_intra_object_redzone");

  llvm::Value *ThisPtr = LoadCXXThis();
  ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy);
  // Only the non-virtual portion of the object is considered; the last
  // field's padding extends up to the non-virtual size.
  uint64_t TypeSize = Info.getNonVirtualSize().getQuantity();
  // For each field check if it has sufficient padding,
  // if so (un)poison it with a call.
  for (size_t i = 0; i < SSV.size(); i++) {
    // NOTE(review): 8 matches ASan's shadow granularity — padding
    // smaller or misaligned relative to it cannot be poisoned.
    uint64_t AsanAlignment = 8;
    uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset;
    uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size;
    uint64_t EndOffset = SSV[i].Offset + SSV[i].Size;
    if (PoisonSize < AsanAlignment || !SSV[i].Size ||
        (NextField % AsanAlignment) != 0)
      continue;
    // (un)poison [this + EndOffset, this + EndOffset + PoisonSize).
    Builder.CreateCall(
        F, {Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)),
            Builder.getIntN(PtrSize, PoisonSize)});
  }
}
|
|
|
|
|
2010-02-19 17:25:03 +08:00
|
|
|
/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  // Poison field paddings first if -fsanitize-address-field-padding is on.
  EmitAsanPrologueOrEpilogue(true);
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
          CtorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getEndLoc());
    return;
  }

  const FunctionDecl *Definition = nullptr;
  Stmt *Body = Ctor->getBody(Definition);
  assert(Definition == Ctor && "emitting wrong constructor body");

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  incrementProfileCounter(Body);

  // All cleanups registered by the prologue are forced below, before we
  // leave the try-block scope.
  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
|
|
|
|
|
2013-09-11 10:03:20 +08:00
|
|
|
namespace {
|
|
|
|
/// RAII object to indicate that codegen is copying the value representation
|
|
|
|
/// instead of the object representation. Useful when copying a struct or
|
|
|
|
/// class which has uninitialized members and we're only performing
|
|
|
|
/// lvalue-to-rvalue conversion on the object but not its members.
|
|
|
|
class CopyingValueRepresentation {
|
|
|
|
public:
|
|
|
|
explicit CopyingValueRepresentation(CodeGenFunction &CGF)
|
2014-10-31 03:33:44 +08:00
|
|
|
: CGF(CGF), OldSanOpts(CGF.SanOpts) {
|
2014-11-08 06:29:38 +08:00
|
|
|
CGF.SanOpts.set(SanitizerKind::Bool, false);
|
|
|
|
CGF.SanOpts.set(SanitizerKind::Enum, false);
|
2013-09-11 10:03:20 +08:00
|
|
|
}
|
|
|
|
~CopyingValueRepresentation() {
|
|
|
|
CGF.SanOpts = OldSanOpts;
|
|
|
|
}
|
|
|
|
private:
|
|
|
|
CodeGenFunction &CGF;
|
2014-11-11 09:26:14 +08:00
|
|
|
SanitizerSet OldSanOpts;
|
2013-09-11 10:03:20 +08:00
|
|
|
};
|
2016-02-11 03:11:58 +08:00
|
|
|
} // end anonymous namespace
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2013-02-17 15:22:09 +08:00
|
|
|
namespace {
|
|
|
|
class FieldMemcpyizer {
|
|
|
|
public:
|
|
|
|
FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
|
|
|
|
const VarDecl *SrcRec)
|
2015-05-20 23:53:59 +08:00
|
|
|
: CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
|
2013-02-17 15:22:09 +08:00
|
|
|
RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
|
2014-05-21 13:09:00 +08:00
|
|
|
FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
|
|
|
|
LastFieldOffset(0), LastAddedFieldIndex(0) {}
|
2013-02-17 15:22:09 +08:00
|
|
|
|
2014-10-17 04:54:52 +08:00
|
|
|
bool isMemcpyableField(FieldDecl *F) const {
|
|
|
|
// Never memcpy fields when we are adding poisoned paddings.
|
2014-11-11 09:26:14 +08:00
|
|
|
if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding)
|
2014-10-17 04:54:52 +08:00
|
|
|
return false;
|
2013-02-17 15:22:09 +08:00
|
|
|
Qualifiers Qual = F->getType().getQualifiers();
|
|
|
|
if (Qual.hasVolatile() || Qual.hasObjCLifetime())
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void addMemcpyableField(FieldDecl *F) {
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!FirstField)
|
2013-02-17 15:22:09 +08:00
|
|
|
addInitialField(F);
|
|
|
|
else
|
|
|
|
addNextField(F);
|
|
|
|
}
|
|
|
|
|
2014-10-11 02:57:10 +08:00
|
|
|
CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
|
2018-04-06 04:52:58 +08:00
|
|
|
ASTContext &Ctx = CGF.getContext();
|
2013-02-17 15:22:09 +08:00
|
|
|
unsigned LastFieldSize =
|
2018-04-06 04:52:58 +08:00
|
|
|
LastField->isBitField()
|
|
|
|
? LastField->getBitWidthValue(Ctx)
|
|
|
|
: Ctx.toBits(
|
|
|
|
Ctx.getTypeInfoDataSizeInChars(LastField->getType()).first);
|
|
|
|
uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize -
|
|
|
|
FirstByteOffset + Ctx.getCharWidth() - 1;
|
|
|
|
CharUnits MemcpySize = Ctx.toCharUnitsFromBits(MemcpySizeBits);
|
2013-02-17 15:22:09 +08:00
|
|
|
return MemcpySize;
|
|
|
|
}
|
|
|
|
|
|
|
|
void emitMemcpy() {
|
|
|
|
// Give the subclass a chance to bail out if it feels the memcpy isn't
|
|
|
|
// worth it (e.g. Hasn't aggregated enough data).
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!FirstField) {
|
2013-02-17 15:22:09 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-10-11 02:57:10 +08:00
|
|
|
uint64_t FirstByteOffset;
|
2013-02-17 15:22:09 +08:00
|
|
|
if (FirstField->isBitField()) {
|
|
|
|
const CGRecordLayout &RL =
|
|
|
|
CGF.getTypes().getCGRecordLayout(FirstField->getParent());
|
|
|
|
const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
|
2014-10-11 02:57:10 +08:00
|
|
|
// FirstFieldOffset is not appropriate for bitfields,
|
2015-07-13 19:52:14 +08:00
|
|
|
// we need to use the storage offset instead.
|
Respect alignment of nested bitfields
tools/clang/test/CodeGen/packed-nest-unpacked.c contains this test:
struct XBitfield {
unsigned b1 : 10;
unsigned b2 : 12;
unsigned b3 : 10;
};
struct YBitfield {
char x;
struct XBitfield y;
} __attribute((packed));
struct YBitfield gbitfield;
unsigned test7() {
// CHECK: @test7
// CHECK: load i32, i32* getelementptr inbounds (%struct.YBitfield, %struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 4
return gbitfield.y.b2;
}
The "align 4" is actually wrong. Accessing all of "gbitfield.y" as a single
i32 is of course possible, but that still doesn't make it 4-byte aligned as
it remains packed at offset 1 in the surrounding gbitfield object.
This alignment was changed by commit r169489, which also introduced changes
to bitfield access code in CGExpr.cpp. Code before that change used to take
into account *both* the alignment of the field to be accessed within the
current struct, *and* the alignment of that outer struct itself; this logic
was removed by the above commit.
Neglecting to consider both values can cause incorrect code to be generated
(I've seen an unaligned access crash on SystemZ due to this bug).
In order to always use the best known alignment value, this patch removes
the CGBitFieldInfo::StorageAlignment member and replaces it with a
StorageOffset member specifying the offset from the start of the surrounding
struct to the bitfield's underlying storage. This offset can then be combined
with the best-known alignment for a bitfield access lvalue to determine the
alignment to use when accessing the bitfield's storage.
Differential Revision: http://reviews.llvm.org/D11034
llvm-svn: 241916
2015-07-11 01:30:00 +08:00
|
|
|
FirstByteOffset = CGF.getContext().toBits(BFInfo.StorageOffset);
|
2013-02-27 12:14:49 +08:00
|
|
|
} else {
|
2014-10-11 02:57:10 +08:00
|
|
|
FirstByteOffset = FirstFieldOffset;
|
2013-02-27 12:14:49 +08:00
|
|
|
}
|
|
|
|
|
2014-10-11 02:57:10 +08:00
|
|
|
CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
|
2013-02-17 15:22:09 +08:00
|
|
|
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address ThisPtr = CGF.LoadCXXThisAddress();
|
|
|
|
LValue DestLV = CGF.MakeAddrLValue(ThisPtr, RecordTy);
|
2013-02-17 15:22:09 +08:00
|
|
|
LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
|
|
|
|
llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
|
|
|
|
LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
|
|
|
|
LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(),
|
|
|
|
Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(),
|
|
|
|
MemcpySize);
|
2013-02-17 15:22:09 +08:00
|
|
|
reset();
|
|
|
|
}
|
|
|
|
|
|
|
|
void reset() {
|
2014-05-21 13:09:00 +08:00
|
|
|
FirstField = nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
protected:
|
|
|
|
CodeGenFunction &CGF;
|
|
|
|
const CXXRecordDecl *ClassDecl;
|
|
|
|
|
|
|
|
private:
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
|
|
|
|
llvm::PointerType *DPT = DestPtr.getType();
|
2013-02-17 15:22:09 +08:00
|
|
|
llvm::Type *DBP =
|
|
|
|
llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
|
|
|
|
DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::PointerType *SPT = SrcPtr.getType();
|
2013-02-17 15:22:09 +08:00
|
|
|
llvm::Type *SBP =
|
|
|
|
llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
|
|
|
|
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity());
|
2013-02-17 15:22:09 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void addInitialField(FieldDecl *F) {
|
2016-02-11 03:11:58 +08:00
|
|
|
FirstField = F;
|
|
|
|
LastField = F;
|
|
|
|
FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
|
|
|
|
LastFieldOffset = FirstFieldOffset;
|
|
|
|
LastAddedFieldIndex = F->getFieldIndex();
|
|
|
|
}
|
2013-02-17 15:22:09 +08:00
|
|
|
|
|
|
|
void addNextField(FieldDecl *F) {
|
2013-05-07 13:20:46 +08:00
|
|
|
// For the most part, the following invariant will hold:
|
|
|
|
// F->getFieldIndex() == LastAddedFieldIndex + 1
|
|
|
|
// The one exception is that Sema won't add a copy-initializer for an
|
|
|
|
// unnamed bitfield, which will show up here as a gap in the sequence.
|
|
|
|
assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
|
|
|
|
"Cannot aggregate fields out of order.");
|
2013-02-17 15:22:09 +08:00
|
|
|
LastAddedFieldIndex = F->getFieldIndex();
|
|
|
|
|
|
|
|
// The 'first' and 'last' fields are chosen by offset, rather than field
|
|
|
|
// index. This allows the code to support bitfields, as well as regular
|
|
|
|
// fields.
|
|
|
|
uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
|
|
|
|
if (FOffset < FirstFieldOffset) {
|
|
|
|
FirstField = F;
|
|
|
|
FirstFieldOffset = FOffset;
|
|
|
|
} else if (FOffset > LastFieldOffset) {
|
|
|
|
LastField = F;
|
|
|
|
LastFieldOffset = FOffset;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
const VarDecl *SrcRec;
|
|
|
|
const ASTRecordLayout &RecLayout;
|
|
|
|
FieldDecl *FirstField;
|
|
|
|
FieldDecl *LastField;
|
|
|
|
uint64_t FirstFieldOffset, LastFieldOffset;
|
|
|
|
unsigned LastAddedFieldIndex;
|
|
|
|
};
|
|
|
|
|
|
|
|
class ConstructorMemcpyizer : public FieldMemcpyizer {
|
|
|
|
private:
|
|
|
|
/// Get source argument for copy constructor. Returns null if not a copy
|
2014-09-12 07:05:02 +08:00
|
|
|
/// constructor.
|
|
|
|
static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF,
|
|
|
|
const CXXConstructorDecl *CD,
|
2013-02-17 15:22:09 +08:00
|
|
|
FunctionArgList &Args) {
|
2013-08-08 00:16:48 +08:00
|
|
|
if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
|
2014-09-12 07:05:02 +08:00
|
|
|
return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Returns true if a CXXCtorInitializer represents a member initialization
|
|
|
|
// that can be rolled into a memcpy.
|
|
|
|
bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
|
|
|
|
if (!MemcpyableCtor)
|
|
|
|
return false;
|
|
|
|
FieldDecl *Field = MemberInit->getMember();
|
2014-05-21 13:09:00 +08:00
|
|
|
assert(Field && "No field for member init.");
|
2013-02-17 15:22:09 +08:00
|
|
|
QualType FieldType = Field->getType();
|
|
|
|
CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
|
|
|
|
|
2015-04-30 03:26:57 +08:00
|
|
|
// Bail out on non-memcpyable, not-trivially-copyable members.
|
|
|
|
if (!(CE && isMemcpyEquivalentSpecialMember(CE->getConstructor())) &&
|
2013-02-17 15:22:09 +08:00
|
|
|
!(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
|
|
|
|
FieldType->isReferenceType()))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Bail out on volatile fields.
|
|
|
|
if (!isMemcpyableField(Field))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Otherwise we're good.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
public:
|
|
|
|
ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
|
|
|
|
FunctionArgList &Args)
|
2014-09-12 07:05:02 +08:00
|
|
|
: FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)),
|
2013-02-17 15:22:09 +08:00
|
|
|
ConstructorDecl(CD),
|
2013-08-08 00:16:48 +08:00
|
|
|
MemcpyableCtor(CD->isDefaulted() &&
|
2013-02-17 15:22:09 +08:00
|
|
|
CD->isCopyOrMoveConstructor() &&
|
|
|
|
CGF.getLangOpts().getGC() == LangOptions::NonGC),
|
|
|
|
Args(Args) { }
|
|
|
|
|
|
|
|
void addMemberInitializer(CXXCtorInitializer *MemberInit) {
|
|
|
|
if (isMemberInitMemcpyable(MemberInit)) {
|
|
|
|
AggregatedInits.push_back(MemberInit);
|
|
|
|
addMemcpyableField(MemberInit->getMember());
|
|
|
|
} else {
|
|
|
|
emitAggregatedInits();
|
|
|
|
EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
|
|
|
|
ConstructorDecl, Args);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void emitAggregatedInits() {
|
|
|
|
if (AggregatedInits.size() <= 1) {
|
|
|
|
// This memcpy is too small to be worthwhile. Fall back on default
|
|
|
|
// codegen.
|
2013-09-11 10:03:20 +08:00
|
|
|
if (!AggregatedInits.empty()) {
|
|
|
|
CopyingValueRepresentation CVR(CGF);
|
2013-02-17 15:22:09 +08:00
|
|
|
EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
|
2013-09-11 10:03:20 +08:00
|
|
|
AggregatedInits[0], ConstructorDecl, Args);
|
2015-07-14 15:55:48 +08:00
|
|
|
AggregatedInits.clear();
|
2013-02-17 15:22:09 +08:00
|
|
|
}
|
|
|
|
reset();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
pushEHDestructors();
|
|
|
|
emitMemcpy();
|
|
|
|
AggregatedInits.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
void pushEHDestructors() {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address ThisPtr = CGF.LoadCXXThisAddress();
|
2013-02-17 15:22:09 +08:00
|
|
|
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
LValue LHS = CGF.MakeAddrLValue(ThisPtr, RecordTy);
|
2013-02-17 15:22:09 +08:00
|
|
|
|
|
|
|
for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
|
2015-07-14 15:55:48 +08:00
|
|
|
CXXCtorInitializer *MemberInit = AggregatedInits[i];
|
|
|
|
QualType FieldType = MemberInit->getAnyMember()->getType();
|
2013-02-17 15:22:09 +08:00
|
|
|
QualType::DestructionKind dtorKind = FieldType.isDestructedType();
|
2015-07-14 15:55:48 +08:00
|
|
|
if (!CGF.needsEHCleanup(dtorKind))
|
|
|
|
continue;
|
|
|
|
LValue FieldLHS = LHS;
|
|
|
|
EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS);
|
|
|
|
CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType);
|
2013-02-17 15:22:09 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void finish() {
|
|
|
|
emitAggregatedInits();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
const CXXConstructorDecl *ConstructorDecl;
|
|
|
|
bool MemcpyableCtor;
|
|
|
|
FunctionArgList &Args;
|
|
|
|
SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
|
|
|
|
};
|
|
|
|
|
|
|
|
class AssignmentMemcpyizer : public FieldMemcpyizer {
|
|
|
|
private:
|
|
|
|
// Returns the memcpyable field copied by the given statement, if one
|
2013-09-11 10:03:20 +08:00
|
|
|
// exists. Otherwise returns null.
|
|
|
|
FieldDecl *getMemcpyableField(Stmt *S) {
|
2013-02-17 15:22:09 +08:00
|
|
|
if (!AssignmentsMemcpyable)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
|
|
|
|
// Recognise trivial assignments.
|
|
|
|
if (BO->getOpcode() != BO_Assign)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
|
|
|
|
if (!ME)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
|
|
|
|
if (!Field || !isMemcpyableField(Field))
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
Stmt *RHS = BO->getRHS();
|
|
|
|
if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
|
|
|
|
RHS = EC->getSubExpr();
|
|
|
|
if (!RHS)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2017-02-03 01:53:34 +08:00
|
|
|
if (MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS)) {
|
|
|
|
if (ME2->getMemberDecl() == Field)
|
|
|
|
return Field;
|
|
|
|
}
|
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
} else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
|
|
|
|
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
|
2015-04-30 03:26:57 +08:00
|
|
|
if (!(MD && isMemcpyEquivalentSpecialMember(MD)))
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
|
|
|
|
if (!IOA)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
|
|
|
|
if (!Field || !isMemcpyableField(Field))
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
|
|
|
|
if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
return Field;
|
|
|
|
} else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
|
|
|
|
FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
|
|
|
|
if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
Expr *DstPtr = CE->getArg(0);
|
|
|
|
if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
|
|
|
|
DstPtr = DC->getSubExpr();
|
|
|
|
UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
|
|
|
|
if (!DUO || DUO->getOpcode() != UO_AddrOf)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
|
|
|
|
if (!ME)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
|
|
|
|
if (!Field || !isMemcpyableField(Field))
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
Expr *SrcPtr = CE->getArg(1);
|
|
|
|
if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
|
|
|
|
SrcPtr = SC->getSubExpr();
|
|
|
|
UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
|
|
|
|
if (!SUO || SUO->getOpcode() != UO_AddrOf)
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
|
|
|
|
if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
return Field;
|
|
|
|
}
|
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-02-17 15:22:09 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool AssignmentsMemcpyable;
|
|
|
|
SmallVector<Stmt*, 16> AggregatedStmts;
|
|
|
|
|
|
|
|
public:
|
|
|
|
AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
|
|
|
|
FunctionArgList &Args)
|
|
|
|
: FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
|
|
|
|
AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
|
|
|
|
assert(Args.size() == 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
void emitAssignment(Stmt *S) {
|
|
|
|
FieldDecl *F = getMemcpyableField(S);
|
|
|
|
if (F) {
|
|
|
|
addMemcpyableField(F);
|
|
|
|
AggregatedStmts.push_back(S);
|
2015-05-20 23:53:59 +08:00
|
|
|
} else {
|
2013-02-17 15:22:09 +08:00
|
|
|
emitAggregatedStmts();
|
|
|
|
CGF.EmitStmt(S);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void emitAggregatedStmts() {
|
|
|
|
if (AggregatedStmts.size() <= 1) {
|
2013-09-11 10:03:20 +08:00
|
|
|
if (!AggregatedStmts.empty()) {
|
|
|
|
CopyingValueRepresentation CVR(CGF);
|
|
|
|
CGF.EmitStmt(AggregatedStmts[0]);
|
|
|
|
}
|
2013-02-17 15:22:09 +08:00
|
|
|
reset();
|
|
|
|
}
|
|
|
|
|
|
|
|
emitMemcpy();
|
|
|
|
AggregatedStmts.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
void finish() {
|
|
|
|
emitAggregatedStmts();
|
|
|
|
}
|
|
|
|
};
|
2015-10-07 07:40:43 +08:00
|
|
|
} // end anonymous namespace
|
2013-02-17 15:22:09 +08:00
|
|
|
|
2015-09-16 05:46:47 +08:00
|
|
|
static bool isInitializerOfDynamicClass(const CXXCtorInitializer *BaseInit) {
|
|
|
|
const Type *BaseType = BaseInit->getBaseClass();
|
|
|
|
const auto *BaseClassDecl =
|
|
|
|
cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
|
|
|
|
return BaseClassDecl->isDynamicClass();
|
|
|
|
}
|
|
|
|
|
2009-12-25 06:46:43 +08:00
|
|
|
/// EmitCtorPrologue - This routine generates necessary code to initialize
|
|
|
|
/// base classes and non-static data members belonging to this constructor.
|
|
|
|
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
|
Reimplement code generation for copying fields in the
implicitly-generated copy constructor. Previously, Sema would perform
some checking and instantiation to determine which copy constructors,
etc., would be called, then CodeGen would attempt to figure out which
copy constructor to call... but would get it wrong, or poke at an
uninstantiated default argument, or fail in other ways.
The new scheme is similar to what we now do for the implicit
copy-assignment operator, where Sema performs all of the semantic
analysis and builds specific ASTs that look similar to the ASTs we'd
get from explicitly writing the copy constructor, so that CodeGen need
only do a direct translation.
However, it's not quite that simple because one cannot explicit write
elementwise copy-construction of an array. So, I've extended
CXXBaseOrMemberInitializer to contain a list of indexing variables
used to copy-construct the elements. For example, if we have:
struct A { A(const A&); };
struct B {
A array[2][3];
};
then we generate an implicit copy assignment operator for B that looks
something like this:
B::B(const B &other) : array[i0][i1](other.array[i0][i1]) { }
CodeGen will loop over the invented variables i0 and i1 to visit all
elements in the array, so that each element in the destination array
will be copy-constructed from the corresponding element in the source
array. Of course, if we're dealing with arrays of scalars or class
types with trivial copy-assignment operators, we just generate a
memcpy rather than a loop.
Fixes PR6928, PR5989, and PR6887. Boost.Regex now compiles and passes
all of its regression tests.
Conspicuously missing from this patch is handling for the exceptional
case, where we need to destruct those objects that we have
constructed. I'll address that case separately.
llvm-svn: 103079
2010-05-05 13:51:00 +08:00
|
|
|
CXXCtorType CtorType,
|
|
|
|
FunctionArgList &Args) {
|
2011-05-01 15:04:31 +08:00
|
|
|
if (CD->isDelegatingConstructor())
|
|
|
|
return EmitDelegatingCXXConstructorCall(CD, Args);
|
|
|
|
|
2009-12-25 06:46:43 +08:00
|
|
|
const CXXRecordDecl *ClassDecl = CD->getParent();
|
2010-02-03 03:58:43 +08:00
|
|
|
|
2013-02-27 21:46:31 +08:00
|
|
|
CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
|
|
|
|
E = CD->init_end();
|
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::BasicBlock *BaseCtorContinueBB = nullptr;
|
2013-02-27 21:46:31 +08:00
|
|
|
if (ClassDecl->getNumVBases() &&
|
|
|
|
!CGM.getTarget().getCXXABI().hasConstructorVariants()) {
|
|
|
|
// The ABIs that don't have constructor variants need to put a branch
|
|
|
|
// before the virtual base initialization code.
|
2013-06-19 23:20:38 +08:00
|
|
|
BaseCtorContinueBB =
|
|
|
|
CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
|
2013-02-27 21:46:31 +08:00
|
|
|
assert(BaseCtorContinueBB);
|
|
|
|
}
|
|
|
|
|
2015-10-03 06:12:40 +08:00
|
|
|
llvm::Value *const OldThis = CXXThisValue;
|
2013-02-27 21:46:31 +08:00
|
|
|
// Virtual base initializers first.
|
|
|
|
for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
|
2015-10-03 06:12:40 +08:00
|
|
|
if (CGM.getCodeGenOpts().StrictVTablePointers &&
|
|
|
|
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
|
|
|
|
isInitializerOfDynamicClass(*B))
|
2018-05-03 19:03:01 +08:00
|
|
|
CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
|
2013-02-27 21:46:31 +08:00
|
|
|
EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (BaseCtorContinueBB) {
|
|
|
|
// Complete object handler should continue to the remaining initializers.
|
|
|
|
Builder.CreateBr(BaseCtorContinueBB);
|
|
|
|
EmitBlock(BaseCtorContinueBB);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Then, non-virtual base initializers.
|
|
|
|
for (; B != E && (*B)->isBaseInitializer(); B++) {
|
|
|
|
assert(!(*B)->isBaseVirtual());
|
2015-10-03 06:12:40 +08:00
|
|
|
|
|
|
|
if (CGM.getCodeGenOpts().StrictVTablePointers &&
|
|
|
|
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
|
|
|
|
isInitializerOfDynamicClass(*B))
|
2018-05-03 19:03:01 +08:00
|
|
|
CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
|
2013-02-27 21:46:31 +08:00
|
|
|
EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
|
2009-12-25 06:46:43 +08:00
|
|
|
}
|
|
|
|
|
2015-10-03 06:12:40 +08:00
|
|
|
CXXThisValue = OldThis;
|
2015-09-16 05:46:47 +08:00
|
|
|
|
2010-03-29 05:07:49 +08:00
|
|
|
InitializeVTablePointers(ClassDecl);
|
2010-02-03 03:58:43 +08:00
|
|
|
|
2013-02-27 21:46:31 +08:00
|
|
|
// And finally, initialize class members.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
FieldConstructionScope FCS(*this, LoadCXXThisAddress());
|
2013-02-17 15:22:09 +08:00
|
|
|
ConstructorMemcpyizer CM(*this, CD, Args);
|
2013-02-27 21:46:31 +08:00
|
|
|
for (; B != E; B++) {
|
|
|
|
CXXCtorInitializer *Member = (*B);
|
|
|
|
assert(!Member->isBaseInitializer());
|
|
|
|
assert(Member->isAnyMemberInitializer() &&
|
|
|
|
"Delegating initializer on non-delegating constructor");
|
|
|
|
CM.addMemberInitializer(Member);
|
|
|
|
}
|
2013-02-17 15:22:09 +08:00
|
|
|
CM.finish();
|
2009-12-25 06:46:43 +08:00
|
|
|
}
|
|
|
|
|
2011-05-16 01:36:21 +08:00
|
|
|
static bool
|
|
|
|
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);
|
|
|
|
|
|
|
|
static bool
|
2015-05-20 23:53:59 +08:00
|
|
|
HasTrivialDestructorBody(ASTContext &Context,
|
2011-05-16 01:36:21 +08:00
|
|
|
const CXXRecordDecl *BaseClassDecl,
|
|
|
|
const CXXRecordDecl *MostDerivedClassDecl)
|
|
|
|
{
|
|
|
|
// If the destructor is trivial we don't have to check anything else.
|
|
|
|
if (BaseClassDecl->hasTrivialDestructor())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (!BaseClassDecl->getDestructor()->hasTrivialBody())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check fields.
|
2014-03-09 04:12:42 +08:00
|
|
|
for (const auto *Field : BaseClassDecl->fields())
|
2011-05-16 01:36:21 +08:00
|
|
|
if (!FieldHasTrivialDestructorBody(Context, Field))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check non-virtual bases.
|
2014-03-13 23:41:46 +08:00
|
|
|
for (const auto &I : BaseClassDecl->bases()) {
|
|
|
|
if (I.isVirtual())
|
2011-05-16 01:36:21 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
const CXXRecordDecl *NonVirtualBase =
|
2014-03-13 23:41:46 +08:00
|
|
|
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
|
2011-05-16 01:36:21 +08:00
|
|
|
if (!HasTrivialDestructorBody(Context, NonVirtualBase,
|
|
|
|
MostDerivedClassDecl))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (BaseClassDecl == MostDerivedClassDecl) {
|
|
|
|
// Check virtual bases.
|
2014-03-14 00:15:17 +08:00
|
|
|
for (const auto &I : BaseClassDecl->vbases()) {
|
2011-05-16 01:36:21 +08:00
|
|
|
const CXXRecordDecl *VirtualBase =
|
2014-03-14 00:15:17 +08:00
|
|
|
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
|
2011-05-16 01:36:21 +08:00
|
|
|
if (!HasTrivialDestructorBody(Context, VirtualBase,
|
|
|
|
MostDerivedClassDecl))
|
2015-05-20 23:53:59 +08:00
|
|
|
return false;
|
2011-05-16 01:36:21 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
FieldHasTrivialDestructorBody(ASTContext &Context,
|
2015-09-04 07:02:30 +08:00
|
|
|
const FieldDecl *Field)
|
2011-05-16 01:36:21 +08:00
|
|
|
{
|
|
|
|
QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
|
|
|
|
|
|
|
|
const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
|
|
|
|
if (!RT)
|
|
|
|
return true;
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2011-05-16 01:36:21 +08:00
|
|
|
CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
|
2015-06-26 08:18:35 +08:00
|
|
|
|
|
|
|
// The destructor for an implicit anonymous union member is never invoked.
|
|
|
|
if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
|
|
|
|
return false;
|
|
|
|
|
2011-05-16 01:36:21 +08:00
|
|
|
return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
|
|
|
|
}
|
|
|
|
|
2011-05-15 07:26:09 +08:00
|
|
|
/// CanSkipVTablePointerInitialization - Check whether we need to initialize
|
|
|
|
/// any vtable pointers before calling this destructor.
|
2015-09-04 07:02:30 +08:00
|
|
|
static bool CanSkipVTablePointerInitialization(CodeGenFunction &CGF,
|
2011-05-16 12:08:36 +08:00
|
|
|
const CXXDestructorDecl *Dtor) {
|
2015-09-16 05:46:47 +08:00
|
|
|
const CXXRecordDecl *ClassDecl = Dtor->getParent();
|
|
|
|
if (!ClassDecl->isDynamicClass())
|
|
|
|
return true;
|
|
|
|
|
2011-05-15 07:26:09 +08:00
|
|
|
if (!Dtor->hasTrivialBody())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check the fields.
|
2014-03-09 04:12:42 +08:00
|
|
|
for (const auto *Field : ClassDecl->fields())
|
2015-09-04 07:02:30 +08:00
|
|
|
if (!FieldHasTrivialDestructorBody(CGF.getContext(), Field))
|
2011-05-16 01:36:21 +08:00
|
|
|
return false;
|
2011-05-15 07:26:09 +08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-02-19 17:25:03 +08:00
|
|
|
/// EmitDestructorBody - Emits the body of the current destructor.
///
/// Dispatches on the destructor variant being emitted (deleting, complete,
/// or base) and surrounds the user-written body with the implicit epilogue
/// cleanups that destroy members and bases in reverse construction order.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // For an abstract class, non-base destructors are never used (and can't
  // be emitted in general, because vbase dtors may not have been validated
  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
  // in fact emit references to them from other compilations, so emit them
  // as functions containing a trap instruction.
  if (DtorType != Dtor_Base && Dtor->getParent()->isAbstract()) {
    llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return;
  }

  Stmt *Body = Dtor->getBody();
  if (Body)
    incrementProfileCounter(Body);

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor.  Do so.
  if (DtorType == Dtor_Deleting) {
    RunCleanupsScope DtorEpilogue(*this);
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    // A destroying operator delete takes over completely: in that case
    // EnterDtorCleanups has already branched to the return block and left
    // no insert point, so no destructor call is emitted here.
    if (HaveInsertPoint())
      EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThisAddress());
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
  EmitAsanPrologueOrEpilogue(false);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases.  But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks.  In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Comdat: llvm_unreachable("not expecting a COMDAT");
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody) {
      // Delegate the member/non-virtual-base destruction to the base
      // variant; the cleanups pushed just above destroy the virtual bases.
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThisAddress());
      break;
    }

    // Fallthrough: act like we're in the base variant.
    LLVM_FALLTHROUGH;

  case Dtor_Base:
    assert(Body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(*this, Dtor)) {
      // Insert the llvm.launder.invariant.group intrinsic before initializing
      // the vptrs to cancel any previous assumptions we might have made.
      if (CGM.getCodeGenOpts().StrictVTablePointers &&
          CGM.getCodeGenOpts().OptimizationLevel > 0)
        CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
      InitializeVTablePointers(Dtor->getParent());
    }

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);

    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
|
|
|
|
|
2013-02-17 15:22:09 +08:00
|
|
|
/// Emit the body of an implicit (compiler-generated) assignment operator,
/// routing each member-wise assignment through the memcpy-coalescing helper.
void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const auto *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *BodyS = AssignOp->getBody();
  assert(isa<CompoundStmt>(BodyS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const auto *BodyCS = cast<CompoundStmt>(BodyS);

  LexicalScope Scope(*this, BodyCS->getSourceRange());

  incrementProfileCounter(BodyCS);

  // Adjacent trivial per-member assignments are merged into memcpys.
  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (auto *S : BodyCS->body())
    AM.emitAssignment(S);
  AM.finish();
}
|
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
namespace {
|
2017-10-13 09:55:36 +08:00
|
|
|
/// Compute the `this` pointer to pass to the operator delete invoked from a
/// deleting destructor. A destroying operator delete may carry an explicit
/// expression for it; otherwise the current CXXThis value is used.
llvm::Value *LoadThisForDtorDelete(CodeGenFunction &CGF,
                                   const CXXDestructorDecl *DD) {
  Expr *ThisArg = DD->getOperatorDeleteThisArg();
  return ThisArg ? CGF.EmitScalarExpr(ThisArg) : CGF.LoadCXXThis();
}
|
|
|
|
|
2010-07-21 13:30:47 +08:00
|
|
|
/// Call the operator delete associated with the current destructor.
|
2015-08-19 06:40:54 +08:00
|
|
|
struct CallDtorDelete final : EHScopeStack::Cleanup {
|
2010-07-21 13:30:47 +08:00
|
|
|
CallDtorDelete() {}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2010-07-21 13:30:47 +08:00
|
|
|
const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
|
|
|
|
const CXXRecordDecl *ClassDecl = Dtor->getParent();
|
2017-10-13 09:55:36 +08:00
|
|
|
CGF.EmitDeleteCall(Dtor->getOperatorDelete(),
|
|
|
|
LoadThisForDtorDelete(CGF, Dtor),
|
2010-07-21 13:30:47 +08:00
|
|
|
CGF.getContext().getTagDeclType(ClassDecl));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2017-10-13 09:55:36 +08:00
|
|
|
/// Emit a run-time-conditional call to the operator delete of the current
/// destructor, guarded by the implicit "should delete" flag some ABIs pass
/// to the deleting destructor.
///
/// \param ShouldDeleteCondition non-zero means the delete must be called.
/// \param ReturnAfterDelete when true (destroying operator delete), control
///        branches to the return block after the delete call instead of
///        falling through to the remaining destruction work.
void EmitConditionalDtorDeleteCall(CodeGenFunction &CGF,
                                   llvm::Value *ShouldDeleteCondition,
                                   bool ReturnAfterDelete) {
  llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
  llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
  // A zero condition means "don't call delete": branch straight to continue.
  llvm::Value *ShouldCallDelete
    = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
  CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

  CGF.EmitBlock(callDeleteBB);
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  CGF.EmitDeleteCall(Dtor->getOperatorDelete(),
                     LoadThisForDtorDelete(CGF, Dtor),
                     CGF.getContext().getTagDeclType(ClassDecl));
  assert(Dtor->getOperatorDelete()->isDestroyingOperatorDelete() ==
             ReturnAfterDelete &&
         "unexpected value for ReturnAfterDelete");
  if (ReturnAfterDelete)
    // The destroying operator delete did all the work; leave the function
    // through any pending cleanups.
    CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
  else
    CGF.Builder.CreateBr(continueBB);

  CGF.EmitBlock(continueBB);
}
|
|
|
|
|
2015-08-19 06:40:54 +08:00
|
|
|
/// Cleanup that calls the operator delete of the current destructor only
/// when the implicit "should delete" flag passed to the deleting destructor
/// is set at run time.
struct CallDtorDeleteConditional final : EHScopeStack::Cleanup {
  // Implicit boolean parameter of the deleting destructor; non-zero means
  // operator delete must be invoked.
  llvm::Value *ShouldDeleteCondition;

public:
  CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
    assert(ShouldDeleteCondition != nullptr);
  }

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Non-destroying operator delete: fall through after the (conditional)
    // delete call rather than returning.
    EmitConditionalDtorDeleteCall(CGF, ShouldDeleteCondition,
                                  /*ReturnAfterDelete*/false);
  }
};
|
|
|
|
|
2015-08-19 06:40:54 +08:00
|
|
|
/// Cleanup that destroys a single member of the object being destructed,
/// used for the implicit member destruction in the destructor epilogue.
class DestroyField final : public EHScopeStack::Cleanup {
  const FieldDecl *field;                // the member to destroy
  CodeGenFunction::Destroyer *destroyer; // how to destroy one object of its type
  bool useEHCleanupForArray;             // protect partially-destroyed arrays?

public:
  DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
               bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Find the address of the field.
    Address thisValue = CGF.LoadCXXThisAddress();
    QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
    LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
    LValue LV = CGF.EmitLValueForField(ThisLV, field);
    assert(LV.isSimple());

    // Honor useEHCleanupForArray only on the normal-cleanup path.
    CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                    flags.isForNormalCleanup() && useEHCleanupForArray);
  }
};
|
2015-09-04 07:02:30 +08:00
|
|
|
|
2015-09-16 08:38:22 +08:00
|
|
|
/// Emit a call to the sanitizer runtime's __sanitizer_dtor_callback hook,
/// passing the start of the region being poisoned and its size in bytes.
static void EmitSanitizerDtorCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
                                      CharUnits::QuantityType PoisonSize) {
  CodeGenFunction::SanitizerScope SanScope(&CGF);

  // The runtime hook has the signature void(void *addr, size_t size).
  llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};
  llvm::FunctionType *FnType =
      llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
  llvm::Value *Callback =
      CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");

  // Pass in void pointer and size of region as arguments to runtime function.
  llvm::Value *Args[] = {CGF.Builder.CreateBitCast(Ptr, CGF.VoidPtrTy),
                         llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)};
  CGF.EmitNounwindRuntimeCall(Callback, Args);
}
|
|
|
|
|
|
|
|
/// Cleanup that poisons the trivially-destroyed members of an object at the
/// end of its destructor, so that MemorySanitizer's use-after-dtor checking
/// can flag later accesses. Members with non-trivial destructor bodies are
/// skipped: their own destructors are responsible for their storage.
class SanitizeDtorMembers final : public EHScopeStack::Cleanup {
  const CXXDestructorDecl *Dtor;

public:
  SanitizeDtorMembers(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {}

  // Generate function call for handling object poisoning.
  // Disables tail call elimination, to prevent the current stack frame
  // from disappearing from the stack trace.
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ASTRecordLayout &Layout =
        CGF.getContext().getASTRecordLayout(Dtor->getParent());

    // Nothing to poison.
    if (Layout.getFieldCount() == 0)
      return;

    // Prevent the current stack frame from disappearing from the stack trace.
    CGF.CurFn->addFnAttr("disable-tail-calls", "true");

    // Walk the fields in declaration order, coalescing consecutive runs of
    // trivially-destroyed fields into single poison calls. Only members
    // declared in this class are poisoned (bases handle their own).
    ASTContext &Context = CGF.getContext();
    unsigned fieldIndex = 0;
    int startIndex = -1; // layout index of the first field in the current run
    for (const FieldDecl *Field : Dtor->getParent()->fields()) {
      // Poison field if it is trivial
      if (FieldHasTrivialDestructorBody(Context, Field)) {
        // Start sanitizing at this field
        if (startIndex < 0)
          startIndex = fieldIndex;

        // Currently on the last field, and it must be poisoned with the
        // current block.
        if (fieldIndex == Layout.getFieldCount() - 1) {
          PoisonMembers(CGF, startIndex, Layout.getFieldCount());
        }
      } else if (startIndex >= 0) {
        // No longer within a block of memory to poison, so poison the block
        PoisonMembers(CGF, startIndex, fieldIndex);
        // Re-set the start index
        startIndex = -1;
      }
      fieldIndex += 1;
    }
  }

private:
  /// Emit one poison call covering a contiguous run of fields.
  /// \param layoutStartOffset index of the ASTRecordLayout field to
  /// start poisoning (inclusive)
  /// \param layoutEndOffset index of the ASTRecordLayout field to
  /// end poisoning (exclusive)
  void PoisonMembers(CodeGenFunction &CGF, unsigned layoutStartOffset,
                     unsigned layoutEndOffset) {
    ASTContext &Context = CGF.getContext();
    const ASTRecordLayout &Layout =
        Context.getASTRecordLayout(Dtor->getParent());

    // Byte offset of the first field in the run, relative to `this`.
    llvm::ConstantInt *OffsetSizePtr = llvm::ConstantInt::get(
        CGF.SizeTy,
        Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutStartOffset))
            .getQuantity());

    llvm::Value *OffsetPtr = CGF.Builder.CreateGEP(
        CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.Int8PtrTy),
        OffsetSizePtr);

    CharUnits::QuantityType PoisonSize;
    if (layoutEndOffset >= Layout.getFieldCount()) {
      // Run extends to the end of the object: poison up to the non-virtual
      // size so trailing padding is covered too.
      PoisonSize = Layout.getNonVirtualSize().getQuantity() -
                   Context.toCharUnitsFromBits(
                       Layout.getFieldOffset(layoutStartOffset))
                       .getQuantity();
    } else {
      // Poison from the start of the run to the start of the next field.
      PoisonSize = Context.toCharUnitsFromBits(
                       Layout.getFieldOffset(layoutEndOffset) -
                       Layout.getFieldOffset(layoutStartOffset))
                       .getQuantity();
    }

    // Zero-size runs (e.g. empty members) need no call.
    if (PoisonSize == 0)
      return;

    EmitSanitizerDtorCallback(CGF, OffsetPtr, PoisonSize);
  }
};
|
|
|
|
|
|
|
|
class SanitizeDtorVTable final : public EHScopeStack::Cleanup {
|
|
|
|
const CXXDestructorDecl *Dtor;
|
|
|
|
|
|
|
|
public:
|
|
|
|
SanitizeDtorVTable(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {}
|
2015-09-04 07:02:30 +08:00
|
|
|
|
2015-09-16 08:38:22 +08:00
|
|
|
// Generate function call for handling vtable pointer poisoning.
|
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
|
|
|
assert(Dtor->getParent()->isDynamicClass());
|
2015-09-16 14:26:56 +08:00
|
|
|
(void)Dtor;
|
2015-09-16 08:38:22 +08:00
|
|
|
ASTContext &Context = CGF.getContext();
|
|
|
|
// Poison vtable and vtable ptr if they exist for this class.
|
|
|
|
llvm::Value *VTablePtr = CGF.LoadCXXThis();
|
2015-09-04 07:02:30 +08:00
|
|
|
|
2015-09-16 08:38:22 +08:00
|
|
|
CharUnits::QuantityType PoisonSize =
|
|
|
|
Context.toCharUnitsFromBits(CGF.PointerWidthInBits).getQuantity();
|
|
|
|
// Pass in void pointer and size of region as arguments to runtime
|
|
|
|
// function
|
|
|
|
EmitSanitizerDtorCallback(CGF, VTablePtr, PoisonSize);
|
2015-09-04 07:02:30 +08:00
|
|
|
}
|
2015-09-16 08:38:22 +08:00
|
|
|
};
|
2015-10-07 07:40:43 +08:00
|
|
|
} // end anonymous namespace
|
2010-07-21 13:30:47 +08:00
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Emit all code that comes at the end of class's
|
2009-12-25 06:46:43 +08:00
|
|
|
/// destructor. This is to call destructors on members and base classes
|
|
|
|
/// in reverse order of their construction.
|
2017-10-13 09:55:36 +08:00
|
|
|
///
|
|
|
|
/// For a deleting destructor, this also handles the case where a destroying
|
|
|
|
/// operator delete completely overrides the definition.
|
2010-07-21 13:30:47 +08:00
|
|
|
/// Push the cleanups that implement one phase of destruction for \p DD.
///
/// Cleanups are pushed in forward order so that the EHStack pops (and runs)
/// them in reverse order, which is what the language requires for bases and
/// fields.
///
/// \param DD the destructor whose epilogue is being emitted
/// \param DtorType which phase of destruction to emit:
///   Dtor_Deleting calls the operator delete Sema attached to the destructor;
///   Dtor_Complete destroys the virtual bases;
///   Dtor_Base destroys the non-virtual bases and the direct fields.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  // Trivial destructors normally get no epilogue at all; dllexport is the
  // one case where we still emit a body for one.
  assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) &&
         "Should not emit dtor epilogue for non-exported trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EnterDtorCleanups");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether this is a deleting destructor.
      if (DD->getOperatorDelete()->isDestroyingOperatorDelete())
        // A destroying operator delete must run before members are
        // destroyed, so emit the conditional call eagerly and return
        // through the cleanups if the flag was set.
        EmitConditionalDtorDeleteCall(*this, CXXStructorImplicitParamValue,
                                      /*ReturnAfterDelete*/true);
      else
        // Defer the (conditional) delete call until the cleanups run.
        EHStack.pushCleanup<CallDtorDeleteConditional>(
            NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      if (DD->getOperatorDelete()->isDestroyingOperatorDelete()) {
        // Destroying operator delete: it destroys the object itself, so
        // call it directly and branch straight to the function exit.
        const CXXRecordDecl *ClassDecl = DD->getParent();
        EmitDeleteCall(DD->getOperatorDelete(),
                       LoadThisForDtorDelete(*this, DD),
                       getContext().getTagDeclType(ClassDecl));
        EmitBranchThroughCleanup(ReturnBlock);
      } else {
        // Ordinary operator delete: run it unconditionally as a cleanup.
        EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
      }
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {
    // Poison the vtable pointer such that access after the base
    // and member destructors are invoked is invalid.
    if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
        SanOpts.has(SanitizerKind::Memory) && ClassDecl->getNumVBases() &&
        ClassDecl->isPolymorphic())
      EHStack.pushCleanup<SanitizeDtorVTable>(NormalAndEHCleanup, DD);

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (const auto &Base : ClassDecl->vbases()) {
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);
  // Poison the vtable pointer if it has no virtual bases, but inherits
  // virtual functions.
  if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
      SanOpts.has(SanitizerKind::Memory) && !ClassDecl->getNumVBases() &&
      ClassDecl->isPolymorphic())
    EHStack.pushCleanup<SanitizeDtorVTable>(NormalAndEHCleanup, DD);

  // Destroy non-virtual bases.
  for (const auto &Base : ClassDecl->bases()) {
    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Poison fields such that access after their destructors are
  // invoked, and before the base class destructor runs, is invalid.
  if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
      SanOpts.has(SanitizerKind::Memory))
    EHStack.pushCleanup<SanitizeDtorMembers>(NormalAndEHCleanup, DD);

  // Destroy direct fields.
  for (const auto *Field : ClassDecl->fields()) {
    QualType type = Field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    // The cleanup kind (EH-only vs. normal+EH) depends on how the field's
    // type is destroyed (e.g. ARC lifetime vs. C++ destructor).
    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}
|
|
|
|
|
2011-07-13 14:10:41 +08:00
|
|
|
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
|
|
|
|
/// constructor for each of several members of an array.
|
2010-07-21 09:10:17 +08:00
|
|
|
///
|
2011-07-13 14:10:41 +08:00
|
|
|
/// \param ctor the constructor to call for each element
|
|
|
|
/// \param arrayType the type of the array to initialize
|
|
|
|
/// \param arrayBegin an arrayType*
|
|
|
|
/// \param zeroInitialize true if each element should be
|
|
|
|
/// zero-initialized before it is constructed
|
2014-08-22 04:26:47 +08:00
|
|
|
void CodeGenFunction::EmitCXXAggrConstructorCall(
|
2016-04-29 17:39:50 +08:00
|
|
|
const CXXConstructorDecl *ctor, const ArrayType *arrayType,
|
2018-07-28 23:33:03 +08:00
|
|
|
Address arrayBegin, const CXXConstructExpr *E, bool NewPointerIsChecked,
|
|
|
|
bool zeroInitialize) {
|
2011-07-13 14:10:41 +08:00
|
|
|
QualType elementType;
|
|
|
|
llvm::Value *numElements =
|
|
|
|
emitArrayLength(arrayType, elementType, arrayBegin);
|
|
|
|
|
2018-07-28 23:33:03 +08:00
|
|
|
EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E,
|
|
|
|
NewPointerIsChecked, zeroInitialize);
|
2010-01-02 04:29:01 +08:00
|
|
|
}
|
|
|
|
|
2011-07-13 14:10:41 +08:00
|
|
|
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBase a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                                 llvm::Value *numElements,
                                                 Address arrayBase,
                                                 const CXXConstructExpr *E,
                                                 bool NewPointerIsChecked,
                                                 bool zeroInitialize) {
  // It's legal for numElements to be zero.  This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays.  There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = nullptr;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors are loopBB for now: the "skip the loop" target
    // (successor 0) doesn't exist yet.  It is patched to the continuation
    // block via setSuccessor() once that block is created below.
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayBegin = arrayBase.getPointer();
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  // The alignment of the base, adjusted by the size of a single element,
  // provides a conservative estimate of the alignment of every element.
  // (This assumes we never start tracking offsetted alignments.)
  //
  // Note that these are complete objects and so we don't need to
  // use the non-virtual size or alignment.
  QualType type = getContext().getTypeDeclType(ctor->getParent());
  CharUnits eltAlignment =
    arrayBase.getAlignment()
             .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
  Address curAddr = Address(cur, eltAlignment);

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(curAddr, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.
  {
    // Scope the per-element cleanups so temporaries die before the next
    // iteration, per [class.temporary]p4 above.
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.  If a later element's construction throws,
    // the elements constructed so far ([arrayBegin, cur)) are destroyed.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, eltAlignment,
                                     *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, curAddr, E,
                           AggValueSlot::DoesNotOverlap, NewPointerIsChecked);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  // Use the builder's current block, not loopBB: the constructor call may
  // have emitted additional blocks (e.g. for cleanups).
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}
|
|
|
|
|
2011-07-09 09:37:26 +08:00
|
|
|
void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address addr,
|
2011-07-09 09:37:26 +08:00
|
|
|
QualType type) {
|
|
|
|
const RecordType *rtype = type->castAs<RecordType>();
|
|
|
|
const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
|
|
|
|
const CXXDestructorDecl *dtor = record->getDestructor();
|
|
|
|
assert(!dtor->isTrivial());
|
|
|
|
CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
|
2013-01-31 13:50:40 +08:00
|
|
|
/*Delegating=*/false, addr);
|
2011-07-09 09:37:26 +08:00
|
|
|
}
|
|
|
|
|
2014-08-22 04:26:47 +08:00
|
|
|
void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
|
|
|
|
CXXCtorType Type,
|
|
|
|
bool ForVirtualBase,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
bool Delegating, Address This,
|
2018-04-06 04:52:58 +08:00
|
|
|
const CXXConstructExpr *E,
|
2018-07-28 23:33:03 +08:00
|
|
|
AggValueSlot::Overlap_t Overlap,
|
|
|
|
bool NewPointerIsChecked) {
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
CallArgList Args;
|
|
|
|
|
2018-12-13 18:15:27 +08:00
|
|
|
LangAS SlotAS = E->getType().getAddressSpace();
|
2019-01-11 09:54:53 +08:00
|
|
|
QualType ThisType = D->getThisType();
|
2018-12-13 18:15:27 +08:00
|
|
|
LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace();
|
|
|
|
llvm::Value *ThisPtr = This.getPointer();
|
|
|
|
if (SlotAS != ThisAS) {
|
|
|
|
unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
|
|
|
|
llvm::Type *NewType =
|
|
|
|
ThisPtr->getType()->getPointerElementType()->getPointerTo(TargetThisAS);
|
|
|
|
ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
|
|
|
|
ThisAS, SlotAS, NewType);
|
|
|
|
}
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
// Push the this ptr.
|
2019-01-11 09:54:53 +08:00
|
|
|
Args.add(RValue::get(ThisPtr), D->getThisType());
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
|
|
|
|
// If this is a trivial constructor, emit a memcpy now before we lose
|
|
|
|
// the alignment information on the argument.
|
|
|
|
// FIXME: It would be better to preserve alignment information into CallArg.
|
|
|
|
if (isMemcpyEquivalentSpecialMember(D)) {
|
|
|
|
assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
|
|
|
|
|
|
|
|
const Expr *Arg = E->getArg(0);
|
2018-01-25 22:21:55 +08:00
|
|
|
LValue Src = EmitLValue(Arg);
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
QualType DestTy = getContext().getTypeDeclType(D->getParent());
|
2018-01-25 22:21:55 +08:00
|
|
|
LValue Dest = MakeAddrLValue(This, DestTy);
|
2018-04-06 04:52:58 +08:00
|
|
|
EmitAggregateCopyCtor(Dest, Src, Overlap);
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add the rest of the user-supplied arguments.
|
|
|
|
const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
|
2017-02-01 10:21:07 +08:00
|
|
|
EvaluationOrder Order = E->isListInitialization()
|
|
|
|
? EvaluationOrder::ForceLeftToRight
|
|
|
|
: EvaluationOrder::Default;
|
|
|
|
EmitCallArgs(Args, FPT, E->arguments(), E->getConstructor(),
|
|
|
|
/*ParamsToSkip*/ 0, Order);
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
|
2018-04-06 04:52:58 +08:00
|
|
|
EmitCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args,
|
2018-07-28 23:33:03 +08:00
|
|
|
Overlap, E->getExprLoc(), NewPointerIsChecked);
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Determine whether an already-evaluated argument list can be forwarded
/// to another constructor variant of \p Ctor (e.g. when emitting an
/// inheriting constructor as a delegating call).
static bool canEmitDelegateCallArgs(CodeGenFunction &CGF,
                                    const CXXConstructorDecl *Ctor,
                                    CXXCtorType Type, CallArgList &Args) {
  // Variadic argument lists cannot be forwarded.
  if (Ctor->isVariadic())
    return false;

  // Under callee-cleanup ABIs, forwarding is unsafe when the callee owns
  // argument destruction...
  if (CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // ...for any parameter whose type needs non-trivial destruction...
    for (const ParmVarDecl *Param : Ctor->parameters())
      if (Param->getType().isDestructedType())
        return false;

    // ...and likewise when the call would use an inalloca argument block.
    const CGFunctionInfo &FnInfo =
        CGF.CGM.getTypes().arrangeCXXConstructorCall(Args, Ctor, Type, 0, 0);
    if (FnInfo.usesInAlloca())
      return false;
  }

  // Anything else should be OK.
  return true;
}
|
|
|
|
|
|
|
|
void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
|
|
|
|
CXXCtorType Type,
|
|
|
|
bool ForVirtualBase,
|
|
|
|
bool Delegating,
|
|
|
|
Address This,
|
2018-04-06 04:52:58 +08:00
|
|
|
CallArgList &Args,
|
2018-06-25 13:48:04 +08:00
|
|
|
AggValueSlot::Overlap_t Overlap,
|
2018-07-28 23:33:03 +08:00
|
|
|
SourceLocation Loc,
|
|
|
|
bool NewPointerIsChecked) {
|
2015-09-15 08:37:06 +08:00
|
|
|
const CXXRecordDecl *ClassDecl = D->getParent();
|
|
|
|
|
2018-07-28 23:33:03 +08:00
|
|
|
if (!NewPointerIsChecked)
|
|
|
|
EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This.getPointer(),
|
|
|
|
getContext().getRecordType(ClassDecl), CharUnits::Zero());
|
2010-02-06 08:25:16 +08:00
|
|
|
|
2015-04-30 03:26:57 +08:00
|
|
|
if (D->isTrivial() && D->isDefaultConstructor()) {
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
assert(Args.size() == 1 && "trivial default ctor with args");
|
2015-04-30 03:26:57 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this is a trivial constructor, just emit what's needed. If this is a
|
|
|
|
// union copy constructor, we must emit a memcpy, because the AST does not
|
|
|
|
// model that copy.
|
|
|
|
if (isMemcpyEquivalentSpecialMember(D)) {
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
assert(Args.size() == 2 && "unexpected argcount for trivial ctor");
|
2010-02-06 08:25:16 +08:00
|
|
|
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
|
2018-03-15 23:25:19 +08:00
|
|
|
Address Src(Args[1].getRValue(*this).getScalarVal(),
|
|
|
|
getNaturalTypeAlignment(SrcTy));
|
2018-01-25 22:21:55 +08:00
|
|
|
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
|
2015-09-15 08:37:06 +08:00
|
|
|
QualType DestTy = getContext().getTypeDeclType(ClassDecl);
|
2018-01-25 22:21:55 +08:00
|
|
|
LValue DestLVal = MakeAddrLValue(This, DestTy);
|
2018-04-06 04:52:58 +08:00
|
|
|
EmitAggregateCopyCtor(DestLVal, SrcLVal, Overlap);
|
2010-01-02 04:29:01 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-02-24 06:07:35 +08:00
|
|
|
bool PassPrototypeArgs = true;
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
// Check whether we can actually emit the constructor before trying to do so.
|
|
|
|
if (auto Inherited = D->getInheritedConstructor()) {
|
2017-02-24 06:07:35 +08:00
|
|
|
PassPrototypeArgs = getTypes().inheritingCtorHasParams(Inherited, Type);
|
|
|
|
if (PassPrototypeArgs && !canEmitDelegateCallArgs(*this, D, Type, Args)) {
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
EmitInlinedInheritingCXXConstructorCall(D, Type, ForVirtualBase,
|
|
|
|
Delegating, Args);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2013-12-18 03:46:40 +08:00
|
|
|
|
|
|
|
// Insert any ABI-specific implicit constructor arguments.
|
2017-02-23 04:28:02 +08:00
|
|
|
CGCXXABI::AddedStructorArgs ExtraArgs =
|
|
|
|
CGM.getCXXABI().addImplicitConstructorArgs(*this, D, Type, ForVirtualBase,
|
|
|
|
Delegating, Args);
|
2013-12-18 03:46:40 +08:00
|
|
|
|
|
|
|
// Emit the call.
|
2016-10-27 07:46:34 +08:00
|
|
|
llvm::Constant *CalleePtr =
|
|
|
|
CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
|
2017-02-23 04:28:02 +08:00
|
|
|
const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall(
|
2017-02-24 06:07:35 +08:00
|
|
|
Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs);
|
2018-11-13 23:48:08 +08:00
|
|
|
CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(D, Type));
|
2016-10-27 07:46:34 +08:00
|
|
|
EmitCall(Info, Callee, ReturnValueSlot(), Args);
|
2015-09-15 08:37:06 +08:00
|
|
|
|
|
|
|
// Generate vtable assumptions if we're constructing a complete object
|
|
|
|
// with a vtable. We don't do this for base subobjects for two reasons:
|
|
|
|
// first, it's incorrect for classes with virtual bases, and second, we're
|
|
|
|
// about to overwrite the vptrs anyway.
|
|
|
|
// We also have to make sure if we can refer to vtable:
|
|
|
|
// - Otherwise we can refer to vtable if it's safe to speculatively emit.
|
|
|
|
// FIXME: If vtable is used by ctor/dtor, or if vtable is external and we are
|
|
|
|
// sure that definition of vtable is not hidden,
|
|
|
|
// then we are always safe to refer to it.
|
2015-09-29 04:30:22 +08:00
|
|
|
// FIXME: It looks like InstCombine is very inefficient on dealing with
|
|
|
|
// assumes. Make assumption loads require -fstrict-vtable-pointers temporarily.
|
2015-09-15 08:37:06 +08:00
|
|
|
if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
|
|
|
|
ClassDecl->isDynamicClass() && Type != Ctor_Base &&
|
2015-09-29 04:30:22 +08:00
|
|
|
CGM.getCXXABI().canSpeculativelyEmitVTable(ClassDecl) &&
|
|
|
|
CGM.getCodeGenOpts().StrictVTablePointers)
|
2015-09-15 08:37:06 +08:00
|
|
|
EmitVTableAssumptionLoads(ClassDecl, This);
|
|
|
|
}
|
|
|
|
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
/// Emit a call to a constructor inherited from a base class (C++11 inheriting
/// constructors), forwarding the enclosing constructor's arguments to it.
///
/// \param D the base-class constructor being inherited.
/// \param ForVirtualBase true if the base subobject is a virtual base.
/// \param This the address of the object under construction.
/// \param InheritedFromVBase true if the inherited constructor comes from a
///        virtual base of the class that wrote the using-declaration.
/// \param E the CXXInheritedCtorInitExpr that triggered this call; used for
///        source locations and delegate-argument emission.
void CodeGenFunction::EmitInheritedCXXConstructorCall(
    const CXXConstructorDecl *D, bool ForVirtualBase, Address This,
    bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) {
  CallArgList Args;
  CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType());

  // Forward the parameters.
  if (InheritedFromVBase &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // Nothing to do; this construction is not responsible for constructing
    // the base class containing the inherited constructor.
    // FIXME: Can we just pass undef's for the remaining arguments if we don't
    // have constructor variants?
    Args.push_back(ThisArg);
  } else if (!CXXInheritedCtorInitExprArgs.empty()) {
    // The inheriting constructor was inlined; just inject its arguments.
    // CXXInheritedCtorInitExprArgs was stashed by
    // EmitInlinedInheritingCXXConstructorCall; slot 0 is the 'this' slot,
    // which must be replaced with our own 'this' pointer.
    assert(CXXInheritedCtorInitExprArgs.size() >= D->getNumParams() &&
           "wrong number of parameters for inherited constructor call");
    Args = CXXInheritedCtorInitExprArgs;
    Args[0] = ThisArg;
  } else {
    // The inheriting constructor was not inlined. Emit delegating arguments.
    Args.push_back(ThisArg);
    const auto *OuterCtor = cast<CXXConstructorDecl>(CurCodeDecl);
    assert(OuterCtor->getNumParams() == D->getNumParams());
    assert(!OuterCtor->isVariadic() && "should have been inlined");

    // Forward each parameter of the enclosing (shadow) constructor; the
    // parameter lists of the shadow and inherited constructors correspond
    // one-to-one (asserted above).
    for (const auto *Param : OuterCtor->parameters()) {
      assert(getContext().hasSameUnqualifiedType(
          OuterCtor->getParamDecl(Param->getFunctionScopeIndex())->getType(),
          Param->getType()));
      EmitDelegateCallArg(Args, Param, E->getLocation());

      // Forward __attribute__(pass_object_size).
      if (Param->hasAttr<PassObjectSizeAttr>()) {
        auto *POSParam = SizeArguments[Param];
        assert(POSParam && "missing pass_object_size value for forwarding");
        EmitDelegateCallArg(Args, POSParam, E->getLocation());
      }
    }
  }

  // The inherited constructor initializes only the base subobject, so it is
  // always invoked as a base-object (Ctor_Base) constructor.
  EmitCXXConstructorCall(D, Ctor_Base, ForVirtualBase, /*Delegating*/false,
                         This, Args, AggValueSlot::MayOverlap,
                         E->getLocation(), /*NewPointerIsChecked*/true);
}
|
|
|
|
|
|
|
|
/// Emit an inheriting constructor's initialization inline into its caller.
///
/// Used when the shadow inheriting constructor cannot be emitted as a real
/// function (inalloca calls, variadic calls, callee parameter cleanup). The
/// caller's arguments in \p Args are stashed so the nested
/// EmitInheritedCXXConstructorCall (reached via EmitCtorPrologue) can inject
/// them, then a simplified prolog is emitted and the constructor initializers
/// are run directly in the current function.
///
/// NOTE: the statement order here is load-bearing: args are saved before the
/// ABI adds implicit arguments, and parameter decls are emitted before the
/// ABI instance-function prolog runs.
void CodeGenFunction::EmitInlinedInheritingCXXConstructorCall(
    const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase,
    bool Delegating, CallArgList &Args) {
  GlobalDecl GD(Ctor, CtorType);
  // Scope objects: switch CurGD/state to the inlined ctor, attribute debug
  // locations to the inlined callee, and bound any cleanups to this call.
  InlinedInheritingConstructorScope Scope(*this, GD);
  ApplyInlineDebugLocation DebugScope(*this, GD);
  RunCleanupsScope RunCleanups(*this);

  // Save the arguments to be passed to the inherited constructor.
  CXXInheritedCtorInitExprArgs = Args;

  FunctionArgList Params;
  QualType RetType = BuildFunctionArgList(CurGD, Params);
  FnRetTy = RetType;

  // Insert any ABI-specific implicit constructor arguments.
  CGM.getCXXABI().addImplicitConstructorArgs(*this, Ctor, CtorType,
                                             ForVirtualBase, Delegating, Args);

  // Emit a simplified prolog. We only need to emit the implicit params.
  assert(Args.size() >= Params.size() && "too few arguments for call");
  for (unsigned I = 0, N = Args.size(); I != N; ++I) {
    if (I < Params.size() && isa<ImplicitParamDecl>(Params[I])) {
      const RValue &RV = Args[I].getRValue(*this);
      assert(!RV.isComplex() && "complex indirect params not supported");
      ParamValue Val = RV.isScalar()
                           ? ParamValue::forDirect(RV.getScalarVal())
                           : ParamValue::forIndirect(RV.getAggregateAddress());
      EmitParmDecl(*Params[I], Val, I + 1);
    }
  }

  // Create a return value slot if the ABI implementation wants one.
  // FIXME: This is dumb, we should ask the ABI not to try to set the return
  // value instead.
  if (!RetType->isVoidType())
    ReturnValue = CreateIRTemp(RetType, "retval.inhctor");

  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  // The ABI prolog computed the adjusted 'this'; publish it for the body.
  CXXThisValue = CXXABIThisValue;

  // Directly emit the constructor initializers.
  EmitCtorPrologue(Ctor, CtorType, Params);
}
|
|
|
|
|
2015-09-15 08:37:06 +08:00
|
|
|
void CodeGenFunction::EmitVTableAssumptionLoad(const VPtr &Vptr, Address This) {
|
|
|
|
llvm::Value *VTableGlobal =
|
|
|
|
CGM.getCXXABI().getVTableAddressPoint(Vptr.Base, Vptr.VTableClass);
|
|
|
|
if (!VTableGlobal)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// We can just use the base offset in the complete class.
|
|
|
|
CharUnits NonVirtualOffset = Vptr.Base.getBaseOffset();
|
|
|
|
|
|
|
|
if (!NonVirtualOffset.isZero())
|
|
|
|
This =
|
|
|
|
ApplyNonVirtualAndVirtualOffset(*this, This, NonVirtualOffset, nullptr,
|
|
|
|
Vptr.VTableClass, Vptr.NearestVBase);
|
|
|
|
|
2015-09-16 05:46:55 +08:00
|
|
|
llvm::Value *VPtrValue =
|
|
|
|
GetVTablePtr(This, VTableGlobal->getType(), Vptr.VTableClass);
|
2015-09-15 08:37:06 +08:00
|
|
|
llvm::Value *Cmp =
|
|
|
|
Builder.CreateICmpEQ(VPtrValue, VTableGlobal, "cmp.vtables");
|
|
|
|
Builder.CreateAssumption(Cmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
void CodeGenFunction::EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl,
|
|
|
|
Address This) {
|
|
|
|
if (CGM.getCXXABI().doStructorsInitializeVPtrs(ClassDecl))
|
|
|
|
for (const VPtr &Vptr : getVTablePointers(ClassDecl))
|
|
|
|
EmitVTableAssumptionLoad(Vptr, This);
|
2010-01-02 04:29:01 +08:00
|
|
|
}
|
|
|
|
|
2010-11-14 05:53:34 +08:00
|
|
|
void
|
|
|
|
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address This, Address Src,
|
|
|
|
const CXXConstructExpr *E) {
|
2013-12-05 03:23:12 +08:00
|
|
|
const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2010-11-14 05:53:34 +08:00
|
|
|
CallArgList Args;
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2010-11-14 05:53:34 +08:00
|
|
|
// Push the this ptr.
|
2019-01-11 09:54:53 +08:00
|
|
|
Args.add(RValue::get(This.getPointer()), D->getThisType());
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2010-11-14 05:53:34 +08:00
|
|
|
// Push the src ptr.
|
2014-01-21 04:26:09 +08:00
|
|
|
QualType QT = *(FPT->param_type_begin());
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *t = CGM.getTypes().ConvertType(QT);
|
2010-11-14 05:53:34 +08:00
|
|
|
Src = Builder.CreateBitCast(Src, t);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Args.add(RValue::get(Src.getPointer()), QT);
|
2013-12-05 03:23:12 +08:00
|
|
|
|
2010-11-14 05:53:34 +08:00
|
|
|
// Skip over first argument (Src).
|
2015-07-22 02:37:18 +08:00
|
|
|
EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(),
|
2014-09-09 01:22:45 +08:00
|
|
|
/*ParamsToSkip*/ 1);
|
2013-12-05 03:23:12 +08:00
|
|
|
|
2018-07-28 23:33:03 +08:00
|
|
|
EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase*/false,
|
|
|
|
/*Delegating*/false, This, Args,
|
|
|
|
AggValueSlot::MayOverlap, E->getExprLoc(),
|
|
|
|
/*NewPointerIsChecked*/false);
|
2010-11-14 05:53:34 +08:00
|
|
|
}
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
void
|
|
|
|
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
|
|
|
|
CXXCtorType CtorType,
|
2013-10-02 10:29:49 +08:00
|
|
|
const FunctionArgList &Args,
|
|
|
|
SourceLocation Loc) {
|
2010-02-23 08:48:20 +08:00
|
|
|
CallArgList DelegateArgs;
|
|
|
|
|
|
|
|
FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
|
|
|
|
assert(I != E && "no parameters to constructor");
|
|
|
|
|
|
|
|
// this
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
Address This = LoadCXXThisAddress();
|
|
|
|
DelegateArgs.add(RValue::get(This.getPointer()), (*I)->getType());
|
2010-02-23 08:48:20 +08:00
|
|
|
++I;
|
|
|
|
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
// FIXME: The location of the VTT parameter in the parameter list is
|
|
|
|
// specific to the Itanium ABI and shouldn't be hardcoded here.
|
|
|
|
if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
|
|
|
|
assert(I != E && "cannot skip vtt parameter, already done with args");
|
|
|
|
assert((*I)->getType()->isPointerType() &&
|
|
|
|
"skipping parameter not of vtt type");
|
|
|
|
++I;
|
2010-02-23 08:48:20 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Explicit arguments.
|
|
|
|
for (; I != E; ++I) {
|
2011-03-12 04:59:21 +08:00
|
|
|
const VarDecl *param = *I;
|
2013-10-02 10:29:49 +08:00
|
|
|
// FIXME: per-argument source location
|
|
|
|
EmitDelegateCallArg(DelegateArgs, param, Loc);
|
2010-02-23 08:48:20 +08:00
|
|
|
}
|
|
|
|
|
P0136R1, DR1573, DR1645, DR1715, DR1736, DR1903, DR1941, DR1959, DR1991:
Replace inheriting constructors implementation with new approach, voted into
C++ last year as a DR against C++11.
Instead of synthesizing a set of derived class constructors for each inherited
base class constructor, we make the constructors of the base class visible to
constructor lookup in the derived class, using the normal rules for
using-declarations.
For constructors, UsingShadowDecl now has a ConstructorUsingShadowDecl derived
class that tracks the requisite additional information. We create shadow
constructors (not found by name lookup) in the derived class to model the
actual initialization, and have a new expression node,
CXXInheritedCtorInitExpr, to model the initialization of a base class from such
a constructor. (This initialization is special because it performs real perfect
forwarding of arguments.)
In cases where argument forwarding is not possible (for inalloca calls,
variadic calls, and calls with callee parameter cleanup), the shadow inheriting
constructor is not emitted and instead we directly emit the initialization code
into the caller of the inherited constructor.
Note that this new model is not perfectly compatible with the old model in some
corner cases. In particular:
* if B inherits a private constructor from A, and C uses that constructor to
construct a B, then we previously required that A befriends B and B
befriends C, but the new rules require A to befriend C directly, and
* if a derived class has its own constructors (and so its implicit default
constructor is suppressed), it may still inherit a default constructor from
a base class
llvm-svn: 274049
2016-06-29 03:03:57 +08:00
|
|
|
EmitCXXConstructorCall(Ctor, CtorType, /*ForVirtualBase=*/false,
|
2018-04-06 04:52:58 +08:00
|
|
|
/*Delegating=*/true, This, DelegateArgs,
|
2018-07-28 23:33:03 +08:00
|
|
|
AggValueSlot::MayOverlap, Loc,
|
|
|
|
/*NewPointerIsChecked=*/true);
|
2010-02-23 08:48:20 +08:00
|
|
|
}
|
|
|
|
|
2011-05-04 07:05:34 +08:00
|
|
|
namespace {
|
2015-08-19 06:40:54 +08:00
|
|
|
struct CallDelegatingCtorDtor final : EHScopeStack::Cleanup {
|
2011-05-04 07:05:34 +08:00
|
|
|
const CXXDestructorDecl *Dtor;
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Addr;
|
2011-05-04 07:05:34 +08:00
|
|
|
CXXDtorType Type;
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CallDelegatingCtorDtor(const CXXDestructorDecl *D, Address Addr,
|
2011-05-04 07:05:34 +08:00
|
|
|
CXXDtorType Type)
|
|
|
|
: Dtor(D), Addr(Addr), Type(Type) {}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2011-05-04 07:05:34 +08:00
|
|
|
CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
|
2013-01-31 13:50:40 +08:00
|
|
|
/*Delegating=*/true, Addr);
|
2011-05-04 07:05:34 +08:00
|
|
|
}
|
|
|
|
};
|
2015-10-07 07:40:43 +08:00
|
|
|
} // end anonymous namespace
|
2011-05-04 07:05:34 +08:00
|
|
|
|
2011-05-01 15:04:31 +08:00
|
|
|
/// Emit the body of a delegating constructor: evaluate the single
/// delegating initializer (which constructs *this via the target
/// constructor), then, if exceptions are enabled and the class has a
/// non-trivial destructor, push a cleanup so the fully-constructed
/// object is destroyed if the rest of this constructor throws.
/// \param Ctor the delegating constructor being emitted.
/// \param Args the formal arguments of \p Ctor (currently unused here;
///        the initializer expression forwards them itself).
void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  Address ThisPtr = LoadCXXThisAddress();

  // Build an aggregate slot over *this for the target-constructor call.
  // IsDestructed: ownership of the result passes to *this.
  // MayOverlap: the target constructs the complete object in place.
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          AggValueSlot::MayOverlap,
                          AggValueSlot::IsNotZeroed,
                          // Checks are made by the code that calls constructor.
                          AggValueSlot::IsSanitizerChecked);

  // A delegating constructor has exactly one initializer: the
  // delegated-to constructor call. Evaluate it into *this.
  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    // Destroy with the variant matching the constructor variant being
    // emitted: a complete-object ctor pairs with the complete dtor.
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}
|
2011-05-01 15:04:31 +08:00
|
|
|
|
2010-01-02 04:29:01 +08:00
|
|
|
/// Emit a call to the destructor \p DD on the object at \p This,
/// delegating the actual call lowering to the target C++ ABI (which
/// handles the complete/base/deleting variants and any implicit
/// arguments such as the VTT).
/// \param Type which destructor variant to invoke.
/// \param ForVirtualBase true when destroying a virtual base subobject.
/// \param Delegating true when this call is the delegating-destructor
///        dispatch from a complete-object destructor.
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            Address This) {
  CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
                                     Delegating, This);
}
|
|
|
|
|
2010-07-21 09:41:18 +08:00
|
|
|
namespace {
|
2015-08-19 06:40:54 +08:00
|
|
|
struct CallLocalDtor final : EHScopeStack::Cleanup {
|
2010-07-21 09:41:18 +08:00
|
|
|
const CXXDestructorDecl *Dtor;
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Addr;
|
2010-07-21 09:41:18 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CallLocalDtor(const CXXDestructorDecl *D, Address Addr)
|
2010-07-21 09:41:18 +08:00
|
|
|
: Dtor(D), Addr(Addr) {}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2010-07-21 09:41:18 +08:00
|
|
|
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
|
2013-01-31 13:50:40 +08:00
|
|
|
/*ForVirtualBase=*/false,
|
|
|
|
/*Delegating=*/false, Addr);
|
2010-07-21 09:41:18 +08:00
|
|
|
}
|
|
|
|
};
|
2016-02-11 03:11:58 +08:00
|
|
|
} // end anonymous namespace
|
2010-07-21 09:41:18 +08:00
|
|
|
|
2010-07-21 14:29:51 +08:00
|
|
|
/// Push a cleanup that destroys the object at \p Addr with destructor
/// \p D. The cleanup fires on both normal and exceptional exits from
/// the enclosing scope.
void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            Address Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
/// Push a destructor cleanup for an object of type \p T at \p Addr.
/// A no-op when \p T is not a class type or its destructor is trivial.
void CodeGenFunction::PushDestructorCleanup(QualType T, Address Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  // Nothing to do unless there is a class with a non-trivial destructor.
  if (!ClassDecl || ClassDecl->hasTrivialDestructor())
    return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}
|
|
|
|
|
2015-09-15 08:37:06 +08:00
|
|
|
void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
|
2010-03-29 03:40:00 +08:00
|
|
|
// Compute the address point.
|
2013-09-27 22:48:01 +08:00
|
|
|
llvm::Value *VTableAddressPoint =
|
|
|
|
CGM.getCXXABI().getVTableAddressPointInStructor(
|
2015-09-15 08:37:06 +08:00
|
|
|
*this, Vptr.VTableClass, Vptr.Base, Vptr.NearestVBase);
|
|
|
|
|
2013-09-27 22:48:01 +08:00
|
|
|
if (!VTableAddressPoint)
|
|
|
|
return;
|
2010-03-29 03:40:00 +08:00
|
|
|
|
|
|
|
// Compute where to store the address point.
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Value *VirtualOffset = nullptr;
|
2011-03-23 08:45:26 +08:00
|
|
|
CharUnits NonVirtualOffset = CharUnits::Zero();
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2015-09-15 08:37:06 +08:00
|
|
|
if (CGM.getCXXABI().isVirtualOffsetNeededForVTableField(*this, Vptr)) {
|
2010-04-21 02:05:10 +08:00
|
|
|
// We need to use the virtual base offset offset because the virtual base
|
|
|
|
// might have a different offset in the most derived class.
|
2015-09-15 08:37:06 +08:00
|
|
|
|
|
|
|
VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(
|
|
|
|
*this, LoadCXXThisAddress(), Vptr.VTableClass, Vptr.NearestVBase);
|
|
|
|
NonVirtualOffset = Vptr.OffsetFromNearestVBase;
|
2010-04-21 02:05:10 +08:00
|
|
|
} else {
|
2010-05-03 08:29:58 +08:00
|
|
|
// We can just use the base offset in the complete class.
|
2015-09-15 08:37:06 +08:00
|
|
|
NonVirtualOffset = Vptr.Base.getBaseOffset();
|
2010-04-21 02:05:10 +08:00
|
|
|
}
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2010-05-03 08:29:58 +08:00
|
|
|
// Apply the offsets.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address VTableField = LoadCXXThisAddress();
|
2015-05-20 23:53:59 +08:00
|
|
|
|
2011-03-23 08:45:26 +08:00
|
|
|
if (!NonVirtualOffset.isZero() || VirtualOffset)
|
2015-09-15 08:37:06 +08:00
|
|
|
VTableField = ApplyNonVirtualAndVirtualOffset(
|
|
|
|
*this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass,
|
|
|
|
Vptr.NearestVBase);
|
2010-04-21 00:22:16 +08:00
|
|
|
|
2014-12-04 05:00:21 +08:00
|
|
|
// Finally, store the address point. Use the same LLVM types as the field to
|
|
|
|
// support optimization.
|
|
|
|
llvm::Type *VTablePtrTy =
|
|
|
|
llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
|
|
|
|
->getPointerTo()
|
|
|
|
->getPointerTo();
|
|
|
|
VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
|
|
|
|
VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
|
2015-09-15 08:37:06 +08:00
|
|
|
|
2012-03-27 01:03:51 +08:00
|
|
|
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
|
2017-11-27 17:39:29 +08:00
|
|
|
TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTablePtrTy);
|
|
|
|
CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
|
2015-09-16 05:46:55 +08:00
|
|
|
if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
|
|
|
|
CGM.getCodeGenOpts().StrictVTablePointers)
|
|
|
|
CGM.DecorateInstructionWithInvariantGroup(Store, Vptr.VTableClass);
|
2010-03-29 03:40:00 +08:00
|
|
|
}
|
|
|
|
|
2015-09-15 08:37:06 +08:00
|
|
|
/// Collect every vtable pointer that must be initialized for a complete
/// object of type \p VTableClass.
CodeGenFunction::VPtrsVector
CodeGenFunction::getVTablePointers(const CXXRecordDecl *VTableClass) {
  CodeGenFunction::VPtrsVector Result;
  VisitedVirtualBasesSetTy VisitedVBases;
  // Start the walk at the most-derived class itself: offset zero, not yet
  // inside any virtual base, and not a primary base of anything.
  getVTablePointers(BaseSubobject(VTableClass, CharUnits::Zero()),
                    /*NearestVBase=*/nullptr,
                    /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                    /*BaseIsNonVirtualPrimaryBase=*/false, VTableClass,
                    VisitedVBases, Result);
  return Result;
}
|
|
|
|
|
|
|
|
/// Recursively walk the class hierarchy of \p VTableClass and append to
/// \p Vptrs one entry for every base subobject whose vtable pointer needs a
/// separate store.
///
/// \param Base the subobject being visited, with its offset in the complete
///        object.
/// \param NearestVBase the closest enclosing virtual base of \p Base, or null
///        if \p Base is not contained in any virtual base.
/// \param OffsetFromNearestVBase offset of \p Base relative to that virtual
///        base (reset to zero each time a virtual base is entered).
/// \param BaseIsNonVirtualPrimaryBase true when \p Base is the non-virtual
///        primary base of its parent and therefore shares the parent's vptr.
/// \param VBases set of virtual bases already visited, so each virtual base
///        is emitted only once per complete object.
void CodeGenFunction::getVTablePointers(BaseSubobject Base,
                                        const CXXRecordDecl *NearestVBase,
                                        CharUnits OffsetFromNearestVBase,
                                        bool BaseIsNonVirtualPrimaryBase,
                                        const CXXRecordDecl *VTableClass,
                                        VisitedVirtualBasesSetTy &VBases,
                                        VPtrsVector &Vptrs) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    VPtr Vptr = {Base, NearestVBase, OffsetFromNearestVBase, VTableClass};
    Vptrs.push_back(Vptr);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (const auto &I : RD->bases()) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I.isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl).second)
        continue;

      // Virtual bases are located relative to the complete object, so their
      // offset comes from the layout of the most-derived class.
      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      // Entering a virtual base restarts the "offset from nearest vbase"
      // accounting at zero.
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      // Non-virtual bases are located relative to the current class.
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      // A non-virtual primary base shares this class's vptr, so no new entry
      // will be pushed for it on the recursive call.
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    getVTablePointers(
        BaseSubobject(BaseDecl, BaseOffset),
        I.isVirtual() ? BaseDecl : NearestVBase, BaseOffsetFromNearestVBase,
        BaseDeclIsNonVirtualPrimaryBase, VTableClass, VBases, Vptrs);
  }
}
|
|
|
|
|
2010-03-29 05:07:49 +08:00
|
|
|
void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
|
|
|
|
// Ignore classes without a vtable.
|
2010-03-26 12:39:42 +08:00
|
|
|
if (!RD->isDynamicClass())
|
2010-01-02 04:29:01 +08:00
|
|
|
return;
|
|
|
|
|
2010-03-29 05:07:49 +08:00
|
|
|
// Initialize the vtable pointers for this class and all of its bases.
|
2015-09-15 08:37:06 +08:00
|
|
|
if (CGM.getCXXABI().doStructorsInitializeVPtrs(RD))
|
|
|
|
for (const VPtr &Vptr : getVTablePointers(RD))
|
|
|
|
InitializeVTablePointer(Vptr);
|
2013-10-10 02:16:58 +08:00
|
|
|
|
|
|
|
if (RD->getNumVBases())
|
|
|
|
CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
|
2010-01-02 04:29:01 +08:00
|
|
|
}
|
2010-10-27 02:44:08 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
/// Load the vtable pointer stored at address \p This, as a value of type
/// \p VTableTy, decorating the load with the metadata the optimizer needs.
llvm::Value *CodeGenFunction::GetVTablePtr(Address This,
                                           llvm::Type *VTableTy,
                                           const CXXRecordDecl *RD) {
  // Reinterpret the object address as pointing at a vtable pointer and load
  // the vptr stored there.
  Address VPtrAddr = Builder.CreateElementBitCast(This, VTableTy);
  llvm::Instruction *VTableLoad = Builder.CreateLoad(VPtrAddr, "vtable");

  // Tag the load with vtable-pointer TBAA so it is known not to alias
  // ordinary object accesses.
  CGM.DecorateInstructionWithTBAA(VTableLoad,
                                  CGM.getTBAAVTablePtrAccessInfo(VTableTy));

  // Under strict vtable-pointer semantics when optimizing, mark the load as
  // part of an invariant group so redundant vptr loads can be combined.
  const CodeGenOptions &Opts = CGM.getCodeGenOpts();
  if (Opts.OptimizationLevel > 0 && Opts.StrictVTablePointers)
    CGM.DecorateInstructionWithInvariantGroup(VTableLoad, RD);

  return VTableLoad;
}
|
2011-05-09 04:32:23 +08:00
|
|
|
|
2015-03-14 10:42:25 +08:00
|
|
|
// If a class has a single non-virtual base and does not introduce or override
|
|
|
|
// virtual member functions or fields, it will have the same layout as its base.
|
|
|
|
// This function returns the least derived such class.
|
|
|
|
//
|
|
|
|
// Casting an instance of a base class to such a derived class is technically
|
|
|
|
// undefined behavior, but it is a relatively common hack for introducing member
|
|
|
|
// functions on class instances with specific properties (e.g. llvm::Operator)
|
|
|
|
// that works under most compilers and should not have security implications, so
|
|
|
|
// we allow it by default. It can be disabled with -fsanitize=cfi-cast-strict.
|
|
|
|
static const CXXRecordDecl *
|
|
|
|
LeastDerivedClassWithSameLayout(const CXXRecordDecl *RD) {
|
|
|
|
if (!RD->field_empty())
|
|
|
|
return RD;
|
|
|
|
|
|
|
|
if (RD->getNumVBases() != 0)
|
|
|
|
return RD;
|
|
|
|
|
|
|
|
if (RD->getNumBases() != 1)
|
|
|
|
return RD;
|
|
|
|
|
|
|
|
for (const CXXMethodDecl *MD : RD->methods()) {
|
|
|
|
if (MD->isVirtual()) {
|
|
|
|
// Virtual member functions are only ok if they are implicit destructors
|
|
|
|
// because the implicit destructor will have the same semantics as the
|
|
|
|
// base class's destructor if no fields are added.
|
|
|
|
if (isa<CXXDestructorDecl>(MD) && MD->isImplicit())
|
|
|
|
continue;
|
|
|
|
return RD;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return LeastDerivedClassWithSameLayout(
|
|
|
|
RD->bases_begin()->getType()->getAsCXXRecordDecl());
|
|
|
|
}
|
|
|
|
|
2016-06-25 05:21:46 +08:00
|
|
|
/// Emit the per-virtual-call type metadata code for a call through a vtable
/// pointer \p VTable of dynamic type \p RD.
void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
                                                   llvm::Value *VTable,
                                                   SourceLocation Loc) {
  // With -fsanitize=cfi-vcall, emit the full CFI vtable-pointer check.
  if (SanOpts.has(SanitizerKind::CFIVCall))
    EmitVTablePtrCheckForCall(RD, VTable, CodeGenFunction::CFITCK_VCall, Loc);
  else if (CGM.getCodeGenOpts().WholeProgramVTables &&
           CGM.HasHiddenLTOVisibility(RD)) {
    // Without the sanitizer, but with whole-program vtables and hidden LTO
    // visibility, emit a non-trapping llvm.type.test + llvm.assume pair that
    // asserts the vtable carries this class's type identifier; whole-program
    // vtable optimization can exploit this fact.
    llvm::Metadata *MD =
        CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
    llvm::Value *TypeId =
        llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);

    // llvm.type.test takes an i8* operand, so cast the vtable pointer first.
    llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
    llvm::Value *TypeTest =
        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                           {CastedVTable, TypeId});
    Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::assume), TypeTest);
  }
}
|
|
|
|
|
|
|
|
void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXRecordDecl *RD,
|
2015-06-19 09:51:54 +08:00
|
|
|
llvm::Value *VTable,
|
|
|
|
CFITypeCheckKind TCK,
|
|
|
|
SourceLocation Loc) {
|
2015-04-02 08:23:30 +08:00
|
|
|
if (!SanOpts.has(SanitizerKind::CFICastStrict))
|
2016-02-25 04:46:36 +08:00
|
|
|
RD = LeastDerivedClassWithSameLayout(RD);
|
2015-04-02 08:23:30 +08:00
|
|
|
|
2016-02-25 04:46:36 +08:00
|
|
|
EmitVTablePtrCheck(RD, VTable, TCK, Loc);
|
2015-04-02 08:23:30 +08:00
|
|
|
}
|
|
|
|
|
2015-03-14 10:42:25 +08:00
|
|
|
/// Emit a CFI vtable-pointer check for a cast of pointer \p Derived to type
/// \p T. If \p MayBeNull is set, a null pointer skips the check entirely.
void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
                                                llvm::Value *Derived,
                                                bool MayBeNull,
                                                CFITypeCheckKind TCK,
                                                SourceLocation Loc) {
  // Vtable checks only make sense for C++ class types.
  if (!getLangOpts().CPlusPlus)
    return;

  auto *ClassTy = T->getAs<RecordType>();
  if (!ClassTy)
    return;

  const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl());

  // Incomplete or non-dynamic classes have no vtable to check against.
  if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
    return;

  // Unless strict cast checking was requested, tolerate casts to a derived
  // class that is layout-identical to a base (see the comment on
  // LeastDerivedClassWithSameLayout).
  if (!SanOpts.has(SanitizerKind::CFICastStrict))
    ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);

  llvm::BasicBlock *ContBlock = nullptr;

  if (MayBeNull) {
    // Branch around the check when the pointer is null: a null pointer is a
    // valid value for the cast and must not be dereferenced.
    llvm::Value *DerivedNotNull =
        Builder.CreateIsNotNull(Derived, "cast.nonnull");

    llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check");
    ContBlock = createBasicBlock("cast.cont");

    Builder.CreateCondBr(DerivedNotNull, CheckBlock, ContBlock);

    EmitBlock(CheckBlock);
  }

  // Let the ABI load the vtable pointer; it may also adjust which class the
  // check should be performed against.
  llvm::Value *VTable;
  std::tie(VTable, ClassDecl) = CGM.getCXXABI().LoadVTablePtr(
      *this, Address(Derived, getPointerAlign()), ClassDecl);

  EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc);

  if (MayBeNull) {
    // Rejoin the null and checked paths.
    Builder.CreateBr(ContBlock);
    EmitBlock(ContBlock);
  }
}
|
|
|
|
|
|
|
|
/// Emit the actual CFI check that \p VTable is a valid vtable pointer for
/// class \p RD, reporting failure according to \p TCK.
void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
                                         llvm::Value *VTable,
                                         CFITypeCheckKind TCK,
                                         SourceLocation Loc) {
  // Without cross-DSO CFI, checks are only meaningful for classes whose
  // vtables are fully visible to this LTO unit.
  if (!CGM.getCodeGenOpts().SanitizeCfiCrossDso &&
      !CGM.HasHiddenLTOVisibility(RD))
    return;

  // Map the check kind to the corresponding sanitizer and statistics bucket.
  SanitizerMask M;
  llvm::SanitizerStatKind SSK;
  switch (TCK) {
  case CFITCK_VCall:
    M = SanitizerKind::CFIVCall;
    SSK = llvm::SanStat_CFI_VCall;
    break;
  case CFITCK_NVCall:
    M = SanitizerKind::CFINVCall;
    SSK = llvm::SanStat_CFI_NVCall;
    break;
  case CFITCK_DerivedCast:
    M = SanitizerKind::CFIDerivedCast;
    SSK = llvm::SanStat_CFI_DerivedCast;
    break;
  case CFITCK_UnrelatedCast:
    M = SanitizerKind::CFIUnrelatedCast;
    SSK = llvm::SanStat_CFI_UnrelatedCast;
    break;
  case CFITCK_ICall:
  case CFITCK_NVMFCall:
  case CFITCK_VMFCall:
    // These kinds are handled elsewhere, never through this entry point.
    llvm_unreachable("unexpected sanitizer kind");
  }

  // Honor the sanitizer blacklist for this type.
  std::string TypeName = RD->getQualifiedNameAsString();
  if (getContext().getSanitizerBlacklist().isBlacklistedType(M, TypeName))
    return;

  SanitizerScope SanScope(this);
  EmitSanitizerStatReport(SSK);

  llvm::Metadata *MD =
      CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
  llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

  // llvm.type.test takes an i8* operand, so cast the vtable pointer first.
  llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
  llvm::Value *TypeTest = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedVTable, TypeId});

  // Static data describing the failure for the diagnostic handler.
  llvm::Constant *StaticData[] = {
      llvm::ConstantInt::get(Int8Ty, TCK),
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)),
  };

  // Cross-DSO CFI routes failures through the slow-path runtime check.
  auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
  if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
    EmitCfiSlowPathCheck(M, TypeTest, CrossDsoTypeId, CastedVTable, StaticData);
    return;
  }

  // In trapping mode, failure is a plain trap with no diagnostic.
  if (CGM.getCodeGenOpts().SanitizeTrap.has(M)) {
    EmitTrapCheck(TypeTest);
    return;
  }

  // Diagnostic mode: also test against the "all-vtables" identifier so the
  // failure handler can distinguish a wrong vtable from a non-vtable value.
  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      CGM.getLLVMContext(),
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedVTable, AllVtables});
  EmitCheck(std::make_pair(TypeTest, M), SanitizerHandler::CFICheckFail,
            StaticData, {CastedVTable, ValidVtable});
}
|
2011-05-09 04:32:23 +08:00
|
|
|
|
2016-06-25 08:24:06 +08:00
|
|
|
bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) {
|
|
|
|
if (!CGM.getCodeGenOpts().WholeProgramVTables ||
|
|
|
|
!SanOpts.has(SanitizerKind::CFIVCall) ||
|
|
|
|
!CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIVCall) ||
|
|
|
|
!CGM.HasHiddenLTOVisibility(RD))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
std::string TypeName = RD->getQualifiedNameAsString();
|
2017-09-26 06:11:12 +08:00
|
|
|
return !getContext().getSanitizerBlacklist().isBlacklistedType(
|
|
|
|
SanitizerKind::CFIVCall, TypeName);
|
2016-06-25 08:24:06 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Load the virtual function pointer at \p VTableByteOffset from \p VTable
/// via llvm.type.checked.load, which fuses the load with a CFI type check.
/// Returns the loaded function pointer, cast back to the vtable's element
/// type.
llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
    const CXXRecordDecl *RD, llvm::Value *VTable, uint64_t VTableByteOffset) {
  SanitizerScope SanScope(this);

  EmitSanitizerStatReport(llvm::SanStat_CFI_VCall);

  llvm::Metadata *MD =
      CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
  llvm::Value *TypeId = llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);

  // The intrinsic takes an i8* vtable, a byte offset, and a type identifier;
  // it returns {loaded pointer, check result}.
  llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
  llvm::Value *CheckedLoad = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
      {CastedVTable, llvm::ConstantInt::get(Int32Ty, VTableByteOffset),
       TypeId});
  llvm::Value *CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);

  // Report a CFI failure if the check result is false. No static data is
  // passed; callers of this path use trapping mode (see
  // ShouldEmitVTableTypeCheckedLoad).
  EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIVCall),
            SanitizerHandler::CFICheckFail, nullptr, nullptr);

  return Builder.CreateBitCast(
      Builder.CreateExtractValue(CheckedLoad, 0),
      cast<llvm::PointerType>(VTable->getType())->getElementType());
}
|
|
|
|
|
2013-09-29 16:45:24 +08:00
|
|
|
/// Emit a call forwarding the already-collected arguments \p callArgs to the
/// lambda call operator \p callOperator, then return its result from the
/// current function.
void CodeGenFunction::EmitForwardingCallToLambda(
                                      const CXXMethodDecl *callOperator,
                                      CallArgList &callArgs) {
  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Constant *calleePtr =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot. When the callee returns indirectly and the
  // result is not scalar-evaluated, reuse this function's own return slot so
  // the callee writes its result in place.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getReturnType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  auto callee = CGCallee::forDirect(calleePtr, GlobalDecl(callOperator));
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull()) {
    // Under ARC, take ownership of an autoreleased retainable return value
    // before handing it back.
    if (getLangOpts().ObjCAutoRefCount && resultType->isObjCRetainableType()) {
      RV = RValue::get(EmitARCRetainAutoreleasedReturnValue(RV.getScalarVal()));
    }
    EmitReturnOfRValue(RV, resultType);
  } else
    // The callee already wrote into the shared slot (or returns void); just
    // branch to the function's return block through any active cleanups.
    EmitBranchThroughCleanup(ReturnBlock);
}
|
|
|
|
|
2012-02-25 10:48:22 +08:00
|
|
|
void CodeGenFunction::EmitLambdaBlockInvokeBody() {
|
|
|
|
const BlockDecl *BD = BlockInfo->getBlockDecl();
|
|
|
|
const VarDecl *variable = BD->capture_begin()->getVariable();
|
|
|
|
const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
|
2017-08-05 06:38:06 +08:00
|
|
|
const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
|
|
|
|
|
|
|
|
if (CallOp->isVariadic()) {
|
|
|
|
// FIXME: Making this work correctly is nasty because it requires either
|
|
|
|
// cloning the body of the call operator or making the call operator
|
|
|
|
// forward.
|
|
|
|
CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
|
|
|
|
return;
|
|
|
|
}
|
2012-02-25 10:48:22 +08:00
|
|
|
|
|
|
|
// Start building arguments for forwarding call
|
|
|
|
CallArgList CallArgs;
|
|
|
|
|
|
|
|
QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
|
2018-10-02 05:51:28 +08:00
|
|
|
Address ThisPtr = GetAddrOfBlockDecl(variable);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
|
2012-02-25 10:48:22 +08:00
|
|
|
|
|
|
|
// Add the rest of the parameters.
|
2016-06-24 12:05:48 +08:00
|
|
|
for (auto param : BD->parameters())
|
2018-08-10 05:08:08 +08:00
|
|
|
EmitDelegateCallArg(CallArgs, param, param->getBeginLoc());
|
2014-03-08 00:09:59 +08:00
|
|
|
|
2015-05-20 23:53:59 +08:00
|
|
|
assert(!Lambda->isGenericLambda() &&
|
2013-09-29 16:45:24 +08:00
|
|
|
"generic lambda interconversion to block not implemented");
|
2017-08-05 06:38:06 +08:00
|
|
|
EmitForwardingCallToLambda(CallOp, CallArgs);
|
2012-02-25 10:48:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Emit the body of a lambda's static invoker \p MD by delegating to the
/// lambda's call operator.
void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for forwarding call
  CallArgList CallArgs;

  // Pass an undef 'this' pointer — presumably safe because only a lambda
  // without captures has a static invoker, so the call operator never reads
  // through it (TODO: confirm).
  QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (auto Param : MD->parameters())
    EmitDelegateCallArg(CallArgs, Param, Param->getBeginLoc());

  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call to the static-invoker shall be forwarded.
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    // Look up the call operator specialization with the same template
    // arguments as this invoker specialization.
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate();
    void *InsertPos = nullptr;
    FunctionDecl *CorrespondingCallOpSpecialization =
        CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
  }
  EmitForwardingCallToLambda(CallOp, CallArgs);
}
|
|
|
|
|
2017-08-05 06:38:06 +08:00
|
|
|
void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
|
2012-02-17 11:02:34 +08:00
|
|
|
if (MD->isVariadic()) {
|
2012-02-16 11:47:28 +08:00
|
|
|
// FIXME: Making this work correctly is nasty because it requires either
|
|
|
|
// cloning the body of the call operator or making the call operator forward.
|
|
|
|
CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
|
2012-02-25 10:48:22 +08:00
|
|
|
return;
|
2012-02-16 11:47:28 +08:00
|
|
|
}
|
|
|
|
|
2012-02-17 11:02:34 +08:00
|
|
|
EmitLambdaDelegatingInvokeBody(MD);
|
2012-02-16 09:37:33 +08:00
|
|
|
}
|