//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Microsoft Visual C++ ABI.
// The class in this file generates structures that follow the Microsoft
// Visual C++ ABI, which is actually not very well documented at all outside
// of Microsoft.
//
//===----------------------------------------------------------------------===//
|
|
|
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGVTables.h"
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/Intrinsics.h"
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
2014-01-03 08:14:35 +08:00
|
|
|
/// Holds all the vbtable globals for a given class.
|
|
|
|
struct VBTableGlobals {
|
2014-02-28 03:40:09 +08:00
|
|
|
const VPtrInfoVector *VBTables;
|
2014-01-03 08:14:35 +08:00
|
|
|
SmallVector<llvm::GlobalVariable *, 2> Globals;
|
|
|
|
};
|
|
|
|
|
2010-08-16 11:33:14 +08:00
|
|
|
class MicrosoftCXXABI : public CGCXXABI {
|
2010-06-10 07:25:41 +08:00
|
|
|
public:
|
2014-07-07 16:09:15 +08:00
|
|
|
MicrosoftCXXABI(CodeGenModule &CGM)
|
|
|
|
: CGCXXABI(CGM), BaseClassDescriptorType(nullptr),
|
|
|
|
ClassHierarchyDescriptorType(nullptr),
|
2015-03-05 08:46:22 +08:00
|
|
|
CompleteObjectLocatorType(nullptr), CatchableTypeType(nullptr),
|
2015-10-15 23:29:40 +08:00
|
|
|
ThrowInfoType(nullptr) {}
|
2010-08-31 15:33:07 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
bool HasThisReturn(GlobalDecl GD) const override;
|
2014-11-01 04:09:12 +08:00
|
|
|
bool hasMostDerivedReturn(GlobalDecl GD) const override;
|
2013-07-01 04:40:16 +08:00
|
|
|
|
2014-05-14 06:05:45 +08:00
|
|
|
bool classifyReturnType(CGFunctionInfo &FI) const override;
|
2013-04-17 20:54:10 +08:00
|
|
|
|
2014-05-03 08:33:28 +08:00
|
|
|
RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override;
|
2013-04-17 20:54:10 +08:00
|
|
|
|
2014-05-10 06:46:15 +08:00
|
|
|
bool isSRetParameterAfterThis() const override { return true; }
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
bool isThisCompleteObject(GlobalDecl GD) const override {
|
|
|
|
// The Microsoft ABI doesn't use separate complete-object vs.
|
|
|
|
// base-object variants of constructors, but it does of destructors.
|
|
|
|
if (isa<CXXDestructorDecl>(GD.getDecl())) {
|
|
|
|
switch (GD.getDtorType()) {
|
|
|
|
case Dtor_Complete:
|
|
|
|
case Dtor_Deleting:
|
|
|
|
return true;
|
|
|
|
|
|
|
|
case Dtor_Base:
|
|
|
|
return false;
|
|
|
|
|
|
|
|
case Dtor_Comdat: llvm_unreachable("emitting dtor comdat as function?");
|
|
|
|
}
|
|
|
|
llvm_unreachable("bad dtor kind");
|
|
|
|
}
|
|
|
|
|
|
|
|
// No other kinds.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-09-12 07:05:02 +08:00
|
|
|
size_t getSrcArgforCopyCtor(const CXXConstructorDecl *CD,
|
|
|
|
FunctionArgList &Args) const override {
|
|
|
|
assert(Args.size() >= 2 &&
|
|
|
|
"expected the arglist to have at least two args!");
|
|
|
|
// The 'most_derived' parameter goes second if the ctor is variadic and
|
|
|
|
// has v-bases.
|
|
|
|
if (CD->getParent()->getNumVBases() > 0 &&
|
|
|
|
CD->getType()->castAs<FunctionProtoType>()->isVariadic())
|
|
|
|
return 2;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2015-11-02 17:01:44 +08:00
|
|
|
std::vector<CharUnits> getVBPtrOffsets(const CXXRecordDecl *RD) override {
|
|
|
|
std::vector<CharUnits> VBPtrOffsets;
|
|
|
|
const ASTContext &Context = getContext();
|
|
|
|
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
|
|
|
|
|
|
|
|
const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
|
2016-10-11 00:26:29 +08:00
|
|
|
for (const std::unique_ptr<VPtrInfo> &VBT : *VBGlobals.VBTables) {
|
2015-11-02 17:01:44 +08:00
|
|
|
const ASTRecordLayout &SubobjectLayout =
|
2016-07-20 22:40:25 +08:00
|
|
|
Context.getASTRecordLayout(VBT->IntroducingObject);
|
2015-11-02 17:01:44 +08:00
|
|
|
CharUnits Offs = VBT->NonVirtualOffset;
|
|
|
|
Offs += SubobjectLayout.getVBPtrOffset();
|
|
|
|
if (VBT->getVBaseWithVPtr())
|
|
|
|
Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
|
|
|
|
VBPtrOffsets.push_back(Offs);
|
|
|
|
}
|
|
|
|
llvm::array_pod_sort(VBPtrOffsets.begin(), VBPtrOffsets.end());
|
|
|
|
return VBPtrOffsets;
|
|
|
|
}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
StringRef GetPureVirtualCallName() override { return "_purecall"; }
|
|
|
|
StringRef GetDeletedVirtualCallName() override { return "_purecall"; }
|
2012-07-18 01:10:11 +08:00
|
|
|
|
2014-11-01 15:37:17 +08:00
|
|
|
void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Ptr, QualType ElementType,
|
2014-11-01 04:09:12 +08:00
|
|
|
const CXXDestructorDecl *Dtor) override;
|
2012-09-25 18:10:39 +08:00
|
|
|
|
2014-11-25 16:59:34 +08:00
|
|
|
void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
|
2015-03-05 08:46:22 +08:00
|
|
|
void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
|
2014-11-25 15:20:20 +08:00
|
|
|
|
2015-03-04 03:21:04 +08:00
|
|
|
void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
|
|
|
|
|
2014-07-07 14:20:47 +08:00
|
|
|
llvm::GlobalVariable *getMSCompleteObjectLocator(const CXXRecordDecl *RD,
|
2016-10-11 00:26:29 +08:00
|
|
|
const VPtrInfo &Info);
|
2014-07-07 14:20:47 +08:00
|
|
|
|
2015-03-18 04:35:00 +08:00
|
|
|
llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
|
2015-09-17 04:15:55 +08:00
|
|
|
CatchTypeInfo
|
2015-03-30 05:55:10 +08:00
|
|
|
getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) override;
|
2014-07-07 14:20:47 +08:00
|
|
|
|
2015-09-17 04:15:55 +08:00
|
|
|
/// MSVC needs an extra flag to indicate a catchall.
|
|
|
|
CatchTypeInfo getCatchAllTypeInfo() override {
|
|
|
|
return CatchTypeInfo{nullptr, 0x40};
|
|
|
|
}
|
|
|
|
|
2014-06-23 03:05:33 +08:00
|
|
|
bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
|
|
|
|
void EmitBadTypeidCall(CodeGenFunction &CGF) override;
|
|
|
|
llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address ThisPtr,
|
2014-06-23 03:05:33 +08:00
|
|
|
llvm::Type *StdTypeInfoPtrTy) override;
|
|
|
|
|
|
|
|
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
|
|
|
|
QualType SrcRecordTy) override;
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
|
2014-06-23 03:05:33 +08:00
|
|
|
QualType SrcRecordTy, QualType DestTy,
|
|
|
|
QualType DestRecordTy,
|
|
|
|
llvm::BasicBlock *CastEnd) override;
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
|
2014-06-23 03:05:33 +08:00
|
|
|
QualType SrcRecordTy,
|
|
|
|
QualType DestTy) override;
|
|
|
|
|
|
|
|
bool EmitBadCastCall(CodeGenFunction &CGF) override;
|
2015-09-15 08:37:06 +08:00
|
|
|
bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override {
|
2015-07-24 12:04:49 +08:00
|
|
|
return false;
|
|
|
|
}
|
2014-06-23 03:05:33 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
llvm::Value *
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
|
2014-03-12 14:41:41 +08:00
|
|
|
const CXXRecordDecl *ClassDecl,
|
|
|
|
const CXXRecordDecl *BaseClassDecl) override;
|
2013-05-30 02:02:47 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
llvm::BasicBlock *
|
|
|
|
EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
|
|
|
|
const CXXRecordDecl *RD) override;
|
2018-07-31 03:24:48 +08:00
|
|
|
|
[MS-ABI]V-base dtor called more than needed when throw happens in v-base ctor in window. Need add "complete object flag" check in eh cleanup code.
The problem only happen on window ( A MS-ABI issuer )
The nature of the problem is virtual base dtor called more than it is needed after exception throw in inheriting base class(with virtual bases) ctor.
The root problem is when throw happen, not all virtual base classes have been contructed, so not all virtual base dtors are need to call for ehcleanup.
clang has code to handle vbase initialization: basically add check for "complete object flag" before call to v-base ctor.
But that part is missing for cleanup code.
To fix this add similar code as v-base init to cleanup code, same algorithm.
1> Add new routine:
EmitDtorCompleteObjectHandler
With corresponding to EmitCtorCompleteObjectHandler
2> In the EmitDestructorCal
Call EmitDtorCompleteObjectHandler when generate ehcleanup inside ctor.
Just add check for "complete object flag" before call to v-base dtor.
Without my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
cleanupret from %13 unwind to caller, !dbg !66
with my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
%is_complete_object4 = icmp ne i32 %is_most_derived2, 0, !dbg !66
br i1 %is_complete_object4, label %Dtor.dtor_vbase, label %Dtor.skip_vbase, !d
bg !66
Dtor.dtor_vbase: ; preds = %ehcleanup
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
br label %Dtor.skip_vbase, !dbg !66
Dtor.skip_vbase: ; preds = %Dtor.dtor_vbase, %ehcleanup
cleanupret from %13 unwind to caller, !dbg !66
Please let me know you need more info.
Patch by Jennifer Yu.
Differential Revision: https://reviews.llvm.org/D27358
llvm-svn: 288869
2016-12-07 08:21:45 +08:00
|
|
|
llvm::BasicBlock *
|
|
|
|
EmitDtorCompleteObjectHandler(CodeGenFunction &CGF);
|
2013-02-27 21:46:31 +08:00
|
|
|
|
2013-10-10 02:16:58 +08:00
|
|
|
void initializeHiddenVirtualInheritanceMembers(CodeGenFunction &CGF,
|
2014-03-12 14:41:41 +08:00
|
|
|
const CXXRecordDecl *RD) override;
|
2013-10-10 02:16:58 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void EmitCXXConstructors(const CXXConstructorDecl *D) override;
|
2013-08-05 01:30:04 +08:00
|
|
|
|
[ms-cxxabi] Emit linkonce complete dtors in TUs that need them
Based on Peter Collingbourne's destructor patches.
Prior to this change, clang was considering ?1 to be the complete
destructor and the base destructor, which was wrong. This lead to
crashes when clang tried to emit two LLVM functions with the same name.
In this ABI, TUs with non-inline dtors might not emit a complete
destructor. They are emitted as inline thunks in TUs that need them,
and they always delegate to the base dtors of the complete class and its
virtual bases. This change uses the DeferredDecls machinery to emit
complete dtors as needed.
Currently in clang try body destructors can catch exceptions thrown by
virtual base destructors. In the Microsoft C++ ABI, clang may not have
the destructor definition, in which case clang won't wrap the virtual
virtual base destructor calls in a try-catch. Diagnosing this in user
code is TODO.
Finally, for classes that don't use virtual inheritance, MSVC always
calls the base destructor (?1) directly. This is a useful code size
optimization that avoids emitting lots of extra thunks or aliases.
Implementing it also means our existing tests continue to pass, and is
consistent with MSVC's output.
We can do the same for Itanium by tweaking GetAddrOfCXXDestructor, but
it will require further testing.
Reviewers: rjmccall
CC: cfe-commits
Differential Revision: http://llvm-reviews.chandlerc.com/D1066
llvm-svn: 186828
2013-07-22 21:51:44 +08:00
|
|
|
  // Background on MSVC destructors
  // ==============================
  //
  // Both Itanium and MSVC ABIs have destructor variants.  The variant names
  // roughly correspond in the following way:
  //   Itanium       Microsoft
  //   Base       -> no name, just ~Class
  //   Complete   -> vbase destructor
  //   Deleting   -> scalar deleting destructor
  //                 vector deleting destructor
  //
  // The base and complete destructors are the same as in Itanium, although the
  // complete destructor does not accept a VTT parameter when there are virtual
  // bases.  A separate mechanism involving vtordisps is used to ensure that
  // virtual methods of destroyed subobjects are not called.
  //
  // The deleting destructors accept an i32 bitfield as a second parameter.  Bit
  // 1 indicates if the memory should be deleted.  Bit 2 indicates if the this
  // pointer points to an array.  The scalar deleting destructor assumes that
  // bit 2 is zero, and therefore does not contain a loop.
  //
  // For virtual destructors, only one entry is reserved in the vftable, and it
  // always points to the vector deleting destructor.  The vector deleting
  // destructor is the most general, so it can be used to destroy objects in
  // place, delete single heap objects, or delete arrays.
  //
  // A TU defining a non-inline destructor is only guaranteed to emit a base
  // destructor, and all of the other variants are emitted on an as-needed basis
  // in COMDATs.  Because a non-base destructor can be emitted in a TU that
  // lacks a definition for the destructor, non-base destructors must always
  // delegate to or alias the base destructor.
|
2020-05-19 14:43:46 +08:00
|
|
|
AddedStructorArgCounts
|
2019-03-23 07:05:10 +08:00
|
|
|
buildStructorSignature(GlobalDecl GD,
|
2017-02-23 04:28:02 +08:00
|
|
|
SmallVectorImpl<CanQualType> &ArgTys) override;
|
2010-08-31 15:33:07 +08:00
|
|
|
|
[ms-cxxabi] Emit linkonce complete dtors in TUs that need them
Based on Peter Collingbourne's destructor patches.
Prior to this change, clang was considering ?1 to be the complete
destructor and the base destructor, which was wrong. This lead to
crashes when clang tried to emit two LLVM functions with the same name.
In this ABI, TUs with non-inline dtors might not emit a complete
destructor. They are emitted as inline thunks in TUs that need them,
and they always delegate to the base dtors of the complete class and its
virtual bases. This change uses the DeferredDecls machinery to emit
complete dtors as needed.
Currently in clang try body destructors can catch exceptions thrown by
virtual base destructors. In the Microsoft C++ ABI, clang may not have
the destructor definition, in which case clang won't wrap the virtual
virtual base destructor calls in a try-catch. Diagnosing this in user
code is TODO.
Finally, for classes that don't use virtual inheritance, MSVC always
calls the base destructor (?1) directly. This is a useful code size
optimization that avoids emitting lots of extra thunks or aliases.
Implementing it also means our existing tests continue to pass, and is
consistent with MSVC's output.
We can do the same for Itanium by tweaking GetAddrOfCXXDestructor, but
it will require further testing.
Reviewers: rjmccall
CC: cfe-commits
Differential Revision: http://llvm-reviews.chandlerc.com/D1066
llvm-svn: 186828
2013-07-22 21:51:44 +08:00
|
|
|
/// Non-base dtors should be emitted as delegating thunks in this ABI.
|
|
|
|
bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
|
2014-03-12 14:41:41 +08:00
|
|
|
CXXDtorType DT) const override {
|
[ms-cxxabi] Emit linkonce complete dtors in TUs that need them
Based on Peter Collingbourne's destructor patches.
Prior to this change, clang was considering ?1 to be the complete
destructor and the base destructor, which was wrong. This lead to
crashes when clang tried to emit two LLVM functions with the same name.
In this ABI, TUs with non-inline dtors might not emit a complete
destructor. They are emitted as inline thunks in TUs that need them,
and they always delegate to the base dtors of the complete class and its
virtual bases. This change uses the DeferredDecls machinery to emit
complete dtors as needed.
Currently in clang try body destructors can catch exceptions thrown by
virtual base destructors. In the Microsoft C++ ABI, clang may not have
the destructor definition, in which case clang won't wrap the virtual
virtual base destructor calls in a try-catch. Diagnosing this in user
code is TODO.
Finally, for classes that don't use virtual inheritance, MSVC always
calls the base destructor (?1) directly. This is a useful code size
optimization that avoids emitting lots of extra thunks or aliases.
Implementing it also means our existing tests continue to pass, and is
consistent with MSVC's output.
We can do the same for Itanium by tweaking GetAddrOfCXXDestructor, but
it will require further testing.
Reviewers: rjmccall
CC: cfe-commits
Differential Revision: http://llvm-reviews.chandlerc.com/D1066
llvm-svn: 186828
2013-07-22 21:51:44 +08:00
|
|
|
return DT != Dtor_Base;
|
|
|
|
}
|
|
|
|
|
2018-03-17 03:40:50 +08:00
|
|
|
void setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
|
|
|
|
const CXXDestructorDecl *Dtor,
|
|
|
|
CXXDtorType DT) const override;
|
|
|
|
|
|
|
|
llvm::GlobalValue::LinkageTypes
|
|
|
|
getCXXDestructorLinkage(GVALinkage Linkage, const CXXDestructorDecl *Dtor,
|
|
|
|
CXXDtorType DT) const override;
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void EmitCXXDestructors(const CXXDestructorDecl *D) override;
|
[ms-cxxabi] Emit linkonce complete dtors in TUs that need them
Based on Peter Collingbourne's destructor patches.
Prior to this change, clang was considering ?1 to be the complete
destructor and the base destructor, which was wrong. This lead to
crashes when clang tried to emit two LLVM functions with the same name.
In this ABI, TUs with non-inline dtors might not emit a complete
destructor. They are emitted as inline thunks in TUs that need them,
and they always delegate to the base dtors of the complete class and its
virtual bases. This change uses the DeferredDecls machinery to emit
complete dtors as needed.
Currently in clang try body destructors can catch exceptions thrown by
virtual base destructors. In the Microsoft C++ ABI, clang may not have
the destructor definition, in which case clang won't wrap the virtual
virtual base destructor calls in a try-catch. Diagnosing this in user
code is TODO.
Finally, for classes that don't use virtual inheritance, MSVC always
calls the base destructor (?1) directly. This is a useful code size
optimization that avoids emitting lots of extra thunks or aliases.
Implementing it also means our existing tests continue to pass, and is
consistent with MSVC's output.
We can do the same for Itanium by tweaking GetAddrOfCXXDestructor, but
it will require further testing.
Reviewers: rjmccall
CC: cfe-commits
Differential Revision: http://llvm-reviews.chandlerc.com/D1066
llvm-svn: 186828
2013-07-22 21:51:44 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
const CXXRecordDecl *
|
|
|
|
getThisArgumentTypeForMethod(const CXXMethodDecl *MD) override {
|
2013-08-21 14:25:03 +08:00
|
|
|
if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD)) {
|
2018-04-03 04:00:39 +08:00
|
|
|
MethodVFTableLocation ML =
|
2013-11-05 23:54:58 +08:00
|
|
|
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(MD);
|
2013-08-21 14:25:03 +08:00
|
|
|
// The vbases might be ordered differently in the final overrider object
|
|
|
|
// and the complete object, so the "this" argument may sometimes point to
|
|
|
|
// memory that has no particular type (e.g. past the complete object).
|
|
|
|
// In this case, we just use a generic pointer type.
|
|
|
|
// FIXME: might want to have a more precise type in the non-virtual
|
|
|
|
// multiple inheritance case.
|
2013-11-07 21:34:02 +08:00
|
|
|
if (ML.VBase || !ML.VFPtrOffset.isZero())
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-08-21 14:25:03 +08:00
|
|
|
}
|
|
|
|
return MD->getParent();
|
|
|
|
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address
|
2014-03-15 01:43:37 +08:00
|
|
|
adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address This,
|
2014-03-15 01:43:37 +08:00
|
|
|
bool VirtualCall) override;
|
2013-08-21 14:25:03 +08:00
|
|
|
|
2013-12-18 03:46:40 +08:00
|
|
|
void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
|
2014-03-12 14:41:41 +08:00
|
|
|
FunctionArgList &Params) override;
|
2010-08-31 15:33:07 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
|
2011-01-27 10:46:02 +08:00
|
|
|
|
2020-05-19 14:43:46 +08:00
|
|
|
AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
|
|
|
|
const CXXConstructorDecl *D,
|
|
|
|
CXXCtorType Type,
|
|
|
|
bool ForVirtualBase,
|
|
|
|
bool Delegating) override;
|
2013-08-21 14:25:03 +08:00
|
|
|
|
2020-07-02 01:57:45 +08:00
|
|
|
llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
|
|
|
|
const CXXDestructorDecl *DD,
|
|
|
|
CXXDtorType Type,
|
|
|
|
bool ForVirtualBase,
|
|
|
|
bool Delegating) override;
|
|
|
|
|
2013-12-13 08:53:54 +08:00
|
|
|
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
|
|
|
|
CXXDtorType Type, bool ForVirtualBase,
|
2019-07-22 17:39:13 +08:00
|
|
|
bool Delegating, Address This,
|
|
|
|
QualType ThisTy) override;
|
2013-12-13 08:53:54 +08:00
|
|
|
|
2016-10-11 00:26:29 +08:00
|
|
|
void emitVTableTypeMetadata(const VPtrInfo &Info, const CXXRecordDecl *RD,
|
2016-06-25 05:21:46 +08:00
|
|
|
llvm::GlobalVariable *VTable);
|
CFI: Implement bitset emission for the Microsoft ABI.
Clang's control flow integrity implementation works by conceptually attaching
"tags" (in the form of bitset entries) to each virtual table, identifying
the names of the classes that the virtual table is compatible with. Under
the Itanium ABI, it is simple to assign tags to virtual tables; they are
simply the address points, which are available via VTableLayout. Because any
overridden methods receive an entry in the derived class's virtual table,
a check for an overridden method call can always be done by checking the
tag of whichever derived class overrode the method call.
The Microsoft ABI is a little different, as it does not directly use address
points, and overrides in a derived class do not cause new virtual table entries
to be added to the derived class; instead, the slot in the base class is
reused, and the compiler needs to adjust the this pointer at the call site
to (generally) the base class that initially defined the method. After the
this pointer has been adjusted, we cannot check for the derived class's tag,
as the virtual table may not be compatible with the derived class. So we
need to determine which base class we have been adjusted to.
Specifically, at each call site, we use ASTRecordLayout to identify the most
derived class whose virtual table is laid out at the "this" pointer offset
we are using to make the call, and check the virtual table for that tag.
Because address point information is unavailable, we "reconstruct" it as
follows: any virtual tables we create for a non-derived class receive a tag
for that class, and virtual tables for a base class inside a derived class
receive a tag for the base class, together with tags for any derived classes
which are laid out at the same position as the derived class (and therefore
have compatible virtual tables).
Differential Revision: http://reviews.llvm.org/D10520
llvm-svn: 240117
2015-06-19 10:30:43 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void emitVTableDefinitions(CodeGenVTables &CGVT,
|
|
|
|
const CXXRecordDecl *RD) override;
|
2013-09-27 22:48:01 +08:00
|
|
|
|
2015-09-15 08:37:06 +08:00
|
|
|
bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
|
|
|
|
CodeGenFunction::VPtr Vptr) override;
|
|
|
|
|
|
|
|
/// Don't initialize vptrs if dynamic class
|
|
|
|
/// is marked with with the 'novtable' attribute.
|
|
|
|
bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
|
|
|
|
return !VTableClass->hasAttr<MSNoVTableAttr>();
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Constant *
|
|
|
|
getVTableAddressPoint(BaseSubobject Base,
|
|
|
|
const CXXRecordDecl *VTableClass) override;
|
|
|
|
|
2013-09-27 22:48:01 +08:00
|
|
|
llvm::Value *getVTableAddressPointInStructor(
|
|
|
|
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
|
2015-09-15 08:37:06 +08:00
|
|
|
BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
|
2013-09-27 22:48:01 +08:00
|
|
|
|
|
|
|
llvm::Constant *
|
|
|
|
getVTableAddressPointForConstExpr(BaseSubobject Base,
|
2014-03-12 14:41:41 +08:00
|
|
|
const CXXRecordDecl *VTableClass) override;
|
2013-09-27 22:48:01 +08:00
|
|
|
|
|
|
|
llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
|
2014-03-12 14:41:41 +08:00
|
|
|
CharUnits VPtrOffset) override;
|
2013-09-27 22:48:01 +08:00
|
|
|
|
2018-02-07 02:52:44 +08:00
|
|
|
CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
|
|
|
|
Address This, llvm::Type *Ty,
|
|
|
|
SourceLocation Loc) override;
|
2013-08-21 14:25:03 +08:00
|
|
|
|
2014-11-01 04:09:12 +08:00
|
|
|
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
|
|
|
|
const CXXDestructorDecl *Dtor,
|
2019-07-22 17:39:13 +08:00
|
|
|
CXXDtorType DtorType, Address This,
|
|
|
|
DeleteOrMemberCallExpr E) override;
|
2013-02-15 22:45:22 +08:00
|
|
|
|
2013-10-09 17:23:58 +08:00
|
|
|
void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF, GlobalDecl GD,
|
2014-03-12 14:41:41 +08:00
|
|
|
CallArgList &CallArgs) override {
|
2013-10-09 17:23:58 +08:00
|
|
|
assert(GD.getDtorType() == Dtor_Deleting &&
|
|
|
|
"Only deleting destructor thunks are available in this ABI");
|
|
|
|
CallArgs.add(RValue::get(getStructorImplicitParamValue(CGF)),
|
2015-03-15 07:44:48 +08:00
|
|
|
getContext().IntTy);
|
2013-10-09 17:23:58 +08:00
|
|
|
}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
|
2013-06-19 23:20:38 +08:00
|
|
|
|
2014-01-03 08:14:35 +08:00
|
|
|
llvm::GlobalVariable *
|
2014-02-28 03:40:09 +08:00
|
|
|
getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
|
2014-01-03 08:14:35 +08:00
|
|
|
llvm::GlobalVariable::LinkageTypes Linkage);
|
|
|
|
|
2015-06-23 15:31:11 +08:00
|
|
|
llvm::GlobalVariable *
|
|
|
|
getAddrOfVirtualDisplacementMap(const CXXRecordDecl *SrcRD,
|
|
|
|
const CXXRecordDecl *DstRD) {
|
|
|
|
SmallString<256> OutName;
|
|
|
|
llvm::raw_svector_ostream Out(OutName);
|
|
|
|
getMangleContext().mangleCXXVirtualDisplacementMap(SrcRD, DstRD, Out);
|
|
|
|
StringRef MangledName = OutName.str();
|
|
|
|
|
|
|
|
if (auto *VDispMap = CGM.getModule().getNamedGlobal(MangledName))
|
|
|
|
return VDispMap;
|
|
|
|
|
|
|
|
MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext();
|
|
|
|
unsigned NumEntries = 1 + SrcRD->getNumVBases();
|
|
|
|
SmallVector<llvm::Constant *, 4> Map(NumEntries,
|
|
|
|
llvm::UndefValue::get(CGM.IntTy));
|
|
|
|
Map[0] = llvm::ConstantInt::get(CGM.IntTy, 0);
|
|
|
|
bool AnyDifferent = false;
|
|
|
|
for (const auto &I : SrcRD->vbases()) {
|
|
|
|
const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
|
|
|
|
if (!DstRD->isVirtuallyDerivedFrom(VBase))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
unsigned SrcVBIndex = VTContext.getVBTableIndex(SrcRD, VBase);
|
|
|
|
unsigned DstVBIndex = VTContext.getVBTableIndex(DstRD, VBase);
|
|
|
|
Map[SrcVBIndex] = llvm::ConstantInt::get(CGM.IntTy, DstVBIndex * 4);
|
|
|
|
AnyDifferent |= SrcVBIndex != DstVBIndex;
|
|
|
|
}
|
|
|
|
// This map would be useless, don't use it.
|
|
|
|
if (!AnyDifferent)
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
llvm::ArrayType *VDispMapTy = llvm::ArrayType::get(CGM.IntTy, Map.size());
|
|
|
|
llvm::Constant *Init = llvm::ConstantArray::get(VDispMapTy, Map);
|
|
|
|
llvm::GlobalValue::LinkageTypes Linkage =
|
|
|
|
SrcRD->isExternallyVisible() && DstRD->isExternallyVisible()
|
|
|
|
? llvm::GlobalValue::LinkOnceODRLinkage
|
|
|
|
: llvm::GlobalValue::InternalLinkage;
|
|
|
|
auto *VDispMap = new llvm::GlobalVariable(
|
2019-07-16 12:46:31 +08:00
|
|
|
CGM.getModule(), VDispMapTy, /*isConstant=*/true, Linkage,
|
2015-06-23 15:31:11 +08:00
|
|
|
/*Initializer=*/Init, MangledName);
|
|
|
|
return VDispMap;
|
|
|
|
}
|
|
|
|
|
2014-02-28 03:40:09 +08:00
|
|
|
void emitVBTableDefinition(const VPtrInfo &VBT, const CXXRecordDecl *RD,
|
2014-01-03 08:14:35 +08:00
|
|
|
llvm::GlobalVariable *GV) const;
|
|
|
|
|
2014-06-07 04:04:01 +08:00
|
|
|
void setThunkLinkage(llvm::Function *Thunk, bool ForVTable,
|
|
|
|
GlobalDecl GD, bool ReturnAdjustment) override {
|
|
|
|
GVALinkage Linkage =
|
|
|
|
getContext().GetGVALinkageForFunction(cast<FunctionDecl>(GD.getDecl()));
|
|
|
|
|
|
|
|
if (Linkage == GVA_Internal)
|
|
|
|
Thunk->setLinkage(llvm::GlobalValue::InternalLinkage);
|
|
|
|
else if (ReturnAdjustment)
|
|
|
|
Thunk->setLinkage(llvm::GlobalValue::WeakODRLinkage);
|
|
|
|
else
|
|
|
|
Thunk->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
|
2013-10-09 17:23:58 +08:00
|
|
|
}
|
|
|
|
|
2018-03-01 08:35:47 +08:00
|
|
|
bool exportThunk() override { return false; }
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
|
2014-03-12 14:41:41 +08:00
|
|
|
const ThisAdjustment &TA) override;
|
2013-10-30 19:55:43 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
|
2014-03-12 14:41:41 +08:00
|
|
|
const ReturnAdjustment &RA) override;
|
2013-10-30 19:55:43 +08:00
|
|
|
|
2014-10-05 13:05:40 +08:00
|
|
|
void EmitThreadLocalInitFuncs(
|
2015-12-01 09:10:48 +08:00
|
|
|
CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
|
2014-10-05 13:05:40 +08:00
|
|
|
ArrayRef<llvm::Function *> CXXThreadLocalInits,
|
2015-12-01 09:10:48 +08:00
|
|
|
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
|
2014-10-05 13:05:40 +08:00
|
|
|
|
2019-09-13 04:00:24 +08:00
|
|
|
bool usesThreadWrapperFunction(const VarDecl *VD) const override {
|
|
|
|
return false;
|
|
|
|
}
|
2014-10-05 13:05:40 +08:00
|
|
|
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
|
|
|
|
QualType LValType) override;
|
|
|
|
|
2012-05-01 14:13:13 +08:00
|
|
|
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
|
|
|
|
llvm::GlobalVariable *DeclPtr,
|
2014-03-12 14:41:41 +08:00
|
|
|
bool PerformInit) override;
|
2014-10-05 13:05:40 +08:00
|
|
|
void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
|
2019-02-07 09:14:17 +08:00
|
|
|
llvm::FunctionCallee Dtor,
|
|
|
|
llvm::Constant *Addr) override;
|
2012-05-01 14:13:13 +08:00
|
|
|
|
2011-01-27 10:46:02 +08:00
|
|
|
// ==== Notes on array cookies =========
//
// MSVC seems to only use cookies when the class has a destructor; a
// two-argument usual array deallocation function isn't sufficient.
//
// For example, this code prints "100" and "1":
//   struct A {
//     char x;
//     void *operator new[](size_t sz) {
//       printf("%u\n", sz);
//       return malloc(sz);
//     }
//     void operator delete[](void *p, size_t sz) {
//       printf("%u\n", sz);
//       free(p);
//     }
//   };
//   int main() {
//     A *p = new A[100];
//     delete[] p;
//   }
// Whereas it prints "104" and "104" if you give A a destructor.
|
2012-05-01 13:23:51 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
bool requiresArrayCookie(const CXXDeleteExpr *expr,
|
|
|
|
QualType elementType) override;
|
|
|
|
bool requiresArrayCookie(const CXXNewExpr *expr) override;
|
|
|
|
CharUnits getArrayCookieSizeImpl(QualType type) override;
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address InitializeArrayCookie(CodeGenFunction &CGF,
|
|
|
|
Address NewPtr,
|
|
|
|
llvm::Value *NumElements,
|
|
|
|
const CXXNewExpr *expr,
|
|
|
|
QualType ElementType) override;
|
2012-05-01 13:23:51 +08:00
|
|
|
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address allocPtr,
|
2014-03-12 14:41:41 +08:00
|
|
|
CharUnits cookieSize) override;
|
2013-03-23 03:02:54 +08:00
|
|
|
|
2014-07-07 16:09:15 +08:00
|
|
|
// MSRTTIBuilder needs access to the private RTTI type-getters below.
friend struct MSRTTIBuilder;
|
|
|
|
|
|
|
|
bool isImageRelative() const {
|
2019-07-16 12:46:31 +08:00
|
|
|
return CGM.getTarget().getPointerWidth(/*AddrSpace=*/0) == 64;
|
2014-07-07 16:09:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// 5 routines for constructing the llvm types for MS RTTI structs.
|
|
|
|
llvm::StructType *getTypeDescriptorType(StringRef TypeInfoString) {
|
|
|
|
llvm::SmallString<32> TDTypeName("rtti.TypeDescriptor");
|
|
|
|
TDTypeName += llvm::utostr(TypeInfoString.size());
|
|
|
|
llvm::StructType *&TypeDescriptorType =
|
|
|
|
TypeDescriptorTypeMap[TypeInfoString.size()];
|
|
|
|
if (TypeDescriptorType)
|
|
|
|
return TypeDescriptorType;
|
|
|
|
llvm::Type *FieldTypes[] = {
|
|
|
|
CGM.Int8PtrPtrTy,
|
|
|
|
CGM.Int8PtrTy,
|
|
|
|
llvm::ArrayType::get(CGM.Int8Ty, TypeInfoString.size() + 1)};
|
|
|
|
TypeDescriptorType =
|
|
|
|
llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, TDTypeName);
|
|
|
|
return TypeDescriptorType;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns the storage type for a pointer field in RTTI/EH data: the pointer
// type itself on 32-bit targets, or IntTy (a 32-bit image-relative offset)
// when isImageRelative() is true.
llvm::Type *getImageRelativeType(llvm::Type *PtrType) {
  if (!isImageRelative())
    return PtrType;
  return CGM.IntTy;
}
|
|
|
|
|
|
|
|
// Lazily builds (and caches) the struct type for an MSVC BaseClassDescriptor.
llvm::StructType *getBaseClassDescriptorType() {
  if (BaseClassDescriptorType)
    return BaseClassDescriptorType;
  llvm::Type *FieldTypes[] = {
      getImageRelativeType(CGM.Int8PtrTy),
      CGM.IntTy,
      CGM.IntTy,
      CGM.IntTy,
      CGM.IntTy,
      CGM.IntTy,
      getImageRelativeType(getClassHierarchyDescriptorType()->getPointerTo()),
  };
  BaseClassDescriptorType = llvm::StructType::create(
      CGM.getLLVMContext(), FieldTypes, "rtti.BaseClassDescriptor");
  return BaseClassDescriptorType;
}
|
|
|
|
|
|
|
|
// Lazily builds (and caches) the struct type for an MSVC
// ClassHierarchyDescriptor. Created opaque first and filled in with
// setBody() because it participates in a type cycle with
// BaseClassDescriptor.
llvm::StructType *getClassHierarchyDescriptorType() {
  if (ClassHierarchyDescriptorType)
    return ClassHierarchyDescriptorType;
  // Forward-declare RTTIClassHierarchyDescriptor to break a cycle.
  ClassHierarchyDescriptorType = llvm::StructType::create(
      CGM.getLLVMContext(), "rtti.ClassHierarchyDescriptor");
  llvm::Type *FieldTypes[] = {
      CGM.IntTy,
      CGM.IntTy,
      CGM.IntTy,
      getImageRelativeType(
          getBaseClassDescriptorType()->getPointerTo()->getPointerTo()),
  };
  ClassHierarchyDescriptorType->setBody(FieldTypes);
  return ClassHierarchyDescriptorType;
}
|
|
|
|
|
|
|
|
// Lazily builds (and caches) the struct type for an MSVC
// CompleteObjectLocator. The final self-referential field exists only in
// the image-relative (64-bit) layout and is dropped otherwise.
llvm::StructType *getCompleteObjectLocatorType() {
  if (CompleteObjectLocatorType)
    return CompleteObjectLocatorType;
  CompleteObjectLocatorType = llvm::StructType::create(
      CGM.getLLVMContext(), "rtti.CompleteObjectLocator");
  llvm::Type *FieldTypes[] = {
      CGM.IntTy,
      CGM.IntTy,
      CGM.IntTy,
      getImageRelativeType(CGM.Int8PtrTy),
      getImageRelativeType(getClassHierarchyDescriptorType()->getPointerTo()),
      getImageRelativeType(CompleteObjectLocatorType),
  };
  llvm::ArrayRef<llvm::Type *> FieldTypesRef(FieldTypes);
  if (!isImageRelative())
    FieldTypesRef = FieldTypesRef.drop_back();
  CompleteObjectLocatorType->setBody(FieldTypesRef);
  return CompleteObjectLocatorType;
}
|
|
|
|
|
|
|
|
// Returns the __ImageBase global, creating an external declaration on first
// use; image-relative offsets are computed against this symbol.
llvm::GlobalVariable *getImageBase() {
  StringRef Name = "__ImageBase";
  if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name))
    return GV;

  auto *GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty,
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::ExternalLinkage,
                                      /*Initializer=*/nullptr, Name);
  CGM.setDSOLocal(GV);
  return GV;
}
|
|
|
|
|
|
|
|
// Converts a pointer constant into the form stored in RTTI/EH data: the
// pointer itself on 32-bit targets, or a 32-bit offset from __ImageBase.
llvm::Constant *getImageRelativeConstant(llvm::Constant *PtrVal) {
  if (!isImageRelative())
    return PtrVal;

  // Null pointers stay null; don't emit a (null - __ImageBase) expression.
  if (PtrVal->isNullValue())
    return llvm::Constant::getNullValue(CGM.IntTy);

  llvm::Constant *ImageBaseAsInt =
      llvm::ConstantExpr::getPtrToInt(getImageBase(), CGM.IntPtrTy);
  llvm::Constant *PtrValAsInt =
      llvm::ConstantExpr::getPtrToInt(PtrVal, CGM.IntPtrTy);
  llvm::Constant *Diff =
      llvm::ConstantExpr::getSub(PtrValAsInt, ImageBaseAsInt,
                                 /*HasNUW=*/true, /*HasNSW=*/true);
  return llvm::ConstantExpr::getTrunc(Diff, CGM.IntTy);
}
|
|
|
|
|
2013-03-23 03:02:54 +08:00
|
|
|
private:
// Narrows the base-class mangle context to the Microsoft flavor this ABI
// always uses.
MicrosoftMangleContext &getMangleContext() {
  return cast<MicrosoftMangleContext>(CodeGen::CGCXXABI::getMangleContext());
}
|
|
|
|
|
2013-04-12 02:13:19 +08:00
|
|
|
// Constant 0 of the target int type (used in member-pointer fields).
llvm::Constant *getZeroInt() {
  return llvm::ConstantInt::get(CGM.IntTy, 0);
}
|
|
|
|
|
2013-04-12 02:13:19 +08:00
|
|
|
// Constant -1 (all bits set) of the target int type.
llvm::Constant *getAllOnesInt() {
  return llvm::Constant::getAllOnesValue(CGM.IntTy);
}
|
|
|
|
|
2016-07-01 10:41:25 +08:00
|
|
|
CharUnits getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) override;
|
2014-03-15 01:43:37 +08:00
|
|
|
|
2013-04-12 02:13:19 +08:00
|
|
|
void
|
|
|
|
GetNullMemberPointerFields(const MemberPointerType *MPT,
|
|
|
|
llvm::SmallVectorImpl<llvm::Constant *> &fields);
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Shared code for virtual base adjustment. Returns the offset from
|
2013-05-30 02:02:47 +08:00
|
|
|
/// the vbptr to the virtual base. Optionally returns the address of the
|
|
|
|
/// vbptr itself.
|
|
|
|
llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Base,
|
2013-05-30 02:02:47 +08:00
|
|
|
llvm::Value *VBPtrOffset,
|
|
|
|
llvm::Value *VBTableOffset,
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Value **VBPtr = nullptr);
|
2013-05-30 02:02:47 +08:00
|
|
|
|
2013-10-30 19:55:43 +08:00
|
|
|
// Convenience overload taking compile-time offsets; wraps them in IntTy
// constants and forwards to the llvm::Value* version above.
llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
                                     Address Base,
                                     int32_t VBPtrOffset,
                                     int32_t VBTableOffset,
                                     llvm::Value **VBPtr = nullptr) {
  assert(VBTableOffset % 4 == 0 && "should be byte offset into table of i32s");
  llvm::Value *VBPOffset = llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
              *VBTOffset = llvm::ConstantInt::get(CGM.IntTy, VBTableOffset);
  return GetVBaseOffsetFromVBPtr(CGF, Base, VBPOffset, VBTOffset, VBPtr);
}
|
|
|
|
|
2017-12-14 05:53:04 +08:00
|
|
|
// NOTE(review): from the declaration alone this appears to adjust Value
// to its most-derived object, returning the adjusted address, an offset
// value, and the source record's class — confirm against the definition.
std::tuple<Address, llvm::Value *, const CXXRecordDecl *>
performBaseAdjustment(CodeGenFunction &CGF, Address Value,
                      QualType SrcRecordTy);
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Performs a full virtual base adjustment. Used to dereference
|
2013-05-30 02:02:47 +08:00
|
|
|
/// pointers to members of virtual bases.
|
2014-02-21 07:22:07 +08:00
|
|
|
llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const Expr *E,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
const CXXRecordDecl *RD, Address Base,
|
2013-04-12 02:13:19 +08:00
|
|
|
llvm::Value *VirtualBaseAdjustmentOffset,
|
|
|
|
llvm::Value *VBPtrOffset /* optional */);
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Emits a full member pointer with the fields common to data and
|
2013-05-03 09:15:11 +08:00
|
|
|
/// function member pointers.
|
|
|
|
llvm::Constant *EmitFullMemberPointer(llvm::Constant *FirstField,
|
|
|
|
bool IsMemberFunction,
|
2013-05-10 05:01:17 +08:00
|
|
|
const CXXRecordDecl *RD,
|
2015-05-11 05:48:08 +08:00
|
|
|
CharUnits NonVirtualBaseAdjustment,
|
|
|
|
unsigned VBTableIndex);
|
2013-05-10 05:01:17 +08:00
|
|
|
|
|
|
|
bool MemberPointerConstantIsNull(const MemberPointerType *MPT,
|
|
|
|
llvm::Constant *MP);
|
2013-05-03 09:15:11 +08:00
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// - Initialize all vbptrs of 'this' with RD as the complete type.
|
2013-06-19 23:20:38 +08:00
|
|
|
void EmitVBPtrStores(CodeGenFunction &CGF, const CXXRecordDecl *RD);
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Caching wrapper around VBTableBuilder::enumerateVBTables().
|
2014-01-03 08:14:35 +08:00
|
|
|
const VBTableGlobals &enumerateVBTables(const CXXRecordDecl *RD);
|
2013-06-19 23:20:38 +08:00
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Generate a thunk for calling a virtual member function MD.
|
2018-04-03 04:00:39 +08:00
|
|
|
llvm::Function *EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
|
|
|
|
const MethodVFTableLocation &ML);
|
2013-11-16 01:24:45 +08:00
|
|
|
|
2019-10-29 08:05:34 +08:00
|
|
|
llvm::Constant *EmitMemberDataPointer(const CXXRecordDecl *RD,
|
|
|
|
CharUnits offset);
|
|
|
|
|
2013-03-23 03:02:54 +08:00
|
|
|
public:
|
2014-03-12 14:41:41 +08:00
|
|
|
llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
|
2013-04-12 02:13:19 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
bool isZeroInitializable(const MemberPointerType *MPT) override;
|
2013-04-12 02:13:19 +08:00
|
|
|
|
2014-08-08 06:56:13 +08:00
|
|
|
bool isMemberPointerConvertible(const MemberPointerType *MPT) const override {
|
|
|
|
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
|
2014-09-19 06:05:54 +08:00
|
|
|
return RD->hasAttr<MSInheritanceAttr>();
|
|
|
|
}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;

llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
                                      CharUnits offset) override;
llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
|
2013-03-23 03:02:54 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
                                         llvm::Value *L,
                                         llvm::Value *R,
                                         const MemberPointerType *MPT,
                                         bool Inequality) override;

llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                        llvm::Value *MemPtr,
                                        const MemberPointerType *MPT) override;
|
2013-03-23 03:02:54 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
                             Address Base, llvm::Value *MemPtr,
                             const MemberPointerType *MPT) override;
|
2013-03-23 03:02:54 +08:00
|
|
|
|
2015-06-29 08:06:50 +08:00
|
|
|
llvm::Value *EmitNonNullMemberPointerConversion(
    const MemberPointerType *SrcTy, const MemberPointerType *DstTy,
    CastKind CK, CastExpr::path_const_iterator PathBegin,
    CastExpr::path_const_iterator PathEnd, llvm::Value *Src,
    CGBuilderTy &Builder);

llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                         const CastExpr *E,
                                         llvm::Value *Src) override;

llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
                                            llvm::Constant *Src) override;

llvm::Constant *EmitMemberPointerConversion(
    const MemberPointerType *SrcTy, const MemberPointerType *DstTy,
    CastKind CK, CastExpr::path_const_iterator PathBegin,
    CastExpr::path_const_iterator PathEnd, llvm::Constant *Src);
|
|
|
|
|
2016-10-27 07:46:34 +08:00
|
|
|
CGCallee
|
2014-02-21 07:22:07 +08:00
|
|
|
EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address This, llvm::Value *&ThisPtrForCall,
|
|
|
|
llvm::Value *MemPtr,
|
2014-03-12 14:41:41 +08:00
|
|
|
const MemberPointerType *MPT) override;
|
2013-04-12 02:13:19 +08:00
|
|
|
|
2019-03-23 07:05:10 +08:00
|
|
|
void emitCXXStructor(GlobalDecl GD) override;
|
2014-09-16 03:20:10 +08:00
|
|
|
|
2015-03-05 08:46:22 +08:00
|
|
|
  /// Lazily create the LLVM struct type for a Microsoft EH CatchableType,
  /// the descriptor for one type that a thrown object may be caught as.
  /// The result is cached in the CatchableTypeType member.
  llvm::StructType *getCatchableTypeType() {
    if (CatchableTypeType)
      return CatchableTypeType;
    // Field layout mirrors the MSVC runtime's _CatchableType record;
    // pointer fields are image-relative on targets that use RVAs.
    llvm::Type *FieldTypes[] = {
        CGM.IntTy,                           // Flags
        getImageRelativeType(CGM.Int8PtrTy), // TypeDescriptor
        CGM.IntTy,                           // NonVirtualAdjustment
        CGM.IntTy,                           // OffsetToVBPtr
        CGM.IntTy,                           // VBTableIndex
        CGM.IntTy,                           // Size
        getImageRelativeType(CGM.Int8PtrTy)  // CopyCtor
    };
    CatchableTypeType = llvm::StructType::create(
        CGM.getLLVMContext(), FieldTypes, "eh.CatchableType");
    return CatchableTypeType;
  }
|
|
|
|
|
|
|
|
  /// Lazily create the LLVM struct type for a CatchableTypeArray with
  /// \p NumEntries image-relative pointers to CatchableType descriptors.
  /// Each distinct arity gets its own named struct type, cached in
  /// CatchableTypeArrayTypeMap.
  llvm::StructType *getCatchableTypeArrayType(uint32_t NumEntries) {
    llvm::StructType *&CatchableTypeArrayType =
        CatchableTypeArrayTypeMap[NumEntries];
    if (CatchableTypeArrayType)
      return CatchableTypeArrayType;

    // Encode the arity into the type name, e.g. "eh.CatchableTypeArray.2".
    llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray.");
    CTATypeName += llvm::utostr(NumEntries);
    llvm::Type *CTType =
        getImageRelativeType(getCatchableTypeType()->getPointerTo());
    llvm::Type *FieldTypes[] = {
        CGM.IntTy,                               // NumEntries
        llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes
    };
    CatchableTypeArrayType =
        llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, CTATypeName);
    return CatchableTypeArrayType;
  }
|
|
|
|
|
|
|
|
  /// Lazily create the LLVM struct type for the Microsoft EH ThrowInfo
  /// record that accompanies a thrown exception object. Cached in the
  /// ThrowInfoType member.
  llvm::StructType *getThrowInfoType() {
    if (ThrowInfoType)
      return ThrowInfoType;
    llvm::Type *FieldTypes[] = {
        CGM.IntTy,                           // Flags
        getImageRelativeType(CGM.Int8PtrTy), // CleanupFn
        getImageRelativeType(CGM.Int8PtrTy), // ForwardCompat
        getImageRelativeType(CGM.Int8PtrTy)  // CatchableTypeArray
    };
    ThrowInfoType = llvm::StructType::create(CGM.getLLVMContext(), FieldTypes,
                                             "eh.ThrowInfo");
    return ThrowInfoType;
  }
|
|
|
|
|
2019-02-06 00:42:33 +08:00
|
|
|
  /// Get (declaring it on first use) the _CxxThrowException runtime
  /// function used to raise C++ exceptions under the Microsoft ABI.
  llvm::FunctionCallee getThrowFn() {
    // _CxxThrowException is passed an exception object and a ThrowInfo object
    // which describes the exception.
    llvm::Type *Args[] = {CGM.Int8PtrTy, getThrowInfoType()->getPointerTo()};
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
    llvm::FunctionCallee Throw =
        CGM.CreateRuntimeFunction(FTy, "_CxxThrowException");
    // _CxxThrowException is stdcall on 32-bit x86 platforms.
    if (CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) {
      // The callee is not necessarily a Function (CreateRuntimeFunction may
      // hand back a casted existing global), hence the dyn_cast guard.
      if (auto *Fn = dyn_cast<llvm::Function>(Throw.getCallee()))
        Fn->setCallingConv(llvm::CallingConv::X86_StdCall);
    }
    return Throw;
  }
|
|
|
|
|
2015-03-14 06:36:55 +08:00
|
|
|
llvm::Function *getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
|
|
|
|
CXXCtorType CT);
|
2015-03-12 02:36:39 +08:00
|
|
|
|
2015-03-05 08:46:22 +08:00
|
|
|
llvm::Constant *getCatchableType(QualType T,
|
|
|
|
uint32_t NVOffset = 0,
|
|
|
|
int32_t VBPtrOffset = -1,
|
|
|
|
uint32_t VBIndex = 0);
|
|
|
|
|
|
|
|
llvm::GlobalVariable *getCatchableTypeArray(QualType T);
|
|
|
|
|
2015-03-14 02:26:17 +08:00
|
|
|
llvm::GlobalVariable *getThrowInfo(QualType T) override;
|
2015-03-05 08:46:22 +08:00
|
|
|
|
2017-12-14 05:53:04 +08:00
|
|
|
std::pair<llvm::Value *, const CXXRecordDecl *>
|
|
|
|
LoadVTablePtr(CodeGenFunction &CGF, Address This,
|
|
|
|
const CXXRecordDecl *RD) override;
|
|
|
|
|
2013-06-19 23:20:38 +08:00
|
|
|
private:
|
2013-09-27 22:48:01 +08:00
|
|
|
typedef std::pair<const CXXRecordDecl *, CharUnits> VFTableIdTy;
|
2014-07-02 04:30:31 +08:00
|
|
|
typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalVariable *> VTablesMapTy;
|
|
|
|
typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalValue *> VFTablesMapTy;
|
2018-05-09 09:00:01 +08:00
|
|
|
/// All the vftables that have been referenced.
|
2013-09-27 22:48:01 +08:00
|
|
|
VFTablesMapTy VFTablesMap;
|
2014-07-02 04:30:31 +08:00
|
|
|
VTablesMapTy VTablesMap;
|
2013-09-27 22:48:01 +08:00
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// This set holds the record decls we've deferred vtable emission for.
|
2013-09-27 22:48:01 +08:00
|
|
|
llvm::SmallPtrSet<const CXXRecordDecl *, 4> DeferredVFTables;
|
|
|
|
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// All the vbtables which have been referenced.
|
2014-01-03 08:14:35 +08:00
|
|
|
llvm::DenseMap<const CXXRecordDecl *, VBTableGlobals> VBTablesMap;
|
2013-09-11 04:14:30 +08:00
|
|
|
|
|
|
|
/// Info on the global variable used to guard initialization of static locals.
|
|
|
|
/// The BitIndex field is only used for externally invisible declarations.
|
|
|
|
struct GuardInfo {
|
2014-05-21 13:09:00 +08:00
|
|
|
GuardInfo() : Guard(nullptr), BitIndex(0) {}
|
2013-09-11 04:14:30 +08:00
|
|
|
llvm::GlobalVariable *Guard;
|
|
|
|
unsigned BitIndex;
|
|
|
|
};
|
|
|
|
|
|
|
|
/// Map from DeclContext to the current guard variable. We assume that the
|
|
|
|
/// AST is visited in source code order.
|
|
|
|
llvm::DenseMap<const DeclContext *, GuardInfo> GuardVariableMap;
|
2015-05-08 05:19:06 +08:00
|
|
|
llvm::DenseMap<const DeclContext *, GuardInfo> ThreadLocalGuardVariableMap;
|
2015-05-07 14:15:46 +08:00
|
|
|
llvm::DenseMap<const DeclContext *, unsigned> ThreadSafeGuardNumMap;
|
2014-07-07 16:09:15 +08:00
|
|
|
|
|
|
|
llvm::DenseMap<size_t, llvm::StructType *> TypeDescriptorTypeMap;
|
|
|
|
llvm::StructType *BaseClassDescriptorType;
|
|
|
|
llvm::StructType *ClassHierarchyDescriptorType;
|
|
|
|
llvm::StructType *CompleteObjectLocatorType;
|
2015-03-05 08:46:22 +08:00
|
|
|
|
|
|
|
llvm::DenseMap<QualType, llvm::GlobalVariable *> CatchableTypeArrays;
|
|
|
|
|
|
|
|
llvm::StructType *CatchableTypeType;
|
|
|
|
llvm::DenseMap<uint32_t, llvm::StructType *> CatchableTypeArrayTypeMap;
|
|
|
|
llvm::StructType *ThrowInfoType;
|
2010-06-10 07:25:41 +08:00
|
|
|
};
|
|
|
|
|
2015-06-23 07:07:51 +08:00
|
|
|
}
|
2010-06-10 07:25:41 +08:00
|
|
|
|
2014-05-03 08:33:28 +08:00
|
|
|
/// Decide how a C++ record type is passed as a function argument for the
/// current target under the Microsoft ABI: by value in registers, directly
/// in argument memory, or indirectly by address.
CGCXXABI::RecordArgABI
MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
  // Use the default C calling convention rules for things that can be passed in
  // registers, i.e. non-trivially copyable records or records marked with
  // [[trivial_abi]].
  if (RD->canPassInRegisters())
    return RAA_Default;

  switch (CGM.getTarget().getTriple().getArch()) {
  default:
    // FIXME: Implement for other architectures.
    return RAA_Indirect;

  case llvm::Triple::thumb:
    // Pass things indirectly for now because it is simple.
    // FIXME: This is incompatible with MSVC for arguments with a dtor and no
    // copy ctor.
    return RAA_Indirect;

  case llvm::Triple::x86: {
    // If the argument has *required* alignment greater than four bytes, pass
    // it indirectly. Prior to MSVC version 19.14, passing overaligned
    // arguments was not supported and resulted in a compiler error. In 19.14
    // and later versions, such arguments are now passed indirectly.
    TypeInfo Info = getContext().getTypeInfo(RD->getTypeForDecl());
    if (Info.AlignIsRequired && Info.Align > 4)
      return RAA_Indirect;

    // If C++ prohibits us from making a copy, construct the arguments directly
    // into argument memory.
    return RAA_DirectInMemory;
  }

  case llvm::Triple::x86_64:
  case llvm::Triple::aarch64:
    return RAA_Indirect;
  }

  llvm_unreachable("invalid enum");
}
|
|
|
|
|
2014-11-01 15:37:17 +08:00
|
|
|
/// Emit a `delete` of a polymorphic object through its vtable. For a plain
/// `delete` the virtual deleting destructor (Dtor_Deleting) both destroys
/// and frees; for `::delete` we instead run the complete destructor and
/// emit the global operator delete call ourselves.
void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                              const CXXDeleteExpr *DE,
                                              Address Ptr,
                                              QualType ElementType,
                                              const CXXDestructorDecl *Dtor) {
  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  bool UseGlobalDelete = DE->isGlobalDelete();
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  // The virtual destructor call returns the adjusted "most derived" this
  // pointer, which is what operator delete must be given.
  llvm::Value *MDThis = EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
  if (UseGlobalDelete)
    CGF.EmitDeleteCall(DE->getOperatorDelete(), MDThis, ElementType);
}
|
|
|
|
|
2014-11-25 15:20:20 +08:00
|
|
|
/// Re-raise the exception currently being handled. Calling
/// _CxxThrowException with a null exception object and a null ThrowInfo
/// is the MSVC runtime's rethrow protocol.
void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  llvm::Value *NullObj = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
  llvm::Value *NullThrowInfo =
      llvm::ConstantPointerNull::get(getThrowInfoType()->getPointerTo());
  llvm::Value *Args[] = {NullObj, NullThrowInfo};
  llvm::FunctionCallee ThrowFn = getThrowFn();
  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(ThrowFn, Args);
  else
    CGF.EmitRuntimeCallOrInvoke(ThrowFn, Args);
}
|
|
|
|
|
2015-03-04 03:21:04 +08:00
|
|
|
/// Set up the start of a catch handler: bind the current catchpad as the
/// active funclet pad and, if the catch names a parameter, wire its storage
/// into the catchpad.
void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                     const CXXCatchStmt *S) {
  // In the MS ABI, the runtime handles the copy, and the catch handler is
  // responsible for destruction.
  VarDecl *CatchParam = S->getExceptionDecl();
  llvm::BasicBlock *CatchPadBB = CGF.Builder.GetInsertBlock();
  // The catchpad instruction heads the current block.
  llvm::CatchPadInst *CPI =
      cast<llvm::CatchPadInst>(CatchPadBB->getFirstNonPHI());
  CGF.CurrentFuncletPad = CPI;

  // If this is a catch-all or the catch parameter is unnamed, we don't need to
  // emit an alloca to the object.
  if (!CatchParam || !CatchParam->getDeclName()) {
    CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
    return;
  }

  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
  // Point the catchpad's object slot (arg operand 2) at the parameter's
  // storage so the runtime can deposit the caught object there.
  CPI->setArgOperand(2, var.getObjectAddress(CGF).getPointer());
  CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
  CGF.EmitAutoVarCleanups(var);
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
/// We need to perform a generic polymorphic operation (like a typeid
|
|
|
|
/// or a cast), which requires an object with a vfptr. Adjust the
|
|
|
|
/// address to point to an object with a vfptr.
|
2017-12-14 05:53:04 +08:00
|
|
|
std::tuple<Address, llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
                                       QualType SrcRecordTy) {
  // Returns the adjusted address, the offset that was applied, and the
  // record whose vfptr the adjusted address points at.
  Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy);
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const ASTContext &Context = getContext();

  // If the class itself has a vfptr, great. This check implicitly
  // covers non-virtual base subobjects: a class with its own virtual
  // functions would be a candidate to be a primary base.
  if (Context.getASTRecordLayout(SrcDecl).hasExtendableVFPtr())
    return std::make_tuple(Value, llvm::ConstantInt::get(CGF.Int32Ty, 0),
                           SrcDecl);

  // Okay, one of the vbases must have a vfptr, or else this isn't
  // actually a polymorphic class.
  const CXXRecordDecl *PolymorphicBase = nullptr;
  for (auto &Base : SrcDecl->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (Context.getASTRecordLayout(BaseDecl).hasExtendableVFPtr()) {
      PolymorphicBase = BaseDecl;
      break;
    }
  }
  assert(PolymorphicBase && "polymorphic class has no apparent vfptr?");

  // Step to the chosen virtual base with a GEP and recompute the alignment
  // that holds for that base subobject.
  llvm::Value *Offset =
      GetVirtualBaseClassOffset(CGF, Value, SrcDecl, PolymorphicBase);
  llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(Value.getPointer(), Offset);
  CharUnits VBaseAlign =
      CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
  return std::make_tuple(Address(Ptr, VBaseAlign), Offset, PolymorphicBase);
}
|
|
|
|
|
|
|
|
bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
|
|
|
|
QualType SrcRecordTy) {
|
|
|
|
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
|
|
|
|
return IsDeref &&
|
2015-03-15 07:44:48 +08:00
|
|
|
!getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
|
2014-06-23 03:05:33 +08:00
|
|
|
}
|
|
|
|
|
2019-01-30 10:54:28 +08:00
|
|
|
/// Emit a call (or invoke, when inside an EH scope) to the MSVC runtime
/// helper `i8* __RTtypeid(i8*)` with the given object pointer.
static llvm::CallBase *emitRTtypeidCall(CodeGenFunction &CGF,
                                        llvm::Value *Argument) {
  llvm::FunctionType *RTtypeidTy = llvm::FunctionType::get(
      CGF.Int8PtrTy, {CGF.Int8PtrTy}, /*isVarArg=*/false);
  llvm::FunctionCallee RTtypeid =
      CGF.CGM.CreateRuntimeFunction(RTtypeidTy, "__RTtypeid");
  llvm::Value *CallArgs[] = {Argument};
  return CGF.EmitRuntimeCallOrInvoke(RTtypeid, CallArgs);
}
|
|
|
|
|
|
|
|
/// Emit the failure path for typeid: __RTtypeid is handed a null object
/// pointer, the call is marked noreturn, and the block is terminated
/// with unreachable.
void MicrosoftCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *NullObj = llvm::Constant::getNullValue(CGM.VoidPtrTy);
  llvm::CallBase *TypeidCall = emitRTtypeidCall(CGF, NullObj);
  TypeidCall->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}
|
|
|
|
|
|
|
|
// Emit a polymorphic typeid expression under the MSVC ABI: adjust the
// object pointer to the complete object, query __RTtypeid, and cast the
// returned i8* to the std::type_info pointer type the caller expects.
llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                         QualType SrcRecordTy,
                                         Address ThisPtr,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Only the adjusted address is needed; discard the other results of the
  // base adjustment.
  std::tie(ThisPtr, std::ignore, std::ignore) =
      performBaseAdjustment(CGF, ThisPtr, SrcRecordTy);
  return CGF.Builder.CreateBitCast(
      emitRTtypeidCall(CGF, ThisPtr.getPointer()), StdTypeInfoPtrTy);
}
|
|
|
|
|
|
|
|
bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
|
|
|
|
QualType SrcRecordTy) {
|
|
|
|
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
|
|
|
|
return SrcIsPtr &&
|
2015-03-15 07:44:48 +08:00
|
|
|
!getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
|
2014-06-23 03:05:33 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Emit a checked dynamic_cast by calling the MSVC runtime helper
// __RTDynamicCast.  The source object pointer is first adjusted to the
// complete object; the helper performs the actual hierarchy walk using the
// source and destination RTTI descriptors and, for reference casts
// (isReference != 0), raises the failure itself.  The returned i8* is cast
// to the destination's LLVM type.
// NOTE(review): CastEnd is not referenced in this body — presumably it is
// part of the common CGCXXABI hook signature; confirm against the
// Itanium counterpart.
llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address This, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  // RTTI descriptors identify the static source and target types to the
  // runtime; qualifiers are irrelevant to the cast, so strip them.
  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Adjust 'This' to the complete object; Offset is the adjustment that was
  // applied, which the runtime needs as VfDelta (truncated to LONG/i32).
  llvm::Value *Offset;
  std::tie(This, Offset, std::ignore) =
      performBaseAdjustment(CGF, This, SrcRecordTy);
  llvm::Value *ThisPtr = This.getPointer();
  Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty);

  // PVOID __RTDynamicCast(
  //   PVOID inptr,
  //   LONG VfDelta,
  //   PVOID SrcType,
  //   PVOID TargetType,
  //   BOOL isReference)
  llvm::Type *ArgTypes[] = {CGF.Int8PtrTy, CGF.Int32Ty, CGF.Int8PtrTy,
                            CGF.Int8PtrTy, CGF.Int32Ty};
  llvm::FunctionCallee Function = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
      "__RTDynamicCast");
  llvm::Value *Args[] = {
      ThisPtr, Offset, SrcRTTI, DestRTTI,
      llvm::ConstantInt::get(CGF.Int32Ty, DestTy->isReferenceType())};
  // May be an invoke when emitted inside an EH scope.
  ThisPtr = CGF.EmitRuntimeCallOrInvoke(Function, Args);
  return CGF.Builder.CreateBitCast(ThisPtr, DestLTy);
}
|
|
|
|
|
|
|
|
// Emit dynamic_cast<void*>: adjust the operand to the complete object and
// call the MSVC runtime helper __RTCastToVoid, which returns the pointer to
// the most-derived object.  Unlike __RTDynamicCast this helper cannot fail,
// so a plain call (not call-or-invoke) suffices.
// NOTE(review): DestTy is not referenced in this body — it appears to exist
// only to match the CGCXXABI hook signature; confirm.
llvm::Value *
MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
                                       QualType SrcRecordTy,
                                       QualType DestTy) {
  // Only the adjusted address is needed; discard the offset results.
  std::tie(Value, std::ignore, std::ignore) =
      performBaseAdjustment(CGF, Value, SrcRecordTy);

  // PVOID __RTCastToVoid(
  //   PVOID inptr)
  llvm::Type *ArgTypes[] = {CGF.Int8PtrTy};
  llvm::FunctionCallee Function = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
      "__RTCastToVoid");
  llvm::Value *Args[] = {Value.getPointer()};
  return CGF.EmitRuntimeCall(Function, Args);
}
|
|
|
|
|
|
|
|
// Returning false reports that no bad-cast call was emitted here: under
// this ABI the failure of a reference dynamic_cast is presumably raised by
// the __RTDynamicCast helper itself (see EmitDynamicCastCall's isReference
// argument) — confirm against the caller in CGExprCXX.
bool MicrosoftCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  return false;
}
|
|
|
|
|
2014-09-02 02:50:02 +08:00
|
|
|
// Compute, at runtime, the offset from 'This' to the virtual base
// BaseClassDecl of ClassDecl.  The MSVC scheme: the object holds a vbptr at
// a statically known offset; the vbtable it points at holds int-sized
// entries, and the entry at the base's vbtable index gives the displacement
// from the vbptr to the base.  The returned value is the sum of the static
// vbptr offset and the loaded displacement, as a ptrdiff_t.
llvm::Value *MicrosoftCXXABI::GetVirtualBaseClassOffset(
    CodeGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl,
    const CXXRecordDecl *BaseClassDecl) {
  const ASTContext &Context = getContext();
  // Static offset of the vbptr within the complete object.
  int64_t VBPtrChars =
      Context.getASTRecordLayout(ClassDecl).getVBPtrOffset().getQuantity();
  llvm::Value *VBPtrOffset = llvm::ConstantInt::get(CGM.PtrDiffTy, VBPtrChars);
  // Byte offset of the base's slot within the vbtable: index * sizeof(int).
  CharUnits IntSize = Context.getTypeSizeInChars(Context.IntTy);
  CharUnits VBTableChars =
      IntSize *
      CGM.getMicrosoftVTableContext().getVBTableIndex(ClassDecl, BaseClassDecl);
  llvm::Value *VBTableOffset =
      llvm::ConstantInt::get(CGM.IntTy, VBTableChars.getQuantity());

  // Load the vbptr-to-base displacement from the vbtable entry.
  llvm::Value *VBPtrToNewBase =
      GetVBaseOffsetFromVBPtr(CGF, This, VBPtrOffset, VBTableOffset);
  // Widen the int-sized table entry to ptrdiff_t before the final add.
  VBPtrToNewBase =
      CGF.Builder.CreateSExtOrBitCast(VBPtrToNewBase, CGM.PtrDiffTy);
  return CGF.Builder.CreateNSWAdd(VBPtrOffset, VBPtrToNewBase);
}
|
|
|
|
|
2013-07-01 04:40:16 +08:00
|
|
|
// Under this ABI, constructors are the declarations treated as returning
// 'this' (the dtor case is handled separately via hasMostDerivedReturn).
bool MicrosoftCXXABI::HasThisReturn(GlobalDecl GD) const {
  const Decl *D = GD.getDecl();
  return isa<CXXConstructorDecl>(D);
}
|
|
|
|
|
2014-11-01 04:09:12 +08:00
|
|
|
/// Returns true iff \p GD names the deleting variant of a destructor.
static bool isDeletingDtor(GlobalDecl GD) {
  if (!isa<CXXDestructorDecl>(GD.getDecl()))
    return false;
  return GD.getDtorType() == Dtor_Deleting;
}
|
|
|
|
|
|
|
|
// Deleting destructors are the only declarations with a "most derived"
// return under this ABI (see isDeletingDtor above for the exact test).
bool MicrosoftCXXABI::hasMostDerivedReturn(GlobalDecl GD) const {
  return isDeletingDtor(GD);
}
|
|
|
|
|
2020-10-14 10:58:39 +08:00
|
|
|
static bool isCXX14Aggregate(const CXXRecordDecl *RD) {
|
[COFF, ARM64] Fix ABI implementation of struct returns
Summary:
Related llvm patch: D60348.
Patch co-authored by Sanjin Sijaric.
Reviewers: rnk, efriedma, TomTan, ssijaric, ostannard
Reviewed By: efriedma
Subscribers: dmajor, richard.townsend.arm, ostannard, javed.absar, kristof.beyls, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D60349
llvm-svn: 359932
2019-05-04 05:12:24 +08:00
|
|
|
// For AArch64, we use the C++14 definition of an aggregate, so we also
|
|
|
|
// check for:
|
|
|
|
// No private or protected non static data members.
|
|
|
|
// No base classes
|
|
|
|
// No virtual functions
|
|
|
|
// Additionally, we need to ensure that there is a trivial copy assignment
|
|
|
|
// operator, a trivial destructor and no user-provided constructors.
|
|
|
|
if (RD->hasProtectedFields() || RD->hasPrivateFields())
|
2020-10-14 10:58:39 +08:00
|
|
|
return false;
|
[COFF, ARM64] Fix ABI implementation of struct returns
Summary:
Related llvm patch: D60348.
Patch co-authored by Sanjin Sijaric.
Reviewers: rnk, efriedma, TomTan, ssijaric, ostannard
Reviewed By: efriedma
Subscribers: dmajor, richard.townsend.arm, ostannard, javed.absar, kristof.beyls, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D60349
llvm-svn: 359932
2019-05-04 05:12:24 +08:00
|
|
|
if (RD->getNumBases() > 0)
|
2020-10-14 10:58:39 +08:00
|
|
|
return false;
|
[COFF, ARM64] Fix ABI implementation of struct returns
Summary:
Related llvm patch: D60348.
Patch co-authored by Sanjin Sijaric.
Reviewers: rnk, efriedma, TomTan, ssijaric, ostannard
Reviewed By: efriedma
Subscribers: dmajor, richard.townsend.arm, ostannard, javed.absar, kristof.beyls, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D60349
llvm-svn: 359932
2019-05-04 05:12:24 +08:00
|
|
|
if (RD->isPolymorphic())
|
2020-10-14 10:58:39 +08:00
|
|
|
return false;
|
[COFF, ARM64] Fix ABI implementation of struct returns
Summary:
Related llvm patch: D60348.
Patch co-authored by Sanjin Sijaric.
Reviewers: rnk, efriedma, TomTan, ssijaric, ostannard
Reviewed By: efriedma
Subscribers: dmajor, richard.townsend.arm, ostannard, javed.absar, kristof.beyls, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D60349
llvm-svn: 359932
2019-05-04 05:12:24 +08:00
|
|
|
if (RD->hasNonTrivialCopyAssignment())
|
2020-10-14 10:58:39 +08:00
|
|
|
return false;
|
[COFF, ARM64] Fix ABI implementation of struct returns
Summary:
Related llvm patch: D60348.
Patch co-authored by Sanjin Sijaric.
Reviewers: rnk, efriedma, TomTan, ssijaric, ostannard
Reviewed By: efriedma
Subscribers: dmajor, richard.townsend.arm, ostannard, javed.absar, kristof.beyls, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D60349
llvm-svn: 359932
2019-05-04 05:12:24 +08:00
|
|
|
for (const CXXConstructorDecl *Ctor : RD->ctors())
|
|
|
|
if (Ctor->isUserProvided())
|
2020-10-14 10:58:39 +08:00
|
|
|
return false;
|
[COFF, ARM64] Fix ABI implementation of struct returns
Summary:
Related llvm patch: D60348.
Patch co-authored by Sanjin Sijaric.
Reviewers: rnk, efriedma, TomTan, ssijaric, ostannard
Reviewed By: efriedma
Subscribers: dmajor, richard.townsend.arm, ostannard, javed.absar, kristof.beyls, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D60349
llvm-svn: 359932
2019-05-04 05:12:24 +08:00
|
|
|
if (RD->hasNonTrivialDestructor())
|
2020-10-14 10:58:39 +08:00
|
|
|
return false;
|
|
|
|
return true;
|
[COFF, ARM64] Fix ABI implementation of struct returns
Summary:
Related llvm patch: D60348.
Patch co-authored by Sanjin Sijaric.
Reviewers: rnk, efriedma, TomTan, ssijaric, ostannard
Reviewed By: efriedma
Subscribers: dmajor, richard.townsend.arm, ostannard, javed.absar, kristof.beyls, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D60349
llvm-svn: 359932
2019-05-04 05:12:24 +08:00
|
|
|
}
|
|
|
|
|
2014-05-14 06:05:45 +08:00
|
|
|
// Classifies a C++ class return value for the MS ABI. Returns true when this
// function handled the classification (indirect return), false to fall back
// to the C ABI rules.
bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // Normally, the C++ concept of "is trivially copyable" is used to determine
  // if a struct can be returned directly. However, as MSVC and the language
  // have evolved, the definition of "trivially copyable" has changed, while
  // the ABI must remain stable. AArch64 uses the C++14 concept of an
  // "aggregate", while other ISAs use the older concept of "plain old data".
  const bool IsAArch64 = CGM.getTarget().getTriple().isAArch64();
  bool IsTrivialForABI = IsAArch64
                             ? RD->canPassInRegisters() && isCXX14Aggregate(RD)
                             : RD->isPOD();

  // MSVC always returns structs indirectly from C++ instance methods.
  if (!IsTrivialForABI || FI.isInstanceMethod()) {
    CharUnits Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);

    // MSVC always passes `this` before the `sret` parameter.
    FI.getReturnInfo().setSRetAfterThis(FI.isInstanceMethod());

    // On AArch64, use the `inreg` attribute if the object is considered to not
    // be trivially copyable, or if this is an instance method struct return.
    FI.getReturnInfo().setInReg(IsAArch64);

    return true;
  }

  // Otherwise, use the C ABI rules.
  return false;
}
|
|
|
|
|
2013-06-19 23:20:38 +08:00
|
|
|
// Emits the "is this the complete object?" check at the top of a constructor
// for a class with virtual bases. Virtual bases are only initialized (and
// vbtable pointers only stored) when the implicit "is most derived" flag is
// nonzero. Returns the block where construction continues after the vbase
// initialization, so CGF can emit the base ctor calls there.
llvm::BasicBlock *
MicrosoftCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
                                               const CXXRecordDecl *RD) {
  llvm::Value *MostDerivedFlag = getStructorImplicitParamValue(CGF);
  assert(MostDerivedFlag &&
         "ctor for a class with virtual bases must have an implicit parameter");

  llvm::Value *IsComplete =
      CGF.Builder.CreateIsNotNull(MostDerivedFlag, "is_complete_object");
  llvm::BasicBlock *InitVBasesBB = CGF.createBasicBlock("ctor.init_vbases");
  llvm::BasicBlock *SkipVBasesBB = CGF.createBasicBlock("ctor.skip_vbases");
  CGF.Builder.CreateCondBr(IsComplete, InitVBasesBB, SkipVBasesBB);

  // Fill in the vbtable pointers here.
  CGF.EmitBlock(InitVBasesBB);
  EmitVBPtrStores(CGF, RD);

  // CGF will put the base ctor calls in this basic block for us later.
  return SkipVBasesBB;
}
|
|
|
|
|
[MS-ABI]V-base dtor called more than needed when throw happens in v-base ctor in window. Need add "complete object flag" check in eh cleanup code.
The problem only happen on window ( A MS-ABI issuer )
The nature of the problem is virtual base dtor called more than it is needed after exception throw in inheriting base class(with virtual bases) ctor.
The root problem is when throw happen, not all virtual base classes have been contructed, so not all virtual base dtors are need to call for ehcleanup.
clang has code to handle vbase initialization: basically add check for "complete object flag" before call to v-base ctor.
But that part is missing for cleanup code.
To fix this add similar code as v-base init to cleanup code, same algorithm.
1> Add new routine:
EmitDtorCompleteObjectHandler
With corresponding to EmitCtorCompleteObjectHandler
2> In the EmitDestructorCal
Call EmitDtorCompleteObjectHandler when generate ehcleanup inside ctor.
Just add check for "complete object flag" before call to v-base dtor.
Without my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
cleanupret from %13 unwind to caller, !dbg !66
with my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
%is_complete_object4 = icmp ne i32 %is_most_derived2, 0, !dbg !66
br i1 %is_complete_object4, label %Dtor.dtor_vbase, label %Dtor.skip_vbase, !d
bg !66
Dtor.dtor_vbase: ; preds = %ehcleanup
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
br label %Dtor.skip_vbase, !dbg !66
Dtor.skip_vbase: ; preds = %Dtor.dtor_vbase, %ehcleanup
cleanupret from %13 unwind to caller, !dbg !66
Please let me know you need more info.
Patch by Jennifer Yu.
Differential Revision: https://reviews.llvm.org/D27358
llvm-svn: 288869
2016-12-07 08:21:45 +08:00
|
|
|
// Emits the "is this the complete object?" check in a destructor for a class
// with virtual bases (mirror of EmitCtorCompleteObjectHandler): virtual base
// dtors must only run when the implicit "is most derived" flag is nonzero,
// e.g. in the EH cleanup path after a throw in a derived ctor, where the
// vbases may not have been constructed yet. Returns the join block; CGF puts
// the vbase dtor calls in the conditional block for us later.
llvm::BasicBlock *
MicrosoftCXXABI::EmitDtorCompleteObjectHandler(CodeGenFunction &CGF) {
  llvm::Value *IsMostDerivedClass = getStructorImplicitParamValue(CGF);
  // Fixed copy-paste from the ctor handler: this is the *dtor* path.
  assert(IsMostDerivedClass &&
         "dtor for a class with virtual bases must have an implicit parameter");
  llvm::Value *IsCompleteObject =
      CGF.Builder.CreateIsNotNull(IsMostDerivedClass, "is_complete_object");

  llvm::BasicBlock *CallVbaseDtorsBB = CGF.createBasicBlock("Dtor.dtor_vbases");
  llvm::BasicBlock *SkipVbaseDtorsBB = CGF.createBasicBlock("Dtor.skip_vbases");
  CGF.Builder.CreateCondBr(IsCompleteObject,
                           CallVbaseDtorsBB, SkipVbaseDtorsBB);

  CGF.EmitBlock(CallVbaseDtorsBB);
  // CGF will put the base dtor calls in this basic block for us later.

  return SkipVbaseDtorsBB;
}
|
|
|
|
|
2013-10-10 02:16:58 +08:00
|
|
|
// Stores the vtordisp values for RD's virtual bases at the start of a ctor
// (and clears them symmetrically in the dtor path). Each vtordisp is computed
// from the live vbtable entry minus the static layout offset, and written to
// the 4 bytes immediately before the vbase subobject.
void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
    CodeGenFunction &CGF, const CXXRecordDecl *RD) {
  // In most cases, an override for a vbase virtual method can adjust
  // the "this" parameter by applying a constant offset.
  // However, this is not enough while a constructor or a destructor of some
  // class X is being executed if all the following conditions are met:
  //  - X has virtual bases, (1)
  //  - X overrides a virtual method M of a vbase Y, (2)
  //  - X itself is a vbase of the most derived class.
  //
  // If (1) and (2) are true, the vtorDisp for vbase Y is a hidden member of X
  // which holds the extra amount of "this" adjustment we must do when we use
  // the X vftables (i.e. during X ctor or dtor).
  // Outside the ctors and dtors, the values of vtorDisps are zero.

  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  typedef ASTRecordLayout::VBaseOffsetsMapTy VBOffsets;
  const VBOffsets &VBaseMap = Layout.getVBaseOffsetsMap();
  CGBuilderTy &Builder = CGF.Builder;

  unsigned AS = getThisAddress(CGF).getAddressSpace();
  // i8* view of `this`, created only if some vbase actually has a vtordisp.
  llvm::Value *Int8This = nullptr; // Initialize lazily.

  for (const CXXBaseSpecifier &S : RD->vbases()) {
    const CXXRecordDecl *VBase = S.getType()->getAsCXXRecordDecl();
    auto I = VBaseMap.find(VBase);
    assert(I != VBaseMap.end());
    // Only vbases the record layout marked as needing a vtordisp get a store.
    if (!I->second.hasVtorDisp())
      continue;

    // Dynamic offset of the vbase, read through the vbtable at runtime.
    llvm::Value *VBaseOffset =
        GetVirtualBaseClassOffset(CGF, getThisAddress(CGF), RD, VBase);
    // Static offset of the vbase in RD's layout.
    uint64_t ConstantVBaseOffset = I->second.VBaseOffset.getQuantity();

    // vtorDisp_for_vbase = vbptr[vbase_idx] - offsetof(RD, vbase).
    llvm::Value *VtorDispValue = Builder.CreateSub(
        VBaseOffset, llvm::ConstantInt::get(CGM.PtrDiffTy, ConstantVBaseOffset),
        "vtordisp.value");
    // vtordisp slots are 32 bits regardless of the pointer width.
    VtorDispValue = Builder.CreateTruncOrBitCast(VtorDispValue, CGF.Int32Ty);

    if (!Int8This)
      Int8This = Builder.CreateBitCast(getThisValue(CGF),
                                       CGF.Int8Ty->getPointerTo(AS));
    llvm::Value *VtorDispPtr = Builder.CreateInBoundsGEP(Int8This, VBaseOffset);
    // vtorDisp is always the 32-bits before the vbase in the class layout.
    VtorDispPtr = Builder.CreateConstGEP1_32(VtorDispPtr, -4);
    VtorDispPtr = Builder.CreateBitCast(
        VtorDispPtr, CGF.Int32Ty->getPointerTo(AS), "vtordisp.ptr");

    Builder.CreateAlignedStore(VtorDispValue, VtorDispPtr,
                               CharUnits::fromQuantity(4));
  }
}
|
|
|
|
|
2015-03-14 06:36:55 +08:00
|
|
|
/// Returns true if MD's declared calling convention is the default one for a
/// non-variadic C++ member function on this target.
static bool hasDefaultCXXMethodCC(ASTContext &Context,
                                  const CXXMethodDecl *MD) {
  CallingConv Expected = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  const auto *FPT = MD->getType()->castAs<FunctionProtoType>();
  return FPT->getCallConv() == Expected;
}
|
|
|
|
|
2013-08-05 01:30:04 +08:00
|
|
|
void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
|
|
|
|
// There's only one constructor type in this ABI.
|
|
|
|
CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
|
2015-03-14 06:36:55 +08:00
|
|
|
|
|
|
|
// Exported default constructors either have a simple call-site where they use
|
|
|
|
// the typical calling convention and have a single 'this' pointer for an
|
|
|
|
// argument -or- they get a wrapper function which appropriately thunks to the
|
|
|
|
// real default constructor. This thunk is the default constructor closure.
|
2020-11-10 00:22:32 +08:00
|
|
|
if (D->hasAttr<DLLExportAttr>() && D->isDefaultConstructor() &&
|
|
|
|
D->isDefined()) {
|
2015-03-14 06:36:55 +08:00
|
|
|
if (!hasDefaultCXXMethodCC(getContext(), D) || D->getNumParams() != 0) {
|
|
|
|
llvm::Function *Fn = getAddrOfCXXCtorClosure(D, Ctor_DefaultClosure);
|
|
|
|
Fn->setLinkage(llvm::GlobalValue::WeakODRLinkage);
|
2018-03-01 08:35:47 +08:00
|
|
|
CGM.setGVProperties(Fn, D);
|
2015-03-14 06:36:55 +08:00
|
|
|
}
|
2020-11-10 00:22:32 +08:00
|
|
|
}
|
2013-08-05 01:30:04 +08:00
|
|
|
}
|
|
|
|
|
2013-06-19 23:20:38 +08:00
|
|
|
void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
|
|
|
|
const CXXRecordDecl *RD) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address This = getThisAddress(CGF);
|
|
|
|
This = CGF.Builder.CreateElementBitCast(This, CGM.Int8Ty, "this.int8");
|
2015-03-15 07:44:48 +08:00
|
|
|
const ASTContext &Context = getContext();
|
|
|
|
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
|
2013-06-19 23:20:38 +08:00
|
|
|
|
2014-01-03 08:14:35 +08:00
|
|
|
const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
|
|
|
|
for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
|
2016-10-11 00:26:29 +08:00
|
|
|
const std::unique_ptr<VPtrInfo> &VBT = (*VBGlobals.VBTables)[I];
|
2014-01-03 08:14:35 +08:00
|
|
|
llvm::GlobalVariable *GV = VBGlobals.Globals[I];
|
2013-06-19 23:20:38 +08:00
|
|
|
const ASTRecordLayout &SubobjectLayout =
|
2016-07-20 22:40:25 +08:00
|
|
|
Context.getASTRecordLayout(VBT->IntroducingObject);
|
2014-01-04 07:42:00 +08:00
|
|
|
CharUnits Offs = VBT->NonVirtualOffset;
|
|
|
|
Offs += SubobjectLayout.getVBPtrOffset();
|
2014-02-28 03:40:09 +08:00
|
|
|
if (VBT->getVBaseWithVPtr())
|
|
|
|
Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address VBPtr = CGF.Builder.CreateConstInBoundsByteGEP(This, Offs);
|
2015-04-06 06:45:47 +08:00
|
|
|
llvm::Value *GVPtr =
|
|
|
|
CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
VBPtr = CGF.Builder.CreateElementBitCast(VBPtr, GVPtr->getType(),
|
2016-07-20 22:40:25 +08:00
|
|
|
"vbptr." + VBT->ObjectWithVPtr->getName());
|
2014-10-23 01:26:00 +08:00
|
|
|
CGF.Builder.CreateStore(GVPtr, VBPtr);
|
2013-06-19 23:20:38 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-19 14:43:46 +08:00
|
|
|
// Append/insert the implicit arguments the MS ABI adds to structor
// signatures: the scalar deleting destructor's implicit int flag, and a
// constructor's is_most_derived flag when the class has virtual bases.
// Returns how many arguments were added before and after the explicit ones.
CGCXXABI::AddedStructorArgCounts
MicrosoftCXXABI::buildStructorSignature(GlobalDecl GD,
                                        SmallVectorImpl<CanQualType> &ArgTys) {
  // TODO: 'for base' flag
  AddedStructorArgCounts Counts;
  CanQualType IntTy = getContext().IntTy;

  if (isa<CXXDestructorDecl>(GD.getDecl()) &&
      GD.getDtorType() == Dtor_Deleting) {
    // The scalar deleting destructor takes an implicit int parameter.
    ArgTys.push_back(IntTy);
    ++Counts.Suffix;
  }

  const auto *Ctor = dyn_cast<CXXConstructorDecl>(GD.getDecl());
  if (!Ctor)
    return Counts;

  // All parameters are already in place except is_most_derived, which goes
  // after 'this' if it's variadic and last if it's not.
  const FunctionProtoType *Proto =
      Ctor->getType()->castAs<FunctionProtoType>();
  if (Ctor->getParent()->getNumVBases()) {
    if (Proto->isVariadic()) {
      // Variadic: is_most_derived must precede the variadic arguments, so it
      // goes right after 'this'.
      ArgTys.insert(ArgTys.begin() + 1, IntTy);
      ++Counts.Prefix;
    } else {
      ArgTys.push_back(IntTy);
      ++Counts.Suffix;
    }
  }

  return Counts;
}
|
|
|
|
|
2018-03-17 03:40:50 +08:00
|
|
|
// Set the DLL storage class on a destructor variant's global value.
void MicrosoftCXXABI::setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
                                                 const CXXDestructorDecl *Dtor,
                                                 CXXDtorType DT) const {
  // Deleting destructor variants are never imported or exported; they always
  // get the default storage class.
  if (DT == Dtor_Deleting) {
    GV->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
    return;
  }

  // Every other variant takes its dllimport/dllexport storage from the
  // declaration's attributes. (Upcast to NamedDecl to pick the right
  // setDLLImportDLLExport overload.)
  const NamedDecl *ND = Dtor;
  CGM.setDLLImportDLLExport(GV, ND);
}
|
|
|
|
|
|
|
|
// Compute the LLVM linkage for one destructor variant under the MS ABI.
llvm::GlobalValue::LinkageTypes MicrosoftCXXABI::getCXXDestructorLinkage(
    GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const {
  using LV = llvm::GlobalValue;
  // Internal linkage always wins, no matter what attributes are present.
  // Beyond this point the thunk is known to be externally visible.
  if (Linkage == GVA_Internal)
    return LV::InternalLinkage;

  switch (DT) {
  case Dtor_Base:
    // The base destructor most closely tracks the user-declared constructor,
    // so delegate back to the normal declarator case.
    return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
                                           /*IsConstantVariable=*/false);
  case Dtor_Complete:
    // The complete destructor behaves like an inline function, except that
    // it may be imported and must then be exported as well, which requires
    // changing the linkage when a DLL attribute is present.
    if (Dtor->hasAttr<DLLExportAttr>())
      return LV::WeakODRLinkage;
    return Dtor->hasAttr<DLLImportAttr>() ? LV::AvailableExternallyLinkage
                                          : LV::LinkOnceODRLinkage;
  case Dtor_Deleting:
    // Deleting destructors have vague linkage like inline functions: they are
    // emitted everywhere they are used, and are internal only when the class
    // itself is internal (handled by the GVA_Internal check above).
    return LV::LinkOnceODRLinkage;
  case Dtor_Comdat:
    llvm_unreachable("MS C++ ABI does not support comdat dtors");
  }
  llvm_unreachable("invalid dtor type");
}
|
|
|
|
|
[ms-cxxabi] Emit linkonce complete dtors in TUs that need them
Based on Peter Collingbourne's destructor patches.
Prior to this change, clang was considering ?1 to be the complete
destructor and the base destructor, which was wrong. This lead to
crashes when clang tried to emit two LLVM functions with the same name.
In this ABI, TUs with non-inline dtors might not emit a complete
destructor. They are emitted as inline thunks in TUs that need them,
and they always delegate to the base dtors of the complete class and its
virtual bases. This change uses the DeferredDecls machinery to emit
complete dtors as needed.
Currently in clang try body destructors can catch exceptions thrown by
virtual base destructors. In the Microsoft C++ ABI, clang may not have
the destructor definition, in which case clang won't wrap the virtual
virtual base destructor calls in a try-catch. Diagnosing this in user
code is TODO.
Finally, for classes that don't use virtual inheritance, MSVC always
calls the base destructor (?1) directly. This is a useful code size
optimization that avoids emitting lots of extra thunks or aliases.
Implementing it also means our existing tests continue to pass, and is
consistent with MSVC's output.
We can do the same for Itanium by tweaking GetAddrOfCXXDestructor, but
it will require further testing.
Reviewers: rjmccall
CC: cfe-commits
Differential Revision: http://llvm-reviews.chandlerc.com/D1066
llvm-svn: 186828
2013-07-22 21:51:44 +08:00
|
|
|
void MicrosoftCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
|
|
|
|
// The TU defining a dtor is only guaranteed to emit a base destructor. All
|
|
|
|
// other destructor variants are delegating thunks.
|
|
|
|
CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
|
2019-12-03 07:22:44 +08:00
|
|
|
|
|
|
|
// If the class is dllexported, emit the complete (vbase) destructor wherever
|
|
|
|
// the base dtor is emitted.
|
|
|
|
// FIXME: To match MSVC, this should only be done when the class is exported
|
|
|
|
// with -fdllexport-inlines enabled.
|
|
|
|
if (D->getParent()->getNumVBases() > 0 && D->hasAttr<DLLExportAttr>())
|
|
|
|
CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
|
[ms-cxxabi] Emit linkonce complete dtors in TUs that need them
Based on Peter Collingbourne's destructor patches.
Prior to this change, clang was considering ?1 to be the complete
destructor and the base destructor, which was wrong. This lead to
crashes when clang tried to emit two LLVM functions with the same name.
In this ABI, TUs with non-inline dtors might not emit a complete
destructor. They are emitted as inline thunks in TUs that need them,
and they always delegate to the base dtors of the complete class and its
virtual bases. This change uses the DeferredDecls machinery to emit
complete dtors as needed.
Currently in clang try body destructors can catch exceptions thrown by
virtual base destructors. In the Microsoft C++ ABI, clang may not have
the destructor definition, in which case clang won't wrap the virtual
virtual base destructor calls in a try-catch. Diagnosing this in user
code is TODO.
Finally, for classes that don't use virtual inheritance, MSVC always
calls the base destructor (?1) directly. This is a useful code size
optimization that avoids emitting lots of extra thunks or aliases.
Implementing it also means our existing tests continue to pass, and is
consistent with MSVC's output.
We can do the same for Itanium by tweaking GetAddrOfCXXDestructor, but
it will require further testing.
Reviewers: rjmccall
CC: cfe-commits
Differential Revision: http://llvm-reviews.chandlerc.com/D1066
llvm-svn: 186828
2013-07-22 21:51:44 +08:00
|
|
|
}
|
|
|
|
|
2014-03-15 01:43:37 +08:00
|
|
|
// Compute the 'this' adjustment that the prologue of a virtual function
// performs in the MS ABI: the offset from the vfptr used for the call back
// to the subobject the method expects, plus any virtual-base displacement.
CharUnits
MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
    // A complete destructor already receives a pointer to the complete
    // object, so it needs no adjustment.
    if (GD.getDtorType() == Dtor_Complete)
      return CharUnits();

    // The vftable has no Dtor_Base entry; the base variant shares its this
    // adjustment with the deleting destructor, so look that one up instead.
    GD = GlobalDecl(DD, Dtor_Deleting);
  }

  MethodVFTableLocation Loc =
      CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD);

  // Ordinary virtual instance methods adjust from the vfptr that first
  // declared the method to the virtual base subobject; destructors do not,
  // because the vector deleting destructor thunk applies that adjustment for
  // us when necessary.
  CharUnits Adjustment =
      isa<CXXDestructorDecl>(MD) ? CharUnits::Zero() : Loc.VFPtrOffset;

  if (Loc.VBase) {
    // Add the virtual base's offset within the most-derived class.
    const ASTRecordLayout &DerivedLayout =
        getContext().getASTRecordLayout(MD->getParent());
    Adjustment += DerivedLayout.getVBaseClassOffset(Loc.VBase);
  }

  return Adjustment;
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
|
|
|
|
CodeGenFunction &CGF, GlobalDecl GD, Address This,
|
|
|
|
bool VirtualCall) {
|
2014-03-15 01:43:37 +08:00
|
|
|
if (!VirtualCall) {
|
|
|
|
// If the call of a virtual function is not virtual, we just have to
|
|
|
|
// compensate for the adjustment the virtual function does in its prologue.
|
|
|
|
CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(GD);
|
|
|
|
if (Adjustment.isZero())
|
|
|
|
return This;
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
|
2014-03-15 01:43:37 +08:00
|
|
|
assert(Adjustment.isPositive());
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return CGF.Builder.CreateConstByteGEP(This, Adjustment);
|
2014-03-15 01:43:37 +08:00
|
|
|
}
|
|
|
|
|
2013-08-21 14:25:03 +08:00
|
|
|
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
|
2013-10-17 02:24:06 +08:00
|
|
|
|
|
|
|
GlobalDecl LookupGD = GD;
|
|
|
|
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
|
|
|
|
// Complete dtors take a pointer to the complete object,
|
|
|
|
// thus don't need adjustment.
|
|
|
|
if (GD.getDtorType() == Dtor_Complete)
|
|
|
|
return This;
|
|
|
|
|
|
|
|
// There's only Dtor_Deleting in vftable but it shares the this adjustment
|
|
|
|
// with the base one, so look up the deleting one instead.
|
|
|
|
LookupGD = GlobalDecl(DD, Dtor_Deleting);
|
|
|
|
}
|
2018-04-03 04:00:39 +08:00
|
|
|
MethodVFTableLocation ML =
|
2013-11-05 23:54:58 +08:00
|
|
|
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
|
2013-08-21 14:25:03 +08:00
|
|
|
|
2013-11-07 21:34:02 +08:00
|
|
|
CharUnits StaticOffset = ML.VFPtrOffset;
|
2014-02-19 06:51:52 +08:00
|
|
|
|
|
|
|
// Base destructors expect 'this' to point to the beginning of the base
|
|
|
|
// subobject, not the first vfptr that happens to contain the virtual dtor.
|
|
|
|
// However, we still need to apply the virtual base adjustment.
|
|
|
|
if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
|
|
|
|
StaticOffset = CharUnits::Zero();
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Result = This;
|
2013-08-21 14:25:03 +08:00
|
|
|
if (ML.VBase) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
const CXXRecordDecl *Derived = MD->getParent();
|
|
|
|
const CXXRecordDecl *VBase = ML.VBase;
|
2014-03-15 01:43:37 +08:00
|
|
|
llvm::Value *VBaseOffset =
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
GetVirtualBaseClassOffset(CGF, Result, Derived, VBase);
|
|
|
|
llvm::Value *VBasePtr =
|
|
|
|
CGF.Builder.CreateInBoundsGEP(Result.getPointer(), VBaseOffset);
|
|
|
|
CharUnits VBaseAlign =
|
|
|
|
CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
|
|
|
|
Result = Address(VBasePtr, VBaseAlign);
|
2013-08-21 14:25:03 +08:00
|
|
|
}
|
|
|
|
if (!StaticOffset.isZero()) {
|
|
|
|
assert(StaticOffset.isPositive());
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
|
2013-10-23 02:15:24 +08:00
|
|
|
if (ML.VBase) {
|
|
|
|
// Non-virtual adjustment might result in a pointer outside the allocated
|
|
|
|
// object, e.g. if the final overrider class is laid out after the virtual
|
|
|
|
// base that declares a method in the most derived class.
|
|
|
|
// FIXME: Update the code that emits this adjustment in thunks prologues.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Result = CGF.Builder.CreateConstByteGEP(Result, StaticOffset);
|
2013-10-23 02:15:24 +08:00
|
|
|
} else {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Result = CGF.Builder.CreateConstInBoundsByteGEP(Result, StaticOffset);
|
2013-10-23 02:15:24 +08:00
|
|
|
}
|
2013-08-21 14:25:03 +08:00
|
|
|
}
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return Result;
|
2013-08-21 14:25:03 +08:00
|
|
|
}
|
|
|
|
|
2013-12-18 03:46:40 +08:00
|
|
|
void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                                QualType &ResTy,
                                                FunctionArgList &Params) {
  // Add the MS-ABI hidden structor parameters: constructors of classes with
  // virtual bases take an 'is_most_derived' flag, and deleting destructors
  // take a 'should_call_delete' flag.  Whichever decl is created is also
  // recorded via getStructorImplicitParamDecl so the prologue can load it.
  ASTContext &Context = getContext();
  const auto *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  const bool NeedsMostDerivedFlag =
      isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases();
  if (NeedsMostDerivedFlag) {
    auto *MostDerivedParam = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, CGF.CurGD.getDecl()->getLocation(),
        &Context.Idents.get("is_most_derived"), Context.IntTy,
        ImplicitParamDecl::Other);
    // The 'most_derived' parameter goes second if the ctor is variadic and
    // last if it's not.  Dtors can't be variadic.
    const auto *Proto = MD->getType()->castAs<FunctionProtoType>();
    if (Proto->isVariadic())
      Params.insert(Params.begin() + 1, MostDerivedParam);
    else
      Params.push_back(MostDerivedParam);
    getStructorImplicitParamDecl(CGF) = MostDerivedParam;
  } else if (isDeletingDtor(CGF.CurGD)) {
    auto *ShouldDeleteParam = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, CGF.CurGD.getDecl()->getLocation(),
        &Context.Idents.get("should_call_delete"), Context.IntTy,
        ImplicitParamDecl::Other);
    Params.push_back(ShouldDeleteParam);
    getStructorImplicitParamDecl(CGF) = ShouldDeleteParam;
  }
}
|
|
|
|
|
|
|
|
// Emits the MS-ABI-specific prologue for an instance method: adjusts the
// incoming 'this' for virtual methods of non-primary bases, pre-initializes
// the return slot for 'this'-returning structors, and loads any hidden
// structor parameter declared by addImplicitStructorParams.
void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  // Overridden virtual methods of non-primary bases need to adjust the incoming
  // 'this' pointer in the prologue. In this hierarchy, C::b will subtract
  // sizeof(void*) to adjust from B* to C*:
  //   struct A { virtual void a(); };
  //   struct B { virtual void b(); };
  //   struct C : A, B { virtual void b(); };
  //
  // Leave the value stored in the 'this' alloca unadjusted, so that the
  // debugger sees the unadjusted value. Microsoft debuggers require this, and
  // will apply the ThisAdjustment in the method type information.
  // FIXME: Do something better for DWARF debuggers, which won't expect this,
  // without making our codegen depend on debug info settings.
  llvm::Value *This = loadIncomingCXXThis(CGF);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  if (!CGF.CurFuncIsThunk && MD->isVirtual()) {
    CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(CGF.CurGD);
    if (!Adjustment.isZero()) {
      // Apply the (negative) adjustment as byte arithmetic: bitcast to i8*,
      // GEP backwards by the adjustment, and bitcast to the original type.
      unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
      llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS),
                 *thisTy = This->getType();
      This = CGF.Builder.CreateBitCast(This, charPtrTy);
      assert(Adjustment.isPositive());
      This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This,
                                                    -Adjustment.getQuantity());
      This = CGF.Builder.CreateBitCast(This, thisTy, "this.adjusted");
    }
  }
  setCXXABIThisValue(CGF, This);

  // If this is a function that the ABI specifies returns 'this', initialize
  // the return slot to 'this' at the start of the function.
  //
  // Unlike the setting of return types, this is done within the ABI
  // implementation instead of by clients of CGCXXABI because:
  // 1) getThisValue is currently protected
  // 2) in theory, an ABI could implement 'this' returns some other way;
  //    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
  else if (hasMostDerivedReturn(CGF.CurGD))
    CGF.Builder.CreateStore(CGF.EmitCastToVoidPtr(getThisValue(CGF)),
                            CGF.ReturnValue);

  // Constructors of classes with virtual bases received a hidden
  // 'is_most_derived' parameter (see addImplicitStructorParams); load it so
  // later code can branch on whether virtual bases must be constructed.
  if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) {
    assert(getStructorImplicitParamDecl(CGF) &&
           "no implicit parameter for a constructor with virtual bases?");
    getStructorImplicitParamValue(CGF)
      = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)),
          "is_most_derived");
  }

  // Deleting destructors received a hidden 'should_call_delete' flag; load it
  // for use when deciding whether to call operator delete.
  if (isDeletingDtor(CGF.CurGD)) {
    assert(getStructorImplicitParamDecl(CGF) &&
           "no implicit parameter for a deleting destructor?");
    getStructorImplicitParamValue(CGF)
      = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)),
          "should_call_delete");
  }
}
|
|
|
|
|
2020-05-19 14:43:46 +08:00
|
|
|
CGCXXABI::AddedStructorArgs MicrosoftCXXABI::getImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating) {
  // Produce the hidden 'most_derived' argument for a constructor call, if the
  // target class has virtual bases.  Mirrors addImplicitStructorParams, which
  // declares the matching parameter on the callee side.
  assert(Type == Ctor_Complete || Type == Ctor_Base);

  // No virtual bases means no hidden flag is required.
  if (D->getParent()->getNumVBases() == 0)
    return AddedStructorArgs{};

  // A delegating call forwards our own incoming flag; otherwise the flag is a
  // constant: 1 when constructing the complete object, 0 for a base subobject.
  llvm::Value *MostDerivedFlag =
      Delegating
          ? getStructorImplicitParamValue(CGF)
          : llvm::ConstantInt::get(CGM.Int32Ty, Type == Ctor_Complete);

  // Variadic ctors take the flag immediately after 'this' (prefix position);
  // otherwise it trails the explicit arguments (suffix position).
  if (D->getType()->castAs<FunctionProtoType>()->isVariadic())
    return AddedStructorArgs::prefix({{MostDerivedFlag, getContext().IntTy}});
  return AddedStructorArgs::suffix({{MostDerivedFlag, getContext().IntTy}});
}
|
|
|
|
|
2020-07-02 01:57:45 +08:00
|
|
|
// Returns the implicit parameter value for a direct destructor call.  In the
// MS ABI, direct (non-virtual) destructor calls carry no implicit argument:
// the 'should_call_delete' flag exists only for the deleting destructor
// variant, which is only ever invoked through a virtual call (see the assert
// in EmitDestructorCall below), so this always yields nullptr.
llvm::Value *MicrosoftCXXABI::getCXXDestructorImplicitParam(
    CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
    bool ForVirtualBase, bool Delegating) {
  return nullptr;
}
|
|
|
|
|
2013-12-13 08:53:54 +08:00
|
|
|
void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
|
|
|
|
const CXXDestructorDecl *DD,
|
|
|
|
CXXDtorType Type, bool ForVirtualBase,
|
2019-07-22 17:39:13 +08:00
|
|
|
bool Delegating, Address This,
|
|
|
|
QualType ThisTy) {
|
2018-03-17 03:40:50 +08:00
|
|
|
// Use the base destructor variant in place of the complete destructor variant
|
|
|
|
// if the class has no virtual bases. This effectively implements some of the
|
|
|
|
// -mconstructor-aliases optimization, but as part of the MS C++ ABI.
|
|
|
|
if (Type == Dtor_Complete && DD->getParent()->getNumVBases() == 0)
|
|
|
|
Type = Dtor_Base;
|
|
|
|
|
2019-03-23 07:05:10 +08:00
|
|
|
GlobalDecl GD(DD, Type);
|
|
|
|
CGCallee Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
|
2013-12-13 08:53:54 +08:00
|
|
|
|
2014-03-15 01:43:37 +08:00
|
|
|
if (DD->isVirtual()) {
|
|
|
|
assert(Type != CXXDtorType::Dtor_Deleting &&
|
|
|
|
"The deleting destructor should only be called via a virtual call");
|
|
|
|
This = adjustThisArgumentForVirtualFunctionCall(CGF, GlobalDecl(DD, Type),
|
|
|
|
This, false);
|
|
|
|
}
|
2018-07-31 03:24:48 +08:00
|
|
|
|
[MS-ABI]V-base dtor called more than needed when throw happens in v-base ctor in window. Need add "complete object flag" check in eh cleanup code.
The problem only happen on window ( A MS-ABI issuer )
The nature of the problem is virtual base dtor called more than it is needed after exception throw in inheriting base class(with virtual bases) ctor.
The root problem is when throw happen, not all virtual base classes have been contructed, so not all virtual base dtors are need to call for ehcleanup.
clang has code to handle vbase initialization: basically add check for "complete object flag" before call to v-base ctor.
But that part is missing for cleanup code.
To fix this add similar code as v-base init to cleanup code, same algorithm.
1> Add new routine:
EmitDtorCompleteObjectHandler
With corresponding to EmitCtorCompleteObjectHandler
2> In the EmitDestructorCal
Call EmitDtorCompleteObjectHandler when generate ehcleanup inside ctor.
Just add check for "complete object flag" before call to v-base dtor.
Without my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
cleanupret from %13 unwind to caller, !dbg !66
with my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
%is_complete_object4 = icmp ne i32 %is_most_derived2, 0, !dbg !66
br i1 %is_complete_object4, label %Dtor.dtor_vbase, label %Dtor.skip_vbase, !d
bg !66
Dtor.dtor_vbase: ; preds = %ehcleanup
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
br label %Dtor.skip_vbase, !dbg !66
Dtor.skip_vbase: ; preds = %Dtor.dtor_vbase, %ehcleanup
cleanupret from %13 unwind to caller, !dbg !66
Please let me know you need more info.
Patch by Jennifer Yu.
Differential Revision: https://reviews.llvm.org/D27358
llvm-svn: 288869
2016-12-07 08:21:45 +08:00
|
|
|
llvm::BasicBlock *BaseDtorEndBB = nullptr;
|
|
|
|
if (ForVirtualBase && isa<CXXConstructorDecl>(CGF.CurCodeDecl)) {
|
|
|
|
BaseDtorEndBB = EmitDtorCompleteObjectHandler(CGF);
|
2018-07-31 03:24:48 +08:00
|
|
|
}
|
2013-12-13 08:53:54 +08:00
|
|
|
|
2020-07-02 01:57:45 +08:00
|
|
|
llvm::Value *Implicit =
|
|
|
|
getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase,
|
|
|
|
Delegating); // = nullptr
|
2019-07-22 17:39:13 +08:00
|
|
|
CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
|
2020-07-02 01:57:45 +08:00
|
|
|
/*ImplicitParam=*/Implicit,
|
2019-03-23 07:05:10 +08:00
|
|
|
/*ImplicitParamTy=*/QualType(), nullptr);
|
[MS-ABI]V-base dtor called more than needed when throw happens in v-base ctor in window. Need add "complete object flag" check in eh cleanup code.
The problem only happen on window ( A MS-ABI issuer )
The nature of the problem is virtual base dtor called more than it is needed after exception throw in inheriting base class(with virtual bases) ctor.
The root problem is when throw happen, not all virtual base classes have been contructed, so not all virtual base dtors are need to call for ehcleanup.
clang has code to handle vbase initialization: basically add check for "complete object flag" before call to v-base ctor.
But that part is missing for cleanup code.
To fix this add similar code as v-base init to cleanup code, same algorithm.
1> Add new routine:
EmitDtorCompleteObjectHandler
With corresponding to EmitCtorCompleteObjectHandler
2> In the EmitDestructorCal
Call EmitDtorCompleteObjectHandler when generate ehcleanup inside ctor.
Just add check for "complete object flag" before call to v-base dtor.
Without my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
cleanupret from %13 unwind to caller, !dbg !66
with my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
%is_complete_object4 = icmp ne i32 %is_most_derived2, 0, !dbg !66
br i1 %is_complete_object4, label %Dtor.dtor_vbase, label %Dtor.skip_vbase, !d
bg !66
Dtor.dtor_vbase: ; preds = %ehcleanup
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
br label %Dtor.skip_vbase, !dbg !66
Dtor.skip_vbase: ; preds = %Dtor.dtor_vbase, %ehcleanup
cleanupret from %13 unwind to caller, !dbg !66
Please let me know you need more info.
Patch by Jennifer Yu.
Differential Revision: https://reviews.llvm.org/D27358
llvm-svn: 288869
2016-12-07 08:21:45 +08:00
|
|
|
if (BaseDtorEndBB) {
|
2018-07-31 03:24:48 +08:00
|
|
|
// Complete object handler should continue to be the remaining
|
[MS-ABI]V-base dtor called more than needed when throw happens in v-base ctor in window. Need add "complete object flag" check in eh cleanup code.
The problem only happens on Windows (an MS-ABI issue).
The nature of the problem is virtual base dtor called more than it is needed after exception throw in inheriting base class(with virtual bases) ctor.
The root problem is that when the throw happens, not all virtual base classes have been constructed, so not all virtual base dtors need to be called for ehcleanup.
clang has code to handle vbase initialization: basically add check for "complete object flag" before call to v-base ctor.
But that part is missing for cleanup code.
To fix this add similar code as v-base init to cleanup code, same algorithm.
1> Add new routine:
EmitDtorCompleteObjectHandler
Corresponding to EmitCtorCompleteObjectHandler.
2> In EmitDestructorCall:
Call EmitDtorCompleteObjectHandler when generating ehcleanup inside a ctor.
Just add check for "complete object flag" before call to v-base dtor.
Without my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
cleanupret from %13 unwind to caller, !dbg !66
with my change:
ehcleanup: ; preds = %ctor.skip_vbases
%13 = cleanuppad within none [], !dbg !66
%14 = bitcast %struct.class_0* %this1 to i8*, !dbg !66
%15 = getelementptr inbounds i8, i8* %14, i64 8, !dbg !66
%16 = bitcast i8* %15 to %struct.class_2*, !dbg !66
%is_complete_object4 = icmp ne i32 %is_most_derived2, 0, !dbg !66
br i1 %is_complete_object4, label %Dtor.dtor_vbase, label %Dtor.skip_vbase, !d
bg !66
Dtor.dtor_vbase: ; preds = %ehcleanup
call void @"\01??1class_2@@UEAA@XZ"(%struct.class_2* %16) #6 [ "funclet"(token
%13) ], !dbg !66
br label %Dtor.skip_vbase, !dbg !66
Dtor.skip_vbase: ; preds = %Dtor.dtor_vbase, %ehcleanup
cleanupret from %13 unwind to caller, !dbg !66
Please let me know if you need more info.
Patch by Jennifer Yu.
Differential Revision: https://reviews.llvm.org/D27358
llvm-svn: 288869
2016-12-07 08:21:45 +08:00
|
|
|
CGF.Builder.CreateBr(BaseDtorEndBB);
|
|
|
|
CGF.EmitBlock(BaseDtorEndBB);
|
2018-07-31 03:24:48 +08:00
|
|
|
}
|
2013-12-13 08:53:54 +08:00
|
|
|
}
|
|
|
|
|
2016-10-11 00:26:29 +08:00
|
|
|
void MicrosoftCXXABI::emitVTableTypeMetadata(const VPtrInfo &Info,
|
2016-06-25 05:21:46 +08:00
|
|
|
const CXXRecordDecl *RD,
|
|
|
|
llvm::GlobalVariable *VTable) {
|
2017-01-19 07:55:27 +08:00
|
|
|
if (!CGM.getCodeGenOpts().LTOUnit)
|
CFI: Implement bitset emission for the Microsoft ABI.
Clang's control flow integrity implementation works by conceptually attaching
"tags" (in the form of bitset entries) to each virtual table, identifying
the names of the classes that the virtual table is compatible with. Under
the Itanium ABI, it is simple to assign tags to virtual tables; they are
simply the address points, which are available via VTableLayout. Because any
overridden methods receive an entry in the derived class's virtual table,
a check for an overridden method call can always be done by checking the
tag of whichever derived class overrode the method call.
The Microsoft ABI is a little different, as it does not directly use address
points, and overrides in a derived class do not cause new virtual table entries
to be added to the derived class; instead, the slot in the base class is
reused, and the compiler needs to adjust the this pointer at the call site
to (generally) the base class that initially defined the method. After the
this pointer has been adjusted, we cannot check for the derived class's tag,
as the virtual table may not be compatible with the derived class. So we
need to determine which base class we have been adjusted to.
Specifically, at each call site, we use ASTRecordLayout to identify the most
derived class whose virtual table is laid out at the "this" pointer offset
we are using to make the call, and check the virtual table for that tag.
Because address point information is unavailable, we "reconstruct" it as
follows: any virtual tables we create for a non-derived class receive a tag
for that class, and virtual tables for a base class inside a derived class
receive a tag for the base class, together with tags for any derived classes
which are laid out at the same position as the derived class (and therefore
have compatible virtual tables).
Differential Revision: http://reviews.llvm.org/D10520
llvm-svn: 240117
2015-06-19 10:30:43 +08:00
|
|
|
return;
|
|
|
|
|
2020-01-25 23:26:09 +08:00
|
|
|
// TODO: Should VirtualFunctionElimination also be supported here?
|
|
|
|
// See similar handling in CodeGenModule::EmitVTableTypeMetadata.
|
|
|
|
if (CGM.getCodeGenOpts().WholeProgramVTables) {
|
|
|
|
llvm::GlobalObject::VCallVisibility TypeVis =
|
|
|
|
CGM.GetVCallVisibilityLevel(RD);
|
|
|
|
if (TypeVis != llvm::GlobalObject::VCallVisibilityPublic)
|
|
|
|
VTable->setVCallVisibilityMetadata(TypeVis);
|
|
|
|
}
|
|
|
|
|
2015-07-10 03:56:14 +08:00
|
|
|
// The location of the first virtual function pointer in the virtual table,
|
|
|
|
// aka the "address point" on Itanium. This is at offset 0 if RTTI is
|
|
|
|
// disabled, or sizeof(void*) if RTTI is enabled.
|
|
|
|
CharUnits AddressPoint =
|
|
|
|
getContext().getLangOpts().RTTIData
|
|
|
|
? getContext().toCharUnitsFromBits(
|
|
|
|
getContext().getTargetInfo().getPointerWidth(0))
|
|
|
|
: CharUnits::Zero();
|
CFI: Implement bitset emission for the Microsoft ABI.
Clang's control flow integrity implementation works by conceptually attaching
"tags" (in the form of bitset entries) to each virtual table, identifying
the names of the classes that the virtual table is compatible with. Under
the Itanium ABI, it is simple to assign tags to virtual tables; they are
simply the address points, which are available via VTableLayout. Because any
overridden methods receive an entry in the derived class's virtual table,
a check for an overridden method call can always be done by checking the
tag of whichever derived class overrode the method call.
The Microsoft ABI is a little different, as it does not directly use address
points, and overrides in a derived class do not cause new virtual table entries
to be added to the derived class; instead, the slot in the base class is
reused, and the compiler needs to adjust the this pointer at the call site
to (generally) the base class that initially defined the method. After the
this pointer has been adjusted, we cannot check for the derived class's tag,
as the virtual table may not be compatible with the derived class. So we
need to determine which base class we have been adjusted to.
Specifically, at each call site, we use ASTRecordLayout to identify the most
derived class whose virtual table is laid out at the "this" pointer offset
we are using to make the call, and check the virtual table for that tag.
Because address point information is unavailable, we "reconstruct" it as
follows: any virtual tables we create for a non-derived class receive a tag
for that class, and virtual tables for a base class inside a derived class
receive a tag for the base class, together with tags for any derived classes
which are laid out at the same position as the derived class (and therefore
have compatible virtual tables).
Differential Revision: http://reviews.llvm.org/D10520
llvm-svn: 240117
2015-06-19 10:30:43 +08:00
|
|
|
|
2016-10-11 00:26:29 +08:00
|
|
|
if (Info.PathToIntroducingObject.empty()) {
|
2016-06-25 05:21:46 +08:00
|
|
|
CGM.AddVTableTypeMetadata(VTable, AddressPoint, RD);
|
CFI: Implement bitset emission for the Microsoft ABI.
Clang's control flow integrity implementation works by conceptually attaching
"tags" (in the form of bitset entries) to each virtual table, identifying
the names of the classes that the virtual table is compatible with. Under
the Itanium ABI, it is simple to assign tags to virtual tables; they are
simply the address points, which are available via VTableLayout. Because any
overridden methods receive an entry in the derived class's virtual table,
a check for an overridden method call can always be done by checking the
tag of whichever derived class overrode the method call.
The Microsoft ABI is a little different, as it does not directly use address
points, and overrides in a derived class do not cause new virtual table entries
to be added to the derived class; instead, the slot in the base class is
reused, and the compiler needs to adjust the this pointer at the call site
to (generally) the base class that initially defined the method. After the
this pointer has been adjusted, we cannot check for the derived class's tag,
as the virtual table may not be compatible with the derived class. So we
need to determine which base class we have been adjusted to.
Specifically, at each call site, we use ASTRecordLayout to identify the most
derived class whose virtual table is laid out at the "this" pointer offset
we are using to make the call, and check the virtual table for that tag.
Because address point information is unavailable, we "reconstruct" it as
follows: any virtual tables we create for a non-derived class receive a tag
for that class, and virtual tables for a base class inside a derived class
receive a tag for the base class, together with tags for any derived classes
which are laid out at the same position as the derived class (and therefore
have compatible virtual tables).
Differential Revision: http://reviews.llvm.org/D10520
llvm-svn: 240117
2015-06-19 10:30:43 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a bitset entry for the least derived base belonging to this vftable.
|
2016-06-25 05:21:46 +08:00
|
|
|
CGM.AddVTableTypeMetadata(VTable, AddressPoint,
|
2016-10-11 00:26:29 +08:00
|
|
|
Info.PathToIntroducingObject.back());
|
CFI: Implement bitset emission for the Microsoft ABI.
Clang's control flow integrity implementation works by conceptually attaching
"tags" (in the form of bitset entries) to each virtual table, identifying
the names of the classes that the virtual table is compatible with. Under
the Itanium ABI, it is simple to assign tags to virtual tables; they are
simply the address points, which are available via VTableLayout. Because any
overridden methods receive an entry in the derived class's virtual table,
a check for an overridden method call can always be done by checking the
tag of whichever derived class overrode the method call.
The Microsoft ABI is a little different, as it does not directly use address
points, and overrides in a derived class do not cause new virtual table entries
to be added to the derived class; instead, the slot in the base class is
reused, and the compiler needs to adjust the this pointer at the call site
to (generally) the base class that initially defined the method. After the
this pointer has been adjusted, we cannot check for the derived class's tag,
as the virtual table may not be compatible with the derived class. So we
need to determine which base class we have been adjusted to.
Specifically, at each call site, we use ASTRecordLayout to identify the most
derived class whose virtual table is laid out at the "this" pointer offset
we are using to make the call, and check the virtual table for that tag.
Because address point information is unavailable, we "reconstruct" it as
follows: any virtual tables we create for a non-derived class receive a tag
for that class, and virtual tables for a base class inside a derived class
receive a tag for the base class, together with tags for any derived classes
which are laid out at the same position as the derived class (and therefore
have compatible virtual tables).
Differential Revision: http://reviews.llvm.org/D10520
llvm-svn: 240117
2015-06-19 10:30:43 +08:00
|
|
|
|
|
|
|
// Add a bitset entry for each derived class that is laid out at the same
|
|
|
|
// offset as the least derived base.
|
2016-10-11 00:26:29 +08:00
|
|
|
for (unsigned I = Info.PathToIntroducingObject.size() - 1; I != 0; --I) {
|
|
|
|
const CXXRecordDecl *DerivedRD = Info.PathToIntroducingObject[I - 1];
|
|
|
|
const CXXRecordDecl *BaseRD = Info.PathToIntroducingObject[I];
|
CFI: Implement bitset emission for the Microsoft ABI.
Clang's control flow integrity implementation works by conceptually attaching
"tags" (in the form of bitset entries) to each virtual table, identifying
the names of the classes that the virtual table is compatible with. Under
the Itanium ABI, it is simple to assign tags to virtual tables; they are
simply the address points, which are available via VTableLayout. Because any
overridden methods receive an entry in the derived class's virtual table,
a check for an overridden method call can always be done by checking the
tag of whichever derived class overrode the method call.
The Microsoft ABI is a little different, as it does not directly use address
points, and overrides in a derived class do not cause new virtual table entries
to be added to the derived class; instead, the slot in the base class is
reused, and the compiler needs to adjust the this pointer at the call site
to (generally) the base class that initially defined the method. After the
this pointer has been adjusted, we cannot check for the derived class's tag,
as the virtual table may not be compatible with the derived class. So we
need to determine which base class we have been adjusted to.
Specifically, at each call site, we use ASTRecordLayout to identify the most
derived class whose virtual table is laid out at the "this" pointer offset
we are using to make the call, and check the virtual table for that tag.
Because address point information is unavailable, we "reconstruct" it as
follows: any virtual tables we create for a non-derived class receive a tag
for that class, and virtual tables for a base class inside a derived class
receive a tag for the base class, together with tags for any derived classes
which are laid out at the same position as the derived class (and therefore
have compatible virtual tables).
Differential Revision: http://reviews.llvm.org/D10520
llvm-svn: 240117
2015-06-19 10:30:43 +08:00
|
|
|
|
|
|
|
const ASTRecordLayout &Layout =
|
|
|
|
getContext().getASTRecordLayout(DerivedRD);
|
|
|
|
CharUnits Offset;
|
|
|
|
auto VBI = Layout.getVBaseOffsetsMap().find(BaseRD);
|
|
|
|
if (VBI == Layout.getVBaseOffsetsMap().end())
|
|
|
|
Offset = Layout.getBaseClassOffset(BaseRD);
|
|
|
|
else
|
|
|
|
Offset = VBI->second.VBaseOffset;
|
|
|
|
if (!Offset.isZero())
|
|
|
|
return;
|
2016-06-25 05:21:46 +08:00
|
|
|
CGM.AddVTableTypeMetadata(VTable, AddressPoint, DerivedRD);
|
CFI: Implement bitset emission for the Microsoft ABI.
Clang's control flow integrity implementation works by conceptually attaching
"tags" (in the form of bitset entries) to each virtual table, identifying
the names of the classes that the virtual table is compatible with. Under
the Itanium ABI, it is simple to assign tags to virtual tables; they are
simply the address points, which are available via VTableLayout. Because any
overridden methods receive an entry in the derived class's virtual table,
a check for an overridden method call can always be done by checking the
tag of whichever derived class overrode the method call.
The Microsoft ABI is a little different, as it does not directly use address
points, and overrides in a derived class do not cause new virtual table entries
to be added to the derived class; instead, the slot in the base class is
reused, and the compiler needs to adjust the this pointer at the call site
to (generally) the base class that initially defined the method. After the
this pointer has been adjusted, we cannot check for the derived class's tag,
as the virtual table may not be compatible with the derived class. So we
need to determine which base class we have been adjusted to.
Specifically, at each call site, we use ASTRecordLayout to identify the most
derived class whose virtual table is laid out at the "this" pointer offset
we are using to make the call, and check the virtual table for that tag.
Because address point information is unavailable, we "reconstruct" it as
follows: any virtual tables we create for a non-derived class receive a tag
for that class, and virtual tables for a base class inside a derived class
receive a tag for the base class, together with tags for any derived classes
which are laid out at the same position as the derived class (and therefore
have compatible virtual tables).
Differential Revision: http://reviews.llvm.org/D10520
llvm-svn: 240117
2015-06-19 10:30:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Finally do the same for the most derived class.
|
2016-10-11 00:26:29 +08:00
|
|
|
if (Info.FullOffsetInMDC.isZero())
|
2016-06-25 05:21:46 +08:00
|
|
|
CGM.AddVTableTypeMetadata(VTable, AddressPoint, RD);
|
CFI: Implement bitset emission for the Microsoft ABI.
Clang's control flow integrity implementation works by conceptually attaching
"tags" (in the form of bitset entries) to each virtual table, identifying
the names of the classes that the virtual table is compatible with. Under
the Itanium ABI, it is simple to assign tags to virtual tables; they are
simply the address points, which are available via VTableLayout. Because any
overridden methods receive an entry in the derived class's virtual table,
a check for an overridden method call can always be done by checking the
tag of whichever derived class overrode the method call.
The Microsoft ABI is a little different, as it does not directly use address
points, and overrides in a derived class do not cause new virtual table entries
to be added to the derived class; instead, the slot in the base class is
reused, and the compiler needs to adjust the this pointer at the call site
to (generally) the base class that initially defined the method. After the
this pointer has been adjusted, we cannot check for the derived class's tag,
as the virtual table may not be compatible with the derived class. So we
need to determine which base class we have been adjusted to.
Specifically, at each call site, we use ASTRecordLayout to identify the most
derived class whose virtual table is laid out at the "this" pointer offset
we are using to make the call, and check the virtual table for that tag.
Because address point information is unavailable, we "reconstruct" it as
follows: any virtual tables we create for a non-derived class receive a tag
for that class, and virtual tables for a base class inside a derived class
receive a tag for the base class, together with tags for any derived classes
which are laid out at the same position as the derived class (and therefore
have compatible virtual tables).
Differential Revision: http://reviews.llvm.org/D10520
llvm-svn: 240117
2015-06-19 10:30:43 +08:00
|
|
|
}
|
|
|
|
|
2013-09-27 22:48:01 +08:00
|
|
|
void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
|
|
|
|
const CXXRecordDecl *RD) {
|
2013-11-05 23:54:58 +08:00
|
|
|
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
|
2014-09-11 22:13:49 +08:00
|
|
|
const VPtrInfoVector &VFPtrs = VFTContext.getVFPtrOffsets(RD);
|
2013-09-27 22:48:01 +08:00
|
|
|
|
2016-10-11 00:26:29 +08:00
|
|
|
for (const std::unique_ptr<VPtrInfo>& Info : VFPtrs) {
|
2014-05-24 00:07:43 +08:00
|
|
|
llvm::GlobalVariable *VTable = getAddrOfVTable(RD, Info->FullOffsetInMDC);
|
2013-09-27 22:48:01 +08:00
|
|
|
if (VTable->hasInitializer())
|
|
|
|
continue;
|
2014-07-02 04:30:31 +08:00
|
|
|
|
2013-09-27 22:48:01 +08:00
|
|
|
const VTableLayout &VTLayout =
|
2014-05-24 00:07:43 +08:00
|
|
|
VFTContext.getVFTableLayout(RD, Info->FullOffsetInMDC);
|
2016-02-10 01:27:52 +08:00
|
|
|
|
|
|
|
llvm::Constant *RTTI = nullptr;
|
|
|
|
if (any_of(VTLayout.vtable_components(),
|
|
|
|
[](const VTableComponent &VTC) { return VTC.isRTTIKind(); }))
|
2016-10-11 00:26:29 +08:00
|
|
|
RTTI = getMSCompleteObjectLocator(RD, *Info);
|
2016-02-10 01:27:52 +08:00
|
|
|
|
2020-06-12 02:17:08 +08:00
|
|
|
ConstantInitBuilder builder(CGM);
|
|
|
|
auto components = builder.beginStruct();
|
|
|
|
CGVT.createVTableInitializer(components, VTLayout, RTTI,
|
|
|
|
VTable->hasLocalLinkage());
|
|
|
|
components.finishAndSetAsInitializer(VTable);
|
CFI: Implement bitset emission for the Microsoft ABI.
Clang's control flow integrity implementation works by conceptually attaching
"tags" (in the form of bitset entries) to each virtual table, identifying
the names of the classes that the virtual table is compatible with. Under
the Itanium ABI, it is simple to assign tags to virtual tables; they are
simply the address points, which are available via VTableLayout. Because any
overridden methods receive an entry in the derived class's virtual table,
a check for an overridden method call can always be done by checking the
tag of whichever derived class overrode the method call.
The Microsoft ABI is a little different, as it does not directly use address
points, and overrides in a derived class do not cause new virtual table entries
to be added to the derived class; instead, the slot in the base class is
reused, and the compiler needs to adjust the this pointer at the call site
to (generally) the base class that initially defined the method. After the
this pointer has been adjusted, we cannot check for the derived class's tag,
as the virtual table may not be compatible with the derived class. So we
need to determine which base class we have been adjusted to.
Specifically, at each call site, we use ASTRecordLayout to identify the most
derived class whose virtual table is laid out at the "this" pointer offset
we are using to make the call, and check the virtual table for that tag.
Because address point information is unavailable, we "reconstruct" it as
follows: any virtual tables we create for a non-derived class receive a tag
for that class, and virtual tables for a base class inside a derived class
receive a tag for the base class, together with tags for any derived classes
which are laid out at the same position as the derived class (and therefore
have compatible virtual tables).
Differential Revision: http://reviews.llvm.org/D10520
llvm-svn: 240117
2015-06-19 10:30:43 +08:00
|
|
|
|
2016-10-11 00:26:29 +08:00
|
|
|
emitVTableTypeMetadata(*Info, RD, VTable);
|
2013-09-27 22:48:01 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-15 08:37:06 +08:00
|
|
|
bool MicrosoftCXXABI::isVirtualOffsetNeededForVTableField(
|
|
|
|
CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
|
|
|
|
return Vptr.NearestVBase != nullptr;
|
|
|
|
}
|
|
|
|
|
2013-09-27 22:48:01 +08:00
|
|
|
llvm::Value *MicrosoftCXXABI::getVTableAddressPointInStructor(
|
|
|
|
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
|
2015-09-15 08:37:06 +08:00
|
|
|
const CXXRecordDecl *NearestVBase) {
|
|
|
|
llvm::Constant *VTableAddressPoint = getVTableAddressPoint(Base, VTableClass);
|
2013-09-27 22:48:01 +08:00
|
|
|
if (!VTableAddressPoint) {
|
|
|
|
assert(Base.getBase()->getNumVBases() &&
|
2015-03-15 07:44:48 +08:00
|
|
|
!getContext().getASTRecordLayout(Base.getBase()).hasOwnVFPtr());
|
2013-09-27 22:48:01 +08:00
|
|
|
}
|
|
|
|
return VTableAddressPoint;
|
|
|
|
}
|
|
|
|
|
2013-10-03 14:26:13 +08:00
|
|
|
static void mangleVFTableName(MicrosoftMangleContext &MangleContext,
|
2016-10-11 00:26:29 +08:00
|
|
|
const CXXRecordDecl *RD, const VPtrInfo &VFPtr,
|
2013-10-03 14:26:13 +08:00
|
|
|
SmallString<256> &Name) {
|
2013-09-27 22:48:01 +08:00
|
|
|
llvm::raw_svector_ostream Out(Name);
|
2016-10-11 00:26:29 +08:00
|
|
|
MangleContext.mangleCXXVFTable(RD, VFPtr.MangledPath, Out);
|
2013-09-27 22:48:01 +08:00
|
|
|
}
|
|
|
|
|
2015-09-15 08:37:06 +08:00
|
|
|
llvm::Constant *
|
|
|
|
MicrosoftCXXABI::getVTableAddressPoint(BaseSubobject Base,
|
|
|
|
const CXXRecordDecl *VTableClass) {
|
2015-09-11 04:18:30 +08:00
|
|
|
(void)getAddrOfVTable(VTableClass, Base.getBaseOffset());
|
|
|
|
VFTableIdTy ID(VTableClass, Base.getBaseOffset());
|
2015-09-15 08:37:06 +08:00
|
|
|
return VFTablesMap[ID];
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr(
|
|
|
|
BaseSubobject Base, const CXXRecordDecl *VTableClass) {
|
|
|
|
llvm::Constant *VFTable = getVTableAddressPoint(Base, VTableClass);
|
2014-07-02 04:30:31 +08:00
|
|
|
assert(VFTable && "Couldn't find a vftable for the given base?");
|
|
|
|
return VFTable;
|
2013-09-27 22:48:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
|
|
|
|
CharUnits VPtrOffset) {
|
|
|
|
// getAddrOfVTable may return 0 if asked to get an address of a vtable which
|
|
|
|
// shouldn't be used in the given record type. We want to cache this result in
|
|
|
|
// VFTablesMap, thus a simple zero check is not sufficient.
|
2015-09-15 08:37:06 +08:00
|
|
|
|
2013-09-27 22:48:01 +08:00
|
|
|
VFTableIdTy ID(RD, VPtrOffset);
|
2014-07-02 04:30:31 +08:00
|
|
|
VTablesMapTy::iterator I;
|
2013-09-27 22:48:01 +08:00
|
|
|
bool Inserted;
|
2014-07-02 04:30:31 +08:00
|
|
|
std::tie(I, Inserted) = VTablesMap.insert(std::make_pair(ID, nullptr));
|
2013-09-27 22:48:01 +08:00
|
|
|
if (!Inserted)
|
|
|
|
return I->second;
|
|
|
|
|
|
|
|
llvm::GlobalVariable *&VTable = I->second;
|
|
|
|
|
2013-11-05 23:54:58 +08:00
|
|
|
MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext();
|
2014-02-28 03:40:09 +08:00
|
|
|
const VPtrInfoVector &VFPtrs = VTContext.getVFPtrOffsets(RD);
|
2013-09-27 22:48:01 +08:00
|
|
|
|
2014-11-19 15:49:47 +08:00
|
|
|
if (DeferredVFTables.insert(RD).second) {
|
2013-09-27 22:48:01 +08:00
|
|
|
// We haven't processed this record type before.
|
2016-01-29 09:35:53 +08:00
|
|
|
// Queue up this vtable for possible deferred emission.
|
2013-09-27 22:48:01 +08:00
|
|
|
CGM.addDeferredVTable(RD);
|
|
|
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
// Create all the vftables at once in order to make sure each vftable has
|
|
|
|
// a unique mangled name.
|
|
|
|
llvm::StringSet<> ObservedMangledNames;
|
|
|
|
for (size_t J = 0, F = VFPtrs.size(); J != F; ++J) {
|
|
|
|
SmallString<256> Name;
|
2016-10-11 00:26:29 +08:00
|
|
|
mangleVFTableName(getMangleContext(), RD, *VFPtrs[J], Name);
|
2014-11-19 10:56:13 +08:00
|
|
|
if (!ObservedMangledNames.insert(Name.str()).second)
|
2013-09-27 22:48:01 +08:00
|
|
|
llvm_unreachable("Already saw this mangling before?");
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2016-10-11 00:26:29 +08:00
|
|
|
const std::unique_ptr<VPtrInfo> *VFPtrI = std::find_if(
|
|
|
|
VFPtrs.begin(), VFPtrs.end(), [&](const std::unique_ptr<VPtrInfo>& VPI) {
|
2015-03-19 06:04:43 +08:00
|
|
|
return VPI->FullOffsetInMDC == VPtrOffset;
|
|
|
|
});
|
|
|
|
if (VFPtrI == VFPtrs.end()) {
|
|
|
|
VFTablesMap[ID] = nullptr;
|
|
|
|
return nullptr;
|
|
|
|
}
|
2016-10-11 00:26:29 +08:00
|
|
|
const std::unique_ptr<VPtrInfo> &VFPtr = *VFPtrI;
|
2015-03-19 06:04:43 +08:00
|
|
|
|
|
|
|
SmallString<256> VFTableName;
|
2016-10-11 00:26:29 +08:00
|
|
|
mangleVFTableName(getMangleContext(), RD, *VFPtr, VFTableName);
|
2015-03-19 06:04:43 +08:00
|
|
|
|
2016-02-12 01:49:28 +08:00
|
|
|
// Classes marked __declspec(dllimport) need vftables generated on the
|
|
|
|
// import-side in order to support features like constexpr. No other
|
|
|
|
// translation unit relies on the emission of the local vftable, translation
|
|
|
|
// units are expected to generate them as needed.
|
|
|
|
//
|
|
|
|
// Because of this unique behavior, we maintain this logic here instead of
|
|
|
|
// getVTableLinkage.
|
|
|
|
llvm::GlobalValue::LinkageTypes VFTableLinkage =
|
|
|
|
RD->hasAttr<DLLImportAttr>() ? llvm::GlobalValue::LinkOnceODRLinkage
|
|
|
|
: CGM.getVTableLinkage(RD);
|
2015-03-19 06:04:43 +08:00
|
|
|
bool VFTableComesFromAnotherTU =
|
|
|
|
llvm::GlobalValue::isAvailableExternallyLinkage(VFTableLinkage) ||
|
|
|
|
llvm::GlobalValue::isExternalLinkage(VFTableLinkage);
|
|
|
|
bool VTableAliasIsRequred =
|
|
|
|
!VFTableComesFromAnotherTU && getContext().getLangOpts().RTTIData;
|
|
|
|
|
|
|
|
if (llvm::GlobalValue *VFTable =
|
|
|
|
CGM.getModule().getNamedGlobal(VFTableName)) {
|
|
|
|
VFTablesMap[ID] = VFTable;
|
2015-09-15 08:37:06 +08:00
|
|
|
VTable = VTableAliasIsRequred
|
|
|
|
? cast<llvm::GlobalVariable>(
|
|
|
|
cast<llvm::GlobalAlias>(VFTable)->getBaseObject())
|
|
|
|
: cast<llvm::GlobalVariable>(VFTable);
|
|
|
|
return VTable;
|
2015-03-19 06:04:43 +08:00
|
|
|
}
|
2014-07-02 04:30:31 +08:00
|
|
|
|
2016-12-14 04:40:39 +08:00
|
|
|
const VTableLayout &VTLayout =
|
|
|
|
VTContext.getVFTableLayout(RD, VFPtr->FullOffsetInMDC);
|
2015-03-19 06:04:43 +08:00
|
|
|
llvm::GlobalValue::LinkageTypes VTableLinkage =
|
|
|
|
VTableAliasIsRequred ? llvm::GlobalValue::PrivateLinkage : VFTableLinkage;
|
|
|
|
|
|
|
|
StringRef VTableName = VTableAliasIsRequred ? StringRef() : VFTableName.str();
|
|
|
|
|
2016-12-14 04:40:39 +08:00
|
|
|
llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
|
2015-03-19 06:04:43 +08:00
|
|
|
|
|
|
|
// Create a backing variable for the contents of VTable. The VTable may
|
|
|
|
// or may not include space for a pointer to RTTI data.
|
|
|
|
llvm::GlobalValue *VFTable;
|
|
|
|
VTable = new llvm::GlobalVariable(CGM.getModule(), VTableType,
|
|
|
|
/*isConstant=*/true, VTableLinkage,
|
|
|
|
/*Initializer=*/nullptr, VTableName);
|
2016-06-15 05:02:05 +08:00
|
|
|
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
|
2015-03-19 06:04:43 +08:00
|
|
|
|
|
|
|
llvm::Comdat *C = nullptr;
|
|
|
|
if (!VFTableComesFromAnotherTU &&
|
|
|
|
(llvm::GlobalValue::isWeakForLinker(VFTableLinkage) ||
|
|
|
|
(llvm::GlobalValue::isLocalLinkage(VFTableLinkage) &&
|
|
|
|
VTableAliasIsRequred)))
|
|
|
|
C = CGM.getModule().getOrInsertComdat(VFTableName.str());
|
|
|
|
|
|
|
|
// Only insert a pointer into the VFTable for RTTI data if we are not
|
|
|
|
// importing it. We never reference the RTTI data directly so there is no
|
|
|
|
// need to make room for it.
|
|
|
|
if (VTableAliasIsRequred) {
|
2016-12-14 04:40:39 +08:00
|
|
|
llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.Int32Ty, 0),
|
|
|
|
llvm::ConstantInt::get(CGM.Int32Ty, 0),
|
|
|
|
llvm::ConstantInt::get(CGM.Int32Ty, 1)};
|
2015-03-19 06:04:43 +08:00
|
|
|
// Create a GEP which points just after the first entry in the VFTable,
|
|
|
|
// this should be the location of the first virtual method.
|
2015-04-03 02:55:21 +08:00
|
|
|
llvm::Constant *VTableGEP = llvm::ConstantExpr::getInBoundsGetElementPtr(
|
|
|
|
VTable->getValueType(), VTable, GEPIndices);
|
2015-03-19 06:04:43 +08:00
|
|
|
if (llvm::GlobalValue::isWeakForLinker(VFTableLinkage)) {
|
|
|
|
VFTableLinkage = llvm::GlobalValue::ExternalLinkage;
|
|
|
|
if (C)
|
|
|
|
C->setSelectionKind(llvm::Comdat::Largest);
|
2014-07-02 04:30:31 +08:00
|
|
|
}
|
2015-09-15 02:38:22 +08:00
|
|
|
VFTable = llvm::GlobalAlias::create(CGM.Int8PtrTy,
|
|
|
|
/*AddressSpace=*/0, VFTableLinkage,
|
|
|
|
VFTableName.str(), VTableGEP,
|
|
|
|
&CGM.getModule());
|
2016-06-15 05:02:05 +08:00
|
|
|
VFTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
|
2015-03-19 06:04:43 +08:00
|
|
|
} else {
|
|
|
|
// We don't need a GlobalAlias to be a symbol for the VTable if we won't
|
|
|
|
// be referencing any RTTI data.
|
|
|
|
// The GlobalVariable will end up being an appropriate definition of the
|
|
|
|
// VFTable.
|
|
|
|
VFTable = VTable;
|
2013-09-27 22:48:01 +08:00
|
|
|
}
|
2015-03-19 06:04:43 +08:00
|
|
|
if (C)
|
|
|
|
VTable->setComdat(C);
|
|
|
|
|
2016-02-12 01:49:28 +08:00
|
|
|
if (RD->hasAttr<DLLExportAttr>())
|
2015-03-19 06:04:43 +08:00
|
|
|
VFTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
|
2013-09-27 22:48:01 +08:00
|
|
|
|
2015-03-19 06:04:43 +08:00
|
|
|
VFTablesMap[ID] = VFTable;
|
2013-09-27 22:48:01 +08:00
|
|
|
return VTable;
|
|
|
|
}
|
|
|
|
|
2018-02-07 02:52:44 +08:00
|
|
|
/// Load the function pointer for a virtual method call out of the vftable.
///
/// \param CGF  The function the call is being emitted in.
/// \param GD   The virtual method being called.
/// \param This The unadjusted 'this' address of the call.
/// \param Ty   The LLVM function type of the method being called.
/// \param Loc  Source location of the call, used when emitting type metadata.
/// \returns a CGCallee wrapping the loaded virtual function pointer.
CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                    GlobalDecl GD,
                                                    Address This,
                                                    llvm::Type *Ty,
                                                    SourceLocation Loc) {
  CGBuilderTy &Builder = CGF.Builder;

  // The vfptr points to an array of function pointers, so the load below goes
  // through two levels of indirection from the method's function type.
  Ty = Ty->getPointerTo()->getPointerTo();
  // In the MS ABI, 'this' for a virtual call must address the subobject whose
  // vfptr contains the method; adjust it before loading the vtable pointer.
  Address VPtr =
      adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);

  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty, MethodDecl->getParent());

  MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
  MethodVFTableLocation ML = VFTContext.getMethodVFTableLocation(GD);

  // Compute the identity of the most derived class whose virtual table is
  // located at the MethodVFTableLocation ML.
  auto getObjectWithVPtr = [&] {
    return llvm::find_if(VFTContext.getVFPtrOffsets(
                             ML.VBase ? ML.VBase : MethodDecl->getParent()),
                         [&](const std::unique_ptr<VPtrInfo> &Info) {
                           return Info->FullOffsetInMDC == ML.VFPtrOffset;
                         })
        ->get()
        ->ObjectWithVPtr;
  };

  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: emit a type-checked load of the slot at the byte offset
    // corresponding to vftable index ML.Index.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        getObjectWithVPtr(), VTable,
        ML.Index * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    if (CGM.getCodeGenOpts().PrepareForLTO)
      CGF.EmitTypeMetadataCodeForVCall(getObjectWithVPtr(), VTable, Loc);

    // Plain virtual dispatch: GEP to slot ML.Index and load the callee.
    llvm::Value *VFuncPtr =
        Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
    VFunc = Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
|
|
|
|
|
2014-11-01 04:09:12 +08:00
|
|
|
/// Emit a virtual destructor call in the MS ABI.
///
/// The vftable contains only a single (deleting) destructor entry; the
/// complete-object vs. deleting behavior is selected at the call site by an
/// implicit integer argument, so both DtorTypes dispatch through
/// GlobalDecl(Dtor, Dtor_Deleting).
llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  // E is exactly one of a member call expression or a delete expression.
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  // We have only one destructor in the vftable but can get both behaviors
  // by passing an implicit int parameter.
  GlobalDecl GD(Dtor, Dtor_Deleting);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  ASTContext &Context = getContext();
  // The implicit flag: nonzero requests the deleting behavior.
  llvm::Value *ImplicitParam = llvm::ConstantInt::get(
      llvm::IntegerType::getInt32Ty(CGF.getLLVMContext()),
      DtorType == Dtor_Deleting);

  // Static type of the object being destroyed, used as the type of 'this'.
  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
  RValue RV = CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
                                        ImplicitParam, Context.IntTy, CE);
  return RV.getScalarVal();
}
|
|
|
|
|
2014-01-03 08:14:35 +08:00
|
|
|
/// Return the cached set of vbtable globals for \p RD, creating and caching
/// them on first use.
const VBTableGlobals &
MicrosoftCXXABI::enumerateVBTables(const CXXRecordDecl *RD) {
  // At this layer the cache is keyed on the whole class, which is much
  // easier than caching each vbtable individually.
  auto InsertResult = VBTablesMap.insert(std::make_pair(RD, VBTableGlobals()));
  VBTableGlobals &VBGlobals = InsertResult.first->second;
  // A pre-existing entry means the globals were already materialized.
  if (!InsertResult.second)
    return VBGlobals;

  MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext();
  VBGlobals.VBTables = &VTContext.enumerateVBTables(RD);

  // Materialize a global for every vbtable up front so the mangled names
  // never have to be recomputed.
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  for (const std::unique_ptr<VPtrInfo> &VBT : *VBGlobals.VBTables)
    VBGlobals.Globals.push_back(getAddrOfVBTable(*VBT, RD, Linkage));

  return VBGlobals;
}
|
|
|
|
|
2018-04-03 04:00:39 +08:00
|
|
|
/// Build (or return the previously built) thunk used when invoking a member
/// function pointer to a virtual method.  The thunk re-performs virtual
/// dispatch: it loads the vfptr from 'this', loads the callee from vftable
/// slot ML.Index, and musttail-calls it.
llvm::Function *
MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
                                        const MethodVFTableLocation &ML) {
  assert(!isa<CXXConstructorDecl>(MD) && !isa<CXXDestructorDecl>(MD) &&
         "can't form pointers to ctors or virtual dtors");

  // Calculate the mangled name.
  SmallString<256> ThunkName;
  llvm::raw_svector_ostream Out(ThunkName);
  getMangleContext().mangleVirtualMemPtrThunk(MD, ML, Out);

  // If the thunk has been generated previously, just return it.
  if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
    return cast<llvm::Function>(GV);

  // Create the llvm::Function.  The thunk prototypes only 'this'; remaining
  // arguments are forwarded via the musttail call below.
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeUnprototypedMustTailThunk(MD);
  llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
  llvm::Function *ThunkFn =
      llvm::Function::Create(ThunkTy, llvm::Function::ExternalLinkage,
                             ThunkName.str(), &CGM.getModule());
  assert(ThunkFn->getName() == ThunkName && "name was uniqued!");

  // Thunks for visible methods are deduplicated across TUs via linkonce_odr
  // and a comdat; otherwise keep them internal.
  ThunkFn->setLinkage(MD->isExternallyVisible()
                          ? llvm::GlobalValue::LinkOnceODRLinkage
                          : llvm::GlobalValue::InternalLinkage);
  if (MD->isExternallyVisible())
    ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));

  CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn);
  CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn);

  // Add the "thunk" attribute so that LLVM knows that the return type is
  // meaningless. These thunks can be used to call functions with differing
  // return types, and the caller is required to cast the prototype
  // appropriately to extract the correct value.
  ThunkFn->addFnAttr("thunk");

  // These thunks can be compared, so they are not unnamed.
  ThunkFn->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::None);

  // Start codegen.
  CodeGenFunction CGF(CGM);
  CGF.CurGD = GlobalDecl(MD);
  CGF.CurFuncIsThunk = true;

  // Build FunctionArgs, but only include the implicit 'this' parameter
  // declaration.
  FunctionArgList FunctionArgs;
  buildThisParam(CGF, FunctionArgs);

  // Start defining the function.
  CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
                    FunctionArgs, MD->getLocation(), SourceLocation());
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  // Load the vfptr and then callee from the vftable. The callee should have
  // adjusted 'this' so that the vfptr is at offset zero.
  llvm::Value *VTable = CGF.GetVTablePtr(
      getThisAddress(CGF), ThunkTy->getPointerTo()->getPointerTo(), MD->getParent());

  llvm::Value *VFuncPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
  llvm::Value *Callee =
      CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());

  // Forward everything to the real callee with a musttail call; this also
  // finishes the function.
  CGF.EmitMustTailThunk(MD, getThisValue(CGF), {ThunkTy, Callee});

  return ThunkFn;
}
|
|
|
|
|
2013-09-27 22:48:01 +08:00
|
|
|
void MicrosoftCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
|
2014-01-03 08:14:35 +08:00
|
|
|
const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
|
|
|
|
for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
|
2016-10-11 00:26:29 +08:00
|
|
|
const std::unique_ptr<VPtrInfo>& VBT = (*VBGlobals.VBTables)[I];
|
2014-01-03 08:14:35 +08:00
|
|
|
llvm::GlobalVariable *GV = VBGlobals.Globals[I];
|
2015-02-02 18:22:20 +08:00
|
|
|
if (GV->isDeclaration())
|
|
|
|
emitVBTableDefinition(*VBT, RD, GV);
|
2014-01-03 08:14:35 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Create (and possibly define) the global variable for one vbtable of
/// \p RD, applying the requested linkage and any DLL storage class implied
/// by the class's attributes.
llvm::GlobalVariable *
MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
                                  llvm::GlobalVariable::LinkageTypes Linkage) {
  // Mangle the name of this particular vbtable.
  SmallString<256> MangledName;
  llvm::raw_svector_ostream Stream(MangledName);
  getMangleContext().mangleCXXVBTable(RD, VBT.MangledPath, Stream);
  StringRef Name = MangledName.str();

  // The table is an array of ints: a leading entry plus one entry per
  // virtual base of the subobject that owns the vbptr.
  unsigned NumEntries = 1 + VBT.ObjectWithVPtr->getNumVBases();
  llvm::ArrayType *VBTableType = llvm::ArrayType::get(CGM.IntTy, NumEntries);

  assert(!CGM.getModule().getNamedGlobal(Name) &&
         "vbtable with this name already exists: mangling bug?");
  CharUnits Alignment =
      CGM.getContext().getTypeAlignInChars(CGM.getContext().IntTy);
  llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VBTableType, Linkage, Alignment.getQuantity());
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  if (RD->hasAttr<DLLImportAttr>())
    GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
  else if (RD->hasAttr<DLLExportAttr>())
    GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);

  // Emit the initializer now unless the definition may be provided by some
  // other translation unit.
  if (!GV->hasExternalLinkage())
    emitVBTableDefinition(VBT, RD, GV);

  return GV;
}
|
|
|
|
|
2014-02-28 03:40:09 +08:00
|
|
|
/// Fill in the initializer of a vbtable global.
///
/// Each entry is an int-sized offset measured from the subobject's vbptr:
/// slot 0 holds the offset from the vbptr back to the subobject itself, and
/// slot N (N >= 1) holds the offset to the virtual base with vbtable index N
/// in the layout of the complete class \p RD.
void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
                                            const CXXRecordDecl *RD,
                                            llvm::GlobalVariable *GV) const {
  const CXXRecordDecl *ObjectWithVPtr = VBT.ObjectWithVPtr;

  assert(RD->getNumVBases() && ObjectWithVPtr->getNumVBases() &&
         "should only emit vbtables for classes with vbtables");

  const ASTRecordLayout &BaseLayout =
      getContext().getASTRecordLayout(VBT.IntroducingObject);
  const ASTRecordLayout &DerivedLayout = getContext().getASTRecordLayout(RD);

  // Slots are filled out of order (by vbtable index), so pre-size the vector.
  SmallVector<llvm::Constant *, 4> Offsets(1 + ObjectWithVPtr->getNumVBases(),
                                           nullptr);

  // The offset from ObjectWithVPtr's vbptr to itself always leads.
  CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset();
  Offsets[0] = llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity());

  MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext();
  for (const auto &I : ObjectWithVPtr->vbases()) {
    const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
    // Offset of the virtual base within the complete object.
    CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase);
    assert(!Offset.isNegative());

    // Make it relative to the subobject vbptr.
    CharUnits CompleteVBPtrOffset = VBT.NonVirtualOffset + VBPtrOffset;
    if (VBT.getVBaseWithVPtr())
      CompleteVBPtrOffset +=
          DerivedLayout.getVBaseClassOffset(VBT.getVBaseWithVPtr());
    Offset -= CompleteVBPtrOffset;

    unsigned VBIndex = Context.getVBTableIndex(ObjectWithVPtr, VBase);
    assert(Offsets[VBIndex] == nullptr && "The same vbindex seen twice?");
    Offsets[VBIndex] = llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity());
  }

  // The number of entries must match the array type chosen when the global
  // was created in getAddrOfVBTable.
  assert(Offsets.size() ==
         cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType())
                               ->getElementType())->getNumElements());
  llvm::ArrayType *VBTableType =
      llvm::ArrayType::get(CGM.IntTy, Offsets.size());
  llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets);
  GV->setInitializer(Init);

  // For dllimported classes the local definition is only a fallback the
  // optimizer may use; the real table lives in the imported DLL.
  if (RD->hasAttr<DLLImportAttr>())
    GV->setLinkage(llvm::GlobalVariable::AvailableExternallyLinkage);
}
|
|
|
|
|
2013-10-30 19:55:43 +08:00
|
|
|
llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address This,
|
2013-10-30 19:55:43 +08:00
|
|
|
const ThisAdjustment &TA) {
|
|
|
|
if (TA.isEmpty())
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return This.getPointer();
|
2013-10-30 19:55:43 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
|
2013-10-30 19:55:43 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *V;
|
|
|
|
if (TA.Virtual.isEmpty()) {
|
|
|
|
V = This.getPointer();
|
|
|
|
} else {
|
2013-11-06 14:24:31 +08:00
|
|
|
assert(TA.Virtual.Microsoft.VtordispOffset < 0);
|
|
|
|
// Adjust the this argument based on the vtordisp value.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address VtorDispPtr =
|
|
|
|
CGF.Builder.CreateConstInBoundsByteGEP(This,
|
|
|
|
CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
|
|
|
|
VtorDispPtr = CGF.Builder.CreateElementBitCast(VtorDispPtr, CGF.Int32Ty);
|
2013-11-06 14:24:31 +08:00
|
|
|
llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
V = CGF.Builder.CreateGEP(This.getPointer(),
|
|
|
|
CGF.Builder.CreateNeg(VtorDisp));
|
|
|
|
|
|
|
|
// Unfortunately, having applied the vtordisp means that we no
|
|
|
|
// longer really have a known alignment for the vbptr step.
|
|
|
|
// We'll assume the vbptr is pointer-aligned.
|
2013-11-06 14:24:31 +08:00
|
|
|
|
|
|
|
if (TA.Virtual.Microsoft.VBPtrOffset) {
|
|
|
|
// If the final overrider is defined in a virtual base other than the one
|
|
|
|
// that holds the vfptr, we have to use a vtordispex thunk which looks up
|
|
|
|
// the vbtable of the derived class.
|
|
|
|
assert(TA.Virtual.Microsoft.VBPtrOffset > 0);
|
|
|
|
assert(TA.Virtual.Microsoft.VBOffsetOffset >= 0);
|
|
|
|
llvm::Value *VBPtr;
|
|
|
|
llvm::Value *VBaseOffset =
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
GetVBaseOffsetFromVBPtr(CGF, Address(V, CGF.getPointerAlign()),
|
|
|
|
-TA.Virtual.Microsoft.VBPtrOffset,
|
2013-11-06 14:24:31 +08:00
|
|
|
TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
|
|
|
|
V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
|
|
|
|
}
|
|
|
|
}
|
2013-10-30 19:55:43 +08:00
|
|
|
|
|
|
|
if (TA.NonVirtual) {
|
|
|
|
// Non-virtual adjustment might result in a pointer outside the allocated
|
|
|
|
// object, e.g. if the final overrider class is laid out after the virtual
|
|
|
|
// base that declares a method in the most derived class.
|
|
|
|
V = CGF.Builder.CreateConstGEP1_32(V, TA.NonVirtual);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Don't need to bitcast back, the call CodeGen will handle this.
|
|
|
|
return V;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Value *
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
|
2013-10-30 19:55:43 +08:00
|
|
|
const ReturnAdjustment &RA) {
|
|
|
|
if (RA.isEmpty())
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return Ret.getPointer();
|
2013-10-30 19:55:43 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
auto OrigTy = Ret.getType();
|
|
|
|
Ret = CGF.Builder.CreateElementBitCast(Ret, CGF.Int8Ty);
|
2013-10-30 19:55:43 +08:00
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *V = Ret.getPointer();
|
2013-10-30 19:55:43 +08:00
|
|
|
if (RA.Virtual.Microsoft.VBIndex) {
|
|
|
|
assert(RA.Virtual.Microsoft.VBIndex > 0);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
int32_t IntSize = CGF.getIntSize().getQuantity();
|
2013-10-30 19:55:43 +08:00
|
|
|
llvm::Value *VBPtr;
|
|
|
|
llvm::Value *VBaseOffset =
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
GetVBaseOffsetFromVBPtr(CGF, Ret, RA.Virtual.Microsoft.VBPtrOffset,
|
2013-10-30 19:55:43 +08:00
|
|
|
IntSize * RA.Virtual.Microsoft.VBIndex, &VBPtr);
|
|
|
|
V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (RA.NonVirtual)
|
2015-04-04 23:12:29 +08:00
|
|
|
V = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, V, RA.NonVirtual);
|
2013-10-30 19:55:43 +08:00
|
|
|
|
|
|
|
// Cast back to the original type.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return CGF.Builder.CreateBitCast(V, OrigTy);
|
2013-10-30 19:55:43 +08:00
|
|
|
}
|
|
|
|
|
2012-05-01 13:23:51 +08:00
|
|
|
bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
|
|
|
|
QualType elementType) {
|
|
|
|
// Microsoft seems to completely ignore the possibility of a
|
|
|
|
// two-argument usual deallocation function.
|
|
|
|
return elementType.isDestructedType();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool MicrosoftCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
|
|
|
|
// Microsoft seems to completely ignore the possibility of a
|
|
|
|
// two-argument usual deallocation function.
|
|
|
|
return expr->getAllocatedType().isDestructedType();
|
|
|
|
}
|
|
|
|
|
|
|
|
CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
  // The array cookie is always a size_t; we then pad that out to the
  // alignment of the element type so the array data that follows the
  // cookie stays suitably aligned.
  ASTContext &Ctx = getContext();
  CharUnits sizeTypeWidth = Ctx.getTypeSizeInChars(Ctx.getSizeType());
  CharUnits elementAlign = Ctx.getTypeAlignInChars(type);
  return std::max(sizeTypeWidth, elementAlign);
}
|
|
|
|
|
|
|
|
// Read the element count back out of an array cookie.  In the Microsoft
// ABI the count is a size_t stored at the very start of the allocation,
// so we retype the allocation pointer to size_t and load through it.
// cookieSize is not needed here because the count lives at offset zero.
llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                  Address allocPtr,
                                                  CharUnits cookieSize) {
  Address numElementsPtr =
    CGF.Builder.CreateElementBitCast(allocPtr, CGF.SizeTy);
  return CGF.Builder.CreateLoad(numElementsPtr);
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
// Initialize the array cookie for a new[] allocation: store the element
// count at the start of the allocation, then return the adjusted address
// of the array payload that follows the cookie.
Address MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                               Address newPtr,
                                               llvm::Value *numElements,
                                               const CXXNewExpr *expr,
                                               QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The size of the cookie.
  CharUnits cookieSize = getArrayCookieSizeImpl(elementType);

  // Compute an offset to the cookie.  In this ABI the cookie sits at the
  // very start of the allocation, so no adjustment is required.
  Address cookiePtr = newPtr;

  // Write the number of elements into the appropriate slot.
  Address numElementsPtr
    = CGF.Builder.CreateElementBitCast(cookiePtr, CGF.SizeTy);
  CGF.Builder.CreateStore(numElements, numElementsPtr);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
|
|
|
|
|
2014-10-05 13:05:40 +08:00
|
|
|
// Register a destructor for a thread_local variable via the MSVC CRT's
// __tlregdtor entry point (the TLS analogue of atexit).  A small stub
// function that invokes Dtor on Addr is created and handed to the CRT.
static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD,
                                        llvm::FunctionCallee Dtor,
                                        llvm::Constant *Addr) {
  // Create a function which calls the destructor.
  llvm::Constant *DtorStub = CGF.createAtExitStub(VD, Dtor, Addr);

  // extern "C" int __tlregdtor(void (*f)(void));
  llvm::FunctionType *TLRegDtorTy = llvm::FunctionType::get(
      CGF.IntTy, DtorStub->getType(), /*isVarArg=*/false);

  llvm::FunctionCallee TLRegDtor = CGF.CGM.CreateRuntimeFunction(
      TLRegDtorTy, "__tlregdtor", llvm::AttributeList(), /*Local=*/true);
  // The callee may be a bitcast constant rather than a Function; only
  // mark nounwind when we actually got the Function.
  if (llvm::Function *TLRegDtorFn =
          dyn_cast<llvm::Function>(TLRegDtor.getCallee()))
    TLRegDtorFn->setDoesNotThrow();

  CGF.EmitNounwindRuntimeCall(TLRegDtor, DtorStub);
}
|
|
|
|
|
|
|
|
// Register the destructor for a global variable: skipped entirely for
// [[clang::no_destroy]] variables, routed through __tlregdtor for TLS
// variables, and through atexit otherwise.
void MicrosoftCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                         llvm::FunctionCallee Dtor,
                                         llvm::Constant *Addr) {
  if (D.isNoDestroy(CGM.getContext()))
    return;

  if (D.getTLSKind())
    return emitGlobalDtorWithTLRegDtor(CGF, D, Dtor, Addr);

  // The default behavior is to use atexit.
  CGF.registerGlobalDtorWithAtExit(D, Dtor, Addr);
}
|
|
|
|
|
|
|
|
// Emit dynamic initializers for thread_local variables by registering
// them with the MSVC CRT: each initializer (or a combined __tls_init for
// the non-comdat ones) is pointed to from a .CRT$XDU-section global that
// the CRT invokes at start-up and thread creation.
void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  if (CXXThreadLocalInits.empty())
    return;

  // Force the linker to keep __dyn_tls_init alive (the x86 name carries
  // the __stdcall decoration), since nothing references it directly.
  CGM.AppendLinkerOptions(CGM.getTarget().getTriple().getArch() ==
                                  llvm::Triple::x86
                              ? "/include:___dyn_tls_init@12"
                              : "/include:__dyn_tls_init");

  // This will create a GV in the .CRT$XDU section. It will point to our
  // initialization function. The CRT will call all of these function
  // pointers at start-up time and, eventually, at thread-creation time.
  auto AddToXDU = [&CGM](llvm::Function *InitFunc) {
    llvm::GlobalVariable *InitFuncPtr = new llvm::GlobalVariable(
        CGM.getModule(), InitFunc->getType(), /*isConstant=*/true,
        llvm::GlobalVariable::InternalLinkage, InitFunc,
        Twine(InitFunc->getName(), "$initializer$"));
    InitFuncPtr->setSection(".CRT$XDU");
    // This variable has discardable linkage, we have to add it to @llvm.used to
    // ensure it won't get discarded.
    CGM.addUsedGlobal(InitFuncPtr);
    return InitFuncPtr;
  };

  std::vector<llvm::Function *> NonComdatInits;
  for (size_t I = 0, E = CXXThreadLocalInitVars.size(); I != E; ++I) {
    llvm::GlobalVariable *GV = cast<llvm::GlobalVariable>(
        CGM.GetGlobalValue(CGM.getMangledName(CXXThreadLocalInitVars[I])));
    llvm::Function *F = CXXThreadLocalInits[I];

    // If the GV is already in a comdat group, then we have to join it.
    if (llvm::Comdat *C = GV->getComdat())
      AddToXDU(F)->setComdat(C);
    else
      NonComdatInits.push_back(F);
  }

  // Fold the remaining initializers into a single __tls_init function so
  // only one .CRT$XDU pointer is needed for them.
  if (!NonComdatInits.empty()) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    llvm::Function *InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(
        FTy, "__tls_init", CGM.getTypes().arrangeNullaryFunction(),
        SourceLocation(), /*TLS=*/true);
    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, NonComdatInits);

    AddToXDU(InitFunc);
  }
}
|
|
|
|
|
|
|
|
// Thread wrappers for thread_local variables are not implemented in this
// ABI; report the limitation and return a dummy (empty) lvalue so the
// caller can continue after the diagnostic.
LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
                                                     const VarDecl *VD,
                                                     QualType LValType) {
  CGF.CGM.ErrorUnsupported(VD, "thread wrappers");
  return LValue();
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
|
2015-05-07 14:15:46 +08:00
|
|
|
StringRef VarName("_Init_thread_epoch");
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CharUnits Align = CGM.getIntAlign();
|
2015-05-07 14:15:46 +08:00
|
|
|
if (auto *GV = CGM.getModule().getNamedGlobal(VarName))
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return ConstantAddress(GV, Align);
|
2015-05-07 14:15:46 +08:00
|
|
|
auto *GV = new llvm::GlobalVariable(
|
|
|
|
CGM.getModule(), CGM.IntTy,
|
2019-07-16 12:46:31 +08:00
|
|
|
/*isConstant=*/false, llvm::GlobalVariable::ExternalLinkage,
|
2015-05-07 14:15:46 +08:00
|
|
|
/*Initializer=*/nullptr, VarName,
|
|
|
|
/*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel);
|
2019-10-03 21:00:29 +08:00
|
|
|
GV->setAlignment(Align.getAsAlign());
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return ConstantAddress(GV, Align);
|
2015-05-07 14:15:46 +08:00
|
|
|
}
|
|
|
|
|
2019-02-06 00:42:33 +08:00
|
|
|
/// Get a declaration of `void _Init_thread_header(int *)`, the MSVC runtime
/// entry point that begins a thread-safe static initialization.
static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      llvm::Type::getVoidTy(Ctx), CGM.IntTy->getPointerTo(),
      /*isVarArg=*/false);
  // The runtime function is known not to unwind.
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      Ctx, llvm::AttributeList::FunctionIndex, llvm::Attribute::NoUnwind);
  return CGM.CreateRuntimeFunction(FTy, "_Init_thread_header", Attrs,
                                   /*Local=*/true);
}
|
|
|
|
|
2019-02-06 00:42:33 +08:00
|
|
|
/// Get a declaration of `void _Init_thread_footer(int *)`, the MSVC runtime
/// entry point that marks a thread-safe static initialization as complete.
static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      llvm::Type::getVoidTy(Ctx), CGM.IntTy->getPointerTo(),
      /*isVarArg=*/false);
  // The runtime function is known not to unwind.
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      Ctx, llvm::AttributeList::FunctionIndex, llvm::Attribute::NoUnwind);
  return CGM.CreateRuntimeFunction(FTy, "_Init_thread_footer", Attrs,
                                   /*Local=*/true);
}
|
|
|
|
|
2019-02-06 00:42:33 +08:00
|
|
|
/// Get a declaration of `void _Init_thread_abort(int *)`, the MSVC runtime
/// entry point that rolls back a failed thread-safe static initialization.
static llvm::FunctionCallee getInitThreadAbortFn(CodeGenModule &CGM) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      llvm::Type::getVoidTy(Ctx), CGM.IntTy->getPointerTo(),
      /*isVarArg=*/false);
  // The runtime function is known not to unwind.
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      Ctx, llvm::AttributeList::FunctionIndex, llvm::Attribute::NoUnwind);
  return CGM.CreateRuntimeFunction(FTy, "_Init_thread_abort", Attrs,
                                   /*Local=*/true);
}
|
|
|
|
|
|
|
|
namespace {
|
2015-08-19 06:40:54 +08:00
|
|
|
struct ResetGuardBit final : EHScopeStack::Cleanup {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Guard;
|
2015-05-07 14:15:46 +08:00
|
|
|
unsigned GuardNum;
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
ResetGuardBit(Address Guard, unsigned GuardNum)
|
2015-05-07 14:15:46 +08:00
|
|
|
: Guard(Guard), GuardNum(GuardNum) {}
|
|
|
|
|
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
|
|
|
// Reset the bit in the mask so that the static variable may be
|
|
|
|
// reinitialized.
|
|
|
|
CGBuilderTy &Builder = CGF.Builder;
|
|
|
|
llvm::LoadInst *LI = Builder.CreateLoad(Guard);
|
|
|
|
llvm::ConstantInt *Mask =
|
2016-02-11 03:09:15 +08:00
|
|
|
llvm::ConstantInt::get(CGF.IntTy, ~(1ULL << GuardNum));
|
2015-05-07 14:15:46 +08:00
|
|
|
Builder.CreateStore(Builder.CreateAnd(LI, Mask), Guard);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2015-08-19 06:40:54 +08:00
|
|
|
struct CallInitThreadAbort final : EHScopeStack::Cleanup {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *Guard;
|
|
|
|
CallInitThreadAbort(Address Guard) : Guard(Guard.getPointer()) {}
|
2015-05-07 14:15:46 +08:00
|
|
|
|
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
|
|
|
// Calling _Init_thread_abort will reset the guard's state.
|
|
|
|
CGF.EmitNounwindRuntimeCall(getInitThreadAbortFn(CGF.CGM), Guard);
|
|
|
|
}
|
|
|
|
};
|
2015-06-23 07:07:51 +08:00
|
|
|
}
|
2015-05-07 14:15:46 +08:00
|
|
|
|
2012-05-01 14:13:13 +08:00
|
|
|
void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
|
2013-09-11 04:14:30 +08:00
|
|
|
llvm::GlobalVariable *GV,
|
2012-05-01 14:13:13 +08:00
|
|
|
bool PerformInit) {
|
2014-05-24 05:13:45 +08:00
|
|
|
// MSVC only uses guards for static locals.
|
|
|
|
if (!D.isStaticLocal()) {
|
|
|
|
assert(GV->hasWeakLinkage() || GV->hasLinkOnceLinkage());
|
|
|
|
// GlobalOpt is allowed to discard the initializer, so use linkonce_odr.
|
2015-01-17 00:04:45 +08:00
|
|
|
llvm::Function *F = CGF.CurFn;
|
|
|
|
F->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
|
|
|
|
F->setComdat(CGM.getModule().getOrInsertComdat(F->getName()));
|
2014-05-24 05:13:45 +08:00
|
|
|
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-05-08 05:19:06 +08:00
|
|
|
bool ThreadlocalStatic = D.getTLSKind();
|
|
|
|
bool ThreadsafeStatic = getContext().getLangOpts().ThreadsafeStatics;
|
|
|
|
|
|
|
|
// Thread-safe static variables which aren't thread-specific have a
|
|
|
|
// per-variable guard.
|
|
|
|
bool HasPerVariableGuard = ThreadsafeStatic && !ThreadlocalStatic;
|
2013-04-15 07:01:42 +08:00
|
|
|
|
2013-09-11 04:14:30 +08:00
|
|
|
CGBuilderTy &Builder = CGF.Builder;
|
|
|
|
llvm::IntegerType *GuardTy = CGF.Int32Ty;
|
|
|
|
llvm::ConstantInt *Zero = llvm::ConstantInt::get(GuardTy, 0);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CharUnits GuardAlign = CharUnits::fromQuantity(4);
|
2013-09-11 04:14:30 +08:00
|
|
|
|
|
|
|
// Get the guard variable for this function if we have one already.
|
2015-05-08 05:19:06 +08:00
|
|
|
GuardInfo *GI = nullptr;
|
|
|
|
if (ThreadlocalStatic)
|
|
|
|
GI = &ThreadLocalGuardVariableMap[D.getDeclContext()];
|
|
|
|
else if (!ThreadsafeStatic)
|
|
|
|
GI = &GuardVariableMap[D.getDeclContext()];
|
|
|
|
|
|
|
|
llvm::GlobalVariable *GuardVar = GI ? GI->Guard : nullptr;
|
2015-05-07 14:15:46 +08:00
|
|
|
unsigned GuardNum;
|
2015-05-08 05:19:06 +08:00
|
|
|
if (D.isExternallyVisible()) {
|
2013-09-11 04:14:30 +08:00
|
|
|
// Externally visible variables have to be numbered in Sema to properly
|
|
|
|
// handle unreachable VarDecls.
|
2015-05-07 14:15:46 +08:00
|
|
|
GuardNum = getContext().getStaticLocalNumber(&D);
|
|
|
|
assert(GuardNum > 0);
|
|
|
|
GuardNum--;
|
|
|
|
} else if (HasPerVariableGuard) {
|
|
|
|
GuardNum = ThreadSafeGuardNumMap[D.getDeclContext()]++;
|
2013-09-11 04:14:30 +08:00
|
|
|
} else {
|
|
|
|
// Non-externally visible variables are numbered here in CodeGen.
|
2015-05-07 14:15:46 +08:00
|
|
|
GuardNum = GI->BitIndex++;
|
2013-09-11 04:14:30 +08:00
|
|
|
}
|
|
|
|
|
2015-05-07 14:15:46 +08:00
|
|
|
if (!HasPerVariableGuard && GuardNum >= 32) {
|
2013-09-11 04:14:30 +08:00
|
|
|
if (D.isExternallyVisible())
|
|
|
|
ErrorUnsupportedABI(CGF, "more than 32 guarded initializations");
|
2015-05-07 14:15:46 +08:00
|
|
|
GuardNum %= 32;
|
|
|
|
GuardVar = nullptr;
|
2013-09-11 04:14:30 +08:00
|
|
|
}
|
|
|
|
|
2015-05-07 14:15:46 +08:00
|
|
|
if (!GuardVar) {
|
2013-09-11 04:14:30 +08:00
|
|
|
// Mangle the name for the guard.
|
|
|
|
SmallString<256> GuardName;
|
|
|
|
{
|
|
|
|
llvm::raw_svector_ostream Out(GuardName);
|
2015-05-07 14:15:46 +08:00
|
|
|
if (HasPerVariableGuard)
|
|
|
|
getMangleContext().mangleThreadSafeStaticGuardVariable(&D, GuardNum,
|
|
|
|
Out);
|
|
|
|
else
|
|
|
|
getMangleContext().mangleStaticGuardVariable(&D, Out);
|
2013-09-11 04:14:30 +08:00
|
|
|
}
|
|
|
|
|
2014-06-18 23:55:13 +08:00
|
|
|
// Create the guard variable with a zero-initializer. Just absorb linkage,
|
|
|
|
// visibility and dll storage class from the guarded variable.
|
2015-05-07 14:15:46 +08:00
|
|
|
GuardVar =
|
|
|
|
new llvm::GlobalVariable(CGM.getModule(), GuardTy, /*isConstant=*/false,
|
2014-04-24 02:22:11 +08:00
|
|
|
GV->getLinkage(), Zero, GuardName.str());
|
2015-05-07 14:15:46 +08:00
|
|
|
GuardVar->setVisibility(GV->getVisibility());
|
|
|
|
GuardVar->setDLLStorageClass(GV->getDLLStorageClass());
|
2019-10-03 21:00:29 +08:00
|
|
|
GuardVar->setAlignment(GuardAlign.getAsAlign());
|
2015-05-07 14:15:46 +08:00
|
|
|
if (GuardVar->isWeakForLinker())
|
|
|
|
GuardVar->setComdat(
|
|
|
|
CGM.getModule().getOrInsertComdat(GuardVar->getName()));
|
|
|
|
if (D.getTLSKind())
|
2020-06-16 14:30:36 +08:00
|
|
|
CGM.setTLSMode(GuardVar, D);
|
2015-05-07 14:15:46 +08:00
|
|
|
if (GI && !HasPerVariableGuard)
|
|
|
|
GI->Guard = GuardVar;
|
|
|
|
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
ConstantAddress GuardAddr(GuardVar, GuardAlign);
|
|
|
|
|
2015-05-07 14:15:46 +08:00
|
|
|
assert(GuardVar->getLinkage() == GV->getLinkage() &&
|
|
|
|
"static local from the same function had different linkage");
|
|
|
|
|
|
|
|
if (!HasPerVariableGuard) {
|
|
|
|
// Pseudo code for the test:
|
|
|
|
// if (!(GuardVar & MyGuardBit)) {
|
|
|
|
// GuardVar |= MyGuardBit;
|
|
|
|
// ... initialize the object ...;
|
|
|
|
// }
|
|
|
|
|
|
|
|
// Test our bit from the guard variable.
|
2016-03-31 05:33:34 +08:00
|
|
|
llvm::ConstantInt *Bit = llvm::ConstantInt::get(GuardTy, 1ULL << GuardNum);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::LoadInst *LI = Builder.CreateLoad(GuardAddr);
|
2017-07-27 06:01:09 +08:00
|
|
|
llvm::Value *NeedsInit =
|
|
|
|
Builder.CreateICmpEQ(Builder.CreateAnd(LI, Bit), Zero);
|
2015-05-07 14:15:46 +08:00
|
|
|
llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
|
|
|
|
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
|
2017-07-27 06:01:09 +08:00
|
|
|
CGF.EmitCXXGuardedInitBranch(NeedsInit, InitBlock, EndBlock,
|
|
|
|
CodeGenFunction::GuardKind::VariableGuard, &D);
|
2015-05-07 14:15:46 +08:00
|
|
|
|
|
|
|
// Set our bit in the guard variable and emit the initializer and add a global
|
|
|
|
// destructor if appropriate.
|
|
|
|
CGF.EmitBlock(InitBlock);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Builder.CreateStore(Builder.CreateOr(LI, Bit), GuardAddr);
|
|
|
|
CGF.EHStack.pushCleanup<ResetGuardBit>(EHCleanup, GuardAddr, GuardNum);
|
2015-05-07 14:15:46 +08:00
|
|
|
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
|
|
|
|
CGF.PopCleanupBlock();
|
|
|
|
Builder.CreateBr(EndBlock);
|
|
|
|
|
|
|
|
// Continue.
|
|
|
|
CGF.EmitBlock(EndBlock);
|
2013-09-11 04:14:30 +08:00
|
|
|
} else {
|
2015-05-07 14:15:46 +08:00
|
|
|
// Pseudo code for the test:
|
|
|
|
// if (TSS > _Init_thread_epoch) {
|
|
|
|
// _Init_thread_header(&TSS);
|
|
|
|
// if (TSS == -1) {
|
|
|
|
// ... initialize the object ...;
|
|
|
|
// _Init_thread_footer(&TSS);
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// The algorithm is almost identical to what can be found in the appendix
|
|
|
|
// found in N2325.
|
|
|
|
|
|
|
|
// This BasicBLock determines whether or not we have any work to do.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::LoadInst *FirstGuardLoad = Builder.CreateLoad(GuardAddr);
|
2015-05-07 14:15:46 +08:00
|
|
|
FirstGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
|
|
|
|
llvm::LoadInst *InitThreadEpoch =
|
|
|
|
Builder.CreateLoad(getInitThreadEpochPtr(CGM));
|
|
|
|
llvm::Value *IsUninitialized =
|
|
|
|
Builder.CreateICmpSGT(FirstGuardLoad, InitThreadEpoch);
|
|
|
|
llvm::BasicBlock *AttemptInitBlock = CGF.createBasicBlock("init.attempt");
|
|
|
|
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
|
2017-07-27 06:01:09 +08:00
|
|
|
CGF.EmitCXXGuardedInitBranch(IsUninitialized, AttemptInitBlock, EndBlock,
|
|
|
|
CodeGenFunction::GuardKind::VariableGuard, &D);
|
2015-05-07 14:15:46 +08:00
|
|
|
|
|
|
|
// This BasicBlock attempts to determine whether or not this thread is
|
|
|
|
// responsible for doing the initialization.
|
|
|
|
CGF.EmitBlock(AttemptInitBlock);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CGF.EmitNounwindRuntimeCall(getInitThreadHeaderFn(CGM),
|
|
|
|
GuardAddr.getPointer());
|
|
|
|
llvm::LoadInst *SecondGuardLoad = Builder.CreateLoad(GuardAddr);
|
2015-05-07 14:15:46 +08:00
|
|
|
SecondGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
|
|
|
|
llvm::Value *ShouldDoInit =
|
|
|
|
Builder.CreateICmpEQ(SecondGuardLoad, getAllOnesInt());
|
|
|
|
llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
|
|
|
|
Builder.CreateCondBr(ShouldDoInit, InitBlock, EndBlock);
|
|
|
|
|
|
|
|
// Ok, we ended up getting selected as the initializing thread.
|
|
|
|
CGF.EmitBlock(InitBlock);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CGF.EHStack.pushCleanup<CallInitThreadAbort>(EHCleanup, GuardAddr);
|
2015-05-07 14:15:46 +08:00
|
|
|
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
|
|
|
|
CGF.PopCleanupBlock();
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CGF.EmitNounwindRuntimeCall(getInitThreadFooterFn(CGM),
|
|
|
|
GuardAddr.getPointer());
|
2015-05-07 14:15:46 +08:00
|
|
|
Builder.CreateBr(EndBlock);
|
|
|
|
|
|
|
|
CGF.EmitBlock(EndBlock);
|
|
|
|
}
|
2012-05-01 14:13:13 +08:00
|
|
|
}
|
|
|
|
|
2013-04-12 02:13:19 +08:00
|
|
|
bool MicrosoftCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
|
|
|
|
// Null-ness for function memptrs only depends on the first field, which is
|
|
|
|
// the function pointer. The rest don't matter, so we can zero initialize.
|
|
|
|
if (MPT->isMemberFunctionPointer())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// The virtual base adjustment field is always -1 for null, so if we have one
|
|
|
|
// we can't zero initialize. The field offset is sometimes also -1 if 0 is a
|
|
|
|
// valid field offset.
|
2014-01-17 17:01:00 +08:00
|
|
|
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
|
2019-11-16 10:49:32 +08:00
|
|
|
MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
|
|
|
|
return (!inheritanceModelHasVBTableOffsetField(Inheritance) &&
|
2014-02-06 01:27:08 +08:00
|
|
|
RD->nullFieldOffsetIsZero());
|
2013-03-23 03:02:54 +08:00
|
|
|
}
|
|
|
|
|
2013-04-12 02:13:19 +08:00
|
|
|
llvm::Type *
MicrosoftCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  // Lower the member pointer to either a lone scalar or an anonymous struct,
  // depending on how many fields the record's inheritance model requires.
  const CXXRecordDecl *MostRecent = MPT->getMostRecentCXXRecordDecl();
  MSInheritanceModel Model = MostRecent->getMSInheritanceModel();
  bool IsFunc = MPT->isMemberFunctionPointer();

  llvm::SmallVector<llvm::Type *, 4> Fields;
  // Leading field: FunctionPointerOrVirtualThunk for member function
  // pointers, FieldOffset for data member pointers.
  Fields.push_back(IsFunc ? static_cast<llvm::Type *>(CGM.VoidPtrTy)
                          : static_cast<llvm::Type *>(CGM.IntTy));

  // Optional i32 adjustment fields, in ABI order: non-virtual base offset,
  // vbptr offset, and VirtualBaseAdjustmentOffset.
  if (inheritanceModelHasNVOffsetField(IsFunc, Model))
    Fields.push_back(CGM.IntTy);
  if (inheritanceModelHasVBPtrOffsetField(Model))
    Fields.push_back(CGM.IntTy);
  if (inheritanceModelHasVBTableOffsetField(Model))
    Fields.push_back(CGM.IntTy);

  // One field collapses to a bare scalar rather than a single-element struct.
  return Fields.size() == 1
             ? Fields.front()
             : llvm::StructType::get(CGM.getLLVMContext(), Fields);
}
|
|
|
|
|
|
|
|
void MicrosoftCXXABI::
|
|
|
|
GetNullMemberPointerFields(const MemberPointerType *MPT,
|
|
|
|
llvm::SmallVectorImpl<llvm::Constant *> &fields) {
|
|
|
|
assert(fields.empty());
|
2014-01-17 17:01:00 +08:00
|
|
|
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
|
2019-11-16 10:49:32 +08:00
|
|
|
MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
|
2013-04-12 02:13:19 +08:00
|
|
|
if (MPT->isMemberFunctionPointer()) {
|
|
|
|
// FunctionPointerOrVirtualThunk
|
|
|
|
fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
|
|
|
|
} else {
|
2014-02-06 01:27:08 +08:00
|
|
|
if (RD->nullFieldOffsetIsZero())
|
2013-04-12 02:13:19 +08:00
|
|
|
fields.push_back(getZeroInt()); // FieldOffset
|
|
|
|
else
|
|
|
|
fields.push_back(getAllOnesInt()); // FieldOffset
|
2013-03-23 03:02:54 +08:00
|
|
|
}
|
2013-04-12 02:13:19 +08:00
|
|
|
|
2019-11-16 10:49:32 +08:00
|
|
|
if (inheritanceModelHasNVOffsetField(MPT->isMemberFunctionPointer(),
|
|
|
|
Inheritance))
|
2013-04-12 02:13:19 +08:00
|
|
|
fields.push_back(getZeroInt());
|
2019-11-16 10:49:32 +08:00
|
|
|
if (inheritanceModelHasVBPtrOffsetField(Inheritance))
|
2013-04-12 02:13:19 +08:00
|
|
|
fields.push_back(getZeroInt());
|
2019-11-16 10:49:32 +08:00
|
|
|
if (inheritanceModelHasVBTableOffsetField(Inheritance))
|
2013-04-12 02:13:19 +08:00
|
|
|
fields.push_back(getAllOnesInt());
|
2013-03-23 03:02:54 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Constant *
MicrosoftCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Materialize the null pattern for each field, then wrap it in an
  // anonymous struct only when the representation is an aggregate.
  llvm::SmallVector<llvm::Constant *, 4> NullFields;
  GetNullMemberPointerFields(MPT, NullFields);
  if (NullFields.size() == 1)
    return NullFields.front();
  llvm::Constant *Agg = llvm::ConstantStruct::getAnon(NullFields);
  // The aggregate layout must agree with the lowered member pointer type.
  assert(Agg->getType() == ConvertMemberPointerType(MPT));
  return Agg;
}
|
|
|
|
|
|
|
|
llvm::Constant *
MicrosoftCXXABI::EmitFullMemberPointer(llvm::Constant *FirstField,
                                       bool IsMemberFunction,
                                       const CXXRecordDecl *RD,
                                       CharUnits NonVirtualBaseAdjustment,
                                       unsigned VBTableIndex) {
  // Assemble a complete member pointer constant from its leading field plus
  // whatever adjustment fields the record's inheritance model calls for.
  MSInheritanceModel Model = RD->getMSInheritanceModel();

  // Single-inheritance member pointers are scalars, not aggregates: the
  // leading field is the whole representation.
  if (inheritanceModelHasOnlyOneField(IsMemberFunction, Model))
    return FirstField;

  llvm::SmallVector<llvm::Constant *, 4> Fields;
  Fields.push_back(FirstField);

  if (inheritanceModelHasNVOffsetField(IsMemberFunction, Model))
    Fields.push_back(llvm::ConstantInt::get(
        CGM.IntTy, NonVirtualBaseAdjustment.getQuantity()));

  if (inheritanceModelHasVBPtrOffsetField(Model)) {
    // Only a nonzero vbtable index implies an adjustment through a virtual
    // base, in which case we record where the vbptr lives in the layout.
    CharUnits VBPtrOffset =
        VBTableIndex ? getContext().getASTRecordLayout(RD).getVBPtrOffset()
                     : CharUnits::Zero();
    Fields.push_back(
        llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset.getQuantity()));
  }

  // The rest of the fields are adjusted by conversions to a more derived
  // class.
  if (inheritanceModelHasVBTableOffsetField(Model))
    Fields.push_back(llvm::ConstantInt::get(CGM.IntTy, VBTableIndex));

  return llvm::ConstantStruct::getAnon(Fields);
}
|
|
|
|
|
2013-05-03 09:15:11 +08:00
|
|
|
llvm::Constant *
MicrosoftCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                       CharUnits offset) {
  // Forward to the record-based overload, keyed on the most recent
  // declaration of the pointee's class.
  const CXXRecordDecl *MostRecent = MPT->getMostRecentCXXRecordDecl();
  return EmitMemberDataPointer(MostRecent, offset);
}
|
|
|
|
|
|
|
|
llvm::Constant *MicrosoftCXXABI::EmitMemberDataPointer(const CXXRecordDecl *RD,
                                                       CharUnits offset) {
  // Under the virtual inheritance model the stored offset is relative to the
  // base subobject that holds the vbptr, so rebase before encoding.
  if (RD->getMSInheritanceModel() == MSInheritanceModel::Virtual)
    offset -= getContext().getOffsetOfBaseWithVBPtr(RD);
  llvm::Constant *OffsetConst =
      llvm::ConstantInt::get(CGM.IntTy, offset.getQuantity());
  return EmitFullMemberPointer(OffsetConst, /*IsMemberFunction=*/false, RD,
                               /*NonVirtualBaseAdjustment=*/CharUnits::Zero(),
                               /*VBTableIndex=*/0);
}
|
|
|
|
|
|
|
|
llvm::Constant *MicrosoftCXXABI::EmitMemberPointer(const APValue &MP,
                                                   QualType MPType) {
  // Emit a constant member pointer from an evaluated APValue, applying any
  // derived-to-base / base-to-derived conversions recorded in the value's
  // member pointer path.
  const MemberPointerType *DstTy = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  // A null decl means the APValue is a null member pointer.
  if (!MPD)
    return EmitNullMemberPointer(DstTy);

  ASTContext &Ctx = getContext();
  ArrayRef<const CXXRecordDecl *> MemberPointerPath = MP.getMemberPointerPath();

  // First build the member pointer as declared, before any path conversion.
  llvm::Constant *C;
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
    C = EmitMemberFunctionPointer(MD);
  } else {
    // For a pointer to data member, start off with the offset of the field in
    // the class in which it was declared, and convert from there if necessary.
    // For indirect field decls, get the outermost anonymous field and use the
    // parent class.
    CharUnits FieldOffset = Ctx.toCharUnitsFromBits(Ctx.getFieldOffset(MPD));
    const FieldDecl *FD = dyn_cast<FieldDecl>(MPD);
    if (!FD)
      FD = cast<FieldDecl>(*cast<IndirectFieldDecl>(MPD)->chain_begin());
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(FD->getParent());
    RD = RD->getMostRecentNonInjectedDecl();
    C = EmitMemberDataPointer(RD, FieldOffset);
  }

  if (!MemberPointerPath.empty()) {
    // The value carries a conversion path; reconstruct the corresponding
    // CXXBaseSpecifier chain so EmitMemberPointerConversion can apply it.
    const CXXRecordDecl *SrcRD = cast<CXXRecordDecl>(MPD->getDeclContext());
    const Type *SrcRecTy = Ctx.getTypeDeclType(SrcRD).getTypePtr();
    const MemberPointerType *SrcTy =
        Ctx.getMemberPointerType(DstTy->getPointeeType(), SrcRecTy)
            ->castAs<MemberPointerType>();

    bool DerivedMember = MP.isMemberPointerToDerivedMember();
    SmallVector<const CXXBaseSpecifier *, 4> DerivedToBasePath;
    const CXXRecordDecl *PrevRD = SrcRD;
    for (const CXXRecordDecl *PathElem : MemberPointerPath) {
      // The path direction depends on whether the pointer refers to a member
      // of a derived class: for a derived-member pointer each step goes from
      // the path element (base) up to the previous record (derived);
      // otherwise it goes the other way.
      const CXXRecordDecl *Base = nullptr;
      const CXXRecordDecl *Derived = nullptr;
      if (DerivedMember) {
        Base = PathElem;
        Derived = PrevRD;
      } else {
        Base = PrevRD;
        Derived = PathElem;
      }
      // Find the base specifier on 'Derived' that names 'Base'; canonical
      // decls are compared so redeclarations don't confuse the match.
      for (const CXXBaseSpecifier &BS : Derived->bases())
        if (BS.getType()->getAsCXXRecordDecl()->getCanonicalDecl() ==
            Base->getCanonicalDecl())
          DerivedToBasePath.push_back(&BS);
      PrevRD = PathElem;
    }
    // Every path element must have matched exactly one base specifier.
    assert(DerivedToBasePath.size() == MemberPointerPath.size());

    CastKind CK = DerivedMember ? CK_DerivedToBaseMemberPointer
                                : CK_BaseToDerivedMemberPointer;
    C = EmitMemberPointerConversion(SrcTy, DstTy, CK, DerivedToBasePath.begin(),
                                    DerivedToBasePath.end(), C);
  }
  return C;
}
|
|
|
|
|
|
|
|
/// Emit the constant for a pointer to the member function \p MD in the
/// Microsoft representation: a function pointer (or vcall thunk for virtual
/// methods) plus whatever adjustment fields the class's inheritance model
/// requires.
llvm::Constant *
MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  assert(MD->isInstance() && "Member function must not be static!");

  CharUnits NonVirtualBaseAdjustment = CharUnits::Zero();
  // Use the most recent non-injected declaration of the parent so the
  // inheritance model reflects any later attributes/definition.
  const CXXRecordDecl *RD = MD->getParent()->getMostRecentNonInjectedDecl();
  CodeGenTypes &Types = CGM.getTypes();

  unsigned VBTableIndex = 0;
  llvm::Constant *FirstField;
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  if (!MD->isVirtual()) {
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    FirstField = CGM.GetAddrOfFunction(MD, Ty);
  } else {
    // Virtual methods point at a thunk that performs the virtual dispatch.
    auto &VTableContext = CGM.getMicrosoftVTableContext();
    MethodVFTableLocation ML = VTableContext.getMethodVFTableLocation(MD);
    FirstField = EmitVirtualMemPtrThunk(MD, ML);
    // Include the vfptr adjustment if the method is in a non-primary vftable.
    NonVirtualBaseAdjustment += ML.VFPtrOffset;
    // vbtable indices are stored pre-scaled by 4 (the size of a vbtable
    // entry) in the member pointer representation.
    if (ML.VBase)
      VBTableIndex = VTableContext.getVBTableIndex(RD, ML.VBase) * 4;
  }

  // Under the virtual inheritance model, member pointers that don't name a
  // virtual base are measured relative to the vbptr, so rebase the
  // adjustment.
  if (VBTableIndex == 0 &&
      RD->getMSInheritanceModel() ==
          MSInheritanceModel::Virtual)
    NonVirtualBaseAdjustment -= getContext().getOffsetOfBaseWithVBPtr(RD);

  // The rest of the fields are common with data member pointers.
  FirstField = llvm::ConstantExpr::getBitCast(FirstField, CGM.VoidPtrTy);
  return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/true, RD,
                               NonVirtualBaseAdjustment, VBTableIndex);
}
|
|
|
|
|
2013-05-01 04:15:14 +08:00
|
|
|
/// Member pointers are the same if they're either bitwise identical *or* both
/// null. Null-ness for function members is determined by the first field,
/// while for data member pointers we must compare all fields.
llvm::Value *
MicrosoftCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                             llvm::Value *L,
                                             llvm::Value *R,
                                             const MemberPointerType *MPT,
                                             bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Handle != comparisons by switching the sense of all boolean operations.
  // Note that And/Or are deliberately swapped for inequality: by De Morgan,
  // !(a == b && c == d) is (a != b || c != d).
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // If this is a single field member pointer (single inheritance), this is a
  // single icmp.
  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
  if (inheritanceModelHasOnlyOneField(MPT->isMemberFunctionPointer(),
                                      Inheritance))
    return Builder.CreateICmp(Eq, L, R);

  // Compare the first field.
  llvm::Value *L0 = Builder.CreateExtractValue(L, 0, "lhs.0");
  llvm::Value *R0 = Builder.CreateExtractValue(R, 0, "rhs.0");
  llvm::Value *Cmp0 = Builder.CreateICmp(Eq, L0, R0, "memptr.cmp.first");

  // Compare everything other than the first field.
  // Res stays null only if the struct had a single element, which cannot
  // happen here: single-field models returned early above.
  llvm::Value *Res = nullptr;
  llvm::StructType *LType = cast<llvm::StructType>(L->getType());
  for (unsigned I = 1, E = LType->getNumElements(); I != E; ++I) {
    llvm::Value *LF = Builder.CreateExtractValue(L, I);
    llvm::Value *RF = Builder.CreateExtractValue(R, I);
    llvm::Value *Cmp = Builder.CreateICmp(Eq, LF, RF, "memptr.cmp.rest");
    if (Res)
      Res = Builder.CreateBinOp(And, Res, Cmp);
    else
      Res = Cmp;
  }

  // Check if the first field is 0 if this is a function pointer.
  if (MPT->isMemberFunctionPointer()) {
    // (l1 == r1 && ...) || l0 == 0
    llvm::Value *Zero = llvm::Constant::getNullValue(L0->getType());
    llvm::Value *IsZero = Builder.CreateICmp(Eq, L0, Zero, "memptr.cmp.iszero");
    Res = Builder.CreateBinOp(Or, Res, IsZero);
  }

  // Combine the comparison of the first field, which must always be true for
  // this comparison to succeed.
  return Builder.CreateBinOp(And, Res, Cmp0, "memptr.cmp");
}
|
|
|
|
|
2013-03-23 03:02:54 +08:00
|
|
|
/// Emit a boolean test for whether a member pointer value is non-null.
llvm::Value *
MicrosoftCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                            llvm::Value *MemPtr,
                                            const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  // Collect the constants that a null member pointer of this type holds.
  // A member function pointer is null exactly when its function-pointer
  // field is null, so a single field suffices in that case.
  llvm::SmallVector<llvm::Constant *, 4> NullFields;
  const bool IsFuncPtr = MPT->isMemberFunctionPointer();
  if (IsFuncPtr)
    NullFields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
  else
    GetNullMemberPointerFields(MPT, NullFields);
  assert(!NullFields.empty());

  // Pull out the leading field when the representation is an aggregate;
  // single-field models pass the scalar through unchanged.
  llvm::Value *Lead = MemPtr;
  if (MemPtr->getType()->isStructTy())
    Lead = Builder.CreateExtractValue(MemPtr, 0);
  llvm::Value *NotNull = Builder.CreateICmpNE(Lead, NullFields[0],
                                              "memptr.cmp0");

  // For function member pointers, only the function-pointer field matters;
  // any remaining fields may contain garbage.
  if (IsFuncPtr)
    return NotNull;

  // Data member pointers: the value is non-null when any field differs from
  // the null pattern, so OR the per-field comparisons together.
  for (int Idx = 1, End = NullFields.size(); Idx < End; ++Idx) {
    llvm::Value *Field = Builder.CreateExtractValue(MemPtr, Idx);
    llvm::Value *FieldNE = Builder.CreateICmpNE(Field, NullFields[Idx],
                                                "memptr.cmp");
    NotNull = Builder.CreateOr(NotNull, FieldNE, "memptr.tobool");
  }
  return NotNull;
}
|
2013-03-23 03:02:54 +08:00
|
|
|
|
2013-05-10 05:01:17 +08:00
|
|
|
bool MicrosoftCXXABI::MemberPointerConstantIsNull(const MemberPointerType *MPT,
|
|
|
|
llvm::Constant *Val) {
|
|
|
|
// Function pointers are null if the pointer in the first field is null.
|
|
|
|
if (MPT->isMemberFunctionPointer()) {
|
|
|
|
llvm::Constant *FirstField = Val->getType()->isStructTy() ?
|
|
|
|
Val->getAggregateElement(0U) : Val;
|
|
|
|
return FirstField->isNullValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
// If it's not a function pointer and it's zero initializable, we can easily
|
|
|
|
// check zero.
|
|
|
|
if (isZeroInitializable(MPT) && Val->isNullValue())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Otherwise, break down all the fields for comparison. Hopefully these
|
|
|
|
// little Constants are reused, while a big null struct might not be.
|
|
|
|
llvm::SmallVector<llvm::Constant *, 4> Fields;
|
|
|
|
GetNullMemberPointerFields(MPT, Fields);
|
|
|
|
if (Fields.size() == 1) {
|
|
|
|
assert(Val->getType()->isIntegerTy());
|
|
|
|
return Val == Fields[0];
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned I, E;
|
|
|
|
for (I = 0, E = Fields.size(); I != E; ++I) {
|
|
|
|
if (Val->getAggregateElement(I) != Fields[I])
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return I == E;
|
|
|
|
}
|
|
|
|
|
2013-05-30 02:02:47 +08:00
|
|
|
/// Load the offset of a virtual base from an object's vbtable.
///
/// \param This          address of the object containing the vbptr.
/// \param VBPtrOffset   byte offset of the vbptr within the object.
/// \param VBTableOffset byte offset of the desired entry within the vbtable
///                      (a multiple of 4, each entry being an i32).
/// \param VBPtrOut      if non-null, receives the computed vbptr address.
/// \returns the i32 virtual-base offset loaded from the vbtable.
llvm::Value *
MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
                                         Address This,
                                         llvm::Value *VBPtrOffset,
                                         llvm::Value *VBTableOffset,
                                         llvm::Value **VBPtrOut) {
  CGBuilderTy &Builder = CGF.Builder;
  // Load the vbtable pointer from the vbptr in the instance.
  This = Builder.CreateElementBitCast(This, CGM.Int8Ty);
  llvm::Value *VBPtr =
    Builder.CreateInBoundsGEP(This.getPointer(), VBPtrOffset, "vbptr");
  if (VBPtrOut) *VBPtrOut = VBPtr;
  // The vbptr holds an i32* (pointer to the vbtable), in This's address
  // space.
  VBPtr = Builder.CreateBitCast(VBPtr,
            CGM.Int32Ty->getPointerTo(0)->getPointerTo(This.getAddressSpace()));

  // Derive the alignment of the vbptr slot: exact when the offset is a
  // constant, otherwise fall back to the conservative pointer alignment.
  CharUnits VBPtrAlign;
  if (auto CI = dyn_cast<llvm::ConstantInt>(VBPtrOffset)) {
    VBPtrAlign = This.getAlignment().alignmentAtOffset(
                                   CharUnits::fromQuantity(CI->getSExtValue()));
  } else {
    VBPtrAlign = CGF.getPointerAlign();
  }

  llvm::Value *VBTable = Builder.CreateAlignedLoad(VBPtr, VBPtrAlign, "vbtable");

  // Translate from byte offset to table index. It improves analyzability.
  // The shift is exact because vbtable offsets are always multiples of 4.
  llvm::Value *VBTableIndex = Builder.CreateAShr(
      VBTableOffset, llvm::ConstantInt::get(VBTableOffset->getType(), 2),
      "vbtindex", /*isExact=*/true);

  // Load an i32 offset from the vb-table.
  llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableIndex);
  VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
  // vbtable entries are i32s, hence the fixed 4-byte alignment.
  return Builder.CreateAlignedLoad(VBaseOffs, CharUnits::fromQuantity(4),
                                   "vbase_offs");
}
|
|
|
|
|
2013-04-12 02:13:19 +08:00
|
|
|
// Returns an adjusted base cast to i8*, since we do more address arithmetic on
|
|
|
|
// it.
|
2014-02-21 07:22:07 +08:00
|
|
|
llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
|
|
|
|
CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
|
2013-04-12 02:13:19 +08:00
|
|
|
CGBuilderTy &Builder = CGF.Builder;
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Base = Builder.CreateElementBitCast(Base, CGM.Int8Ty);
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::BasicBlock *OriginalBB = nullptr;
|
|
|
|
llvm::BasicBlock *SkipAdjustBB = nullptr;
|
|
|
|
llvm::BasicBlock *VBaseAdjustBB = nullptr;
|
2013-04-12 02:13:19 +08:00
|
|
|
|
|
|
|
// In the unspecified inheritance model, there might not be a vbtable at all,
|
|
|
|
// in which case we need to skip the virtual base lookup. If there is a
|
|
|
|
// vbtable, the first entry is a no-op entry that gives back the original
|
|
|
|
// base, so look for a virtual base adjustment offset of zero.
|
|
|
|
if (VBPtrOffset) {
|
|
|
|
OriginalBB = Builder.GetInsertBlock();
|
|
|
|
VBaseAdjustBB = CGF.createBasicBlock("memptr.vadjust");
|
|
|
|
SkipAdjustBB = CGF.createBasicBlock("memptr.skip_vadjust");
|
|
|
|
llvm::Value *IsVirtual =
|
2013-05-30 02:02:47 +08:00
|
|
|
Builder.CreateICmpNE(VBTableOffset, getZeroInt(),
|
2013-04-12 02:13:19 +08:00
|
|
|
"memptr.is_vbase");
|
|
|
|
Builder.CreateCondBr(IsVirtual, VBaseAdjustBB, SkipAdjustBB);
|
|
|
|
CGF.EmitBlock(VBaseAdjustBB);
|
2013-03-23 03:02:54 +08:00
|
|
|
}
|
|
|
|
|
2013-04-12 02:13:19 +08:00
|
|
|
// If we weren't given a dynamic vbptr offset, RD should be complete and we'll
|
|
|
|
// know the vbptr offset.
|
|
|
|
if (!VBPtrOffset) {
|
2013-05-30 02:02:47 +08:00
|
|
|
CharUnits offs = CharUnits::Zero();
|
2014-02-21 07:22:07 +08:00
|
|
|
if (!RD->hasDefinition()) {
|
|
|
|
DiagnosticsEngine &Diags = CGF.CGM.getDiags();
|
|
|
|
unsigned DiagID = Diags.getCustomDiagID(
|
|
|
|
DiagnosticsEngine::Error,
|
|
|
|
"member pointer representation requires a "
|
|
|
|
"complete class type for %0 to perform this expression");
|
|
|
|
Diags.Report(E->getExprLoc(), DiagID) << RD << E->getSourceRange();
|
|
|
|
} else if (RD->getNumVBases())
|
2014-01-14 08:50:39 +08:00
|
|
|
offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
|
2013-04-12 02:13:19 +08:00
|
|
|
VBPtrOffset = llvm::ConstantInt::get(CGM.IntTy, offs.getQuantity());
|
|
|
|
}
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Value *VBPtr = nullptr;
|
2013-04-12 02:13:19 +08:00
|
|
|
llvm::Value *VBaseOffs =
|
2013-10-28 01:10:27 +08:00
|
|
|
GetVBaseOffsetFromVBPtr(CGF, Base, VBPtrOffset, VBTableOffset, &VBPtr);
|
2013-04-12 02:13:19 +08:00
|
|
|
llvm::Value *AdjustedBase = Builder.CreateInBoundsGEP(VBPtr, VBaseOffs);
|
|
|
|
|
|
|
|
// Merge control flow with the case where we didn't have to adjust.
|
|
|
|
if (VBaseAdjustBB) {
|
|
|
|
Builder.CreateBr(SkipAdjustBB);
|
|
|
|
CGF.EmitBlock(SkipAdjustBB);
|
|
|
|
llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Phi->addIncoming(Base.getPointer(), OriginalBB);
|
2013-04-12 02:13:19 +08:00
|
|
|
Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
|
|
|
|
return Phi;
|
|
|
|
}
|
|
|
|
return AdjustedBase;
|
2013-03-23 03:02:54 +08:00
|
|
|
}
|
|
|
|
|
2014-02-21 07:22:07 +08:00
|
|
|
llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
|
2014-02-21 07:22:07 +08:00
|
|
|
const MemberPointerType *MPT) {
|
2013-04-12 02:13:19 +08:00
|
|
|
assert(MPT->isMemberDataPointer());
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
unsigned AS = Base.getAddressSpace();
|
2013-03-23 03:02:54 +08:00
|
|
|
llvm::Type *PType =
|
|
|
|
CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
|
|
|
|
CGBuilderTy &Builder = CGF.Builder;
|
2014-01-17 17:01:00 +08:00
|
|
|
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
|
2019-11-16 10:49:32 +08:00
|
|
|
MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
|
2013-04-12 02:13:19 +08:00
|
|
|
|
|
|
|
// Extract the fields we need, regardless of model. We'll apply them if we
|
|
|
|
// have them.
|
|
|
|
llvm::Value *FieldOffset = MemPtr;
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Value *VirtualBaseAdjustmentOffset = nullptr;
|
|
|
|
llvm::Value *VBPtrOffset = nullptr;
|
2013-04-12 02:13:19 +08:00
|
|
|
if (MemPtr->getType()->isStructTy()) {
|
|
|
|
// We need to extract values.
|
|
|
|
unsigned I = 0;
|
|
|
|
FieldOffset = Builder.CreateExtractValue(MemPtr, I++);
|
2019-11-16 10:49:32 +08:00
|
|
|
if (inheritanceModelHasVBPtrOffsetField(Inheritance))
|
2013-04-12 02:13:19 +08:00
|
|
|
VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
|
2019-11-16 10:49:32 +08:00
|
|
|
if (inheritanceModelHasVBTableOffsetField(Inheritance))
|
2013-04-12 02:13:19 +08:00
|
|
|
VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
|
2013-03-23 03:02:54 +08:00
|
|
|
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *Addr;
|
2013-04-12 02:13:19 +08:00
|
|
|
if (VirtualBaseAdjustmentOffset) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Addr = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
|
2013-04-12 02:13:19 +08:00
|
|
|
VBPtrOffset);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
} else {
|
|
|
|
Addr = Base.getPointer();
|
2013-03-23 03:02:54 +08:00
|
|
|
}
|
2013-12-06 06:44:07 +08:00
|
|
|
|
|
|
|
// Cast to char*.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Addr = Builder.CreateBitCast(Addr, CGF.Int8Ty->getPointerTo(AS));
|
2013-12-06 06:44:07 +08:00
|
|
|
|
|
|
|
// Apply the offset, which we assume is non-null.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Addr = Builder.CreateInBoundsGEP(Addr, FieldOffset, "memptr.offset");
|
2013-03-23 03:02:54 +08:00
|
|
|
|
|
|
|
// Cast the address to the appropriate pointer type, adopting the address
|
|
|
|
// space of the base pointer.
|
|
|
|
return Builder.CreateBitCast(Addr, PType);
|
|
|
|
}
|
|
|
|
|
2013-05-10 05:01:17 +08:00
|
|
|
/// Emit a conversion between member-pointer types (derived-to-base,
/// base-to-derived, or reinterpret_cast) for a non-constant source value.
/// The Microsoft representation may gain or lose fields across the cast, so
/// in general this dispatches to EmitNonNullMemberPointerConversion under a
/// null check; null member pointers are mapped to the destination type's
/// null representation per C++ [expr.reinterpret.cast].
llvm::Value *
MicrosoftCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                             const CastExpr *E,
                                             llvm::Value *Src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Use constant emission if we can.
  if (isa<llvm::Constant>(Src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(Src));

  // We may be adding or dropping fields from the member pointer, so we need
  // both types and the inheritance models of both records.
  const MemberPointerType *SrcTy =
      E->getSubExpr()->getType()->castAs<MemberPointerType>();
  const MemberPointerType *DstTy = E->getType()->castAs<MemberPointerType>();
  bool IsFunc = SrcTy->isMemberFunctionPointer();

  // If the classes use the same null representation, reinterpret_cast is a nop.
  bool IsReinterpret = E->getCastKind() == CK_ReinterpretMemberPointer;
  if (IsReinterpret && IsFunc)
    return Src;

  CXXRecordDecl *SrcRD = SrcTy->getMostRecentCXXRecordDecl();
  CXXRecordDecl *DstRD = DstTy->getMostRecentCXXRecordDecl();
  // Data member pointers: the null value is -1 unless the record's null
  // field offset is zero, so a reinterpret is only a nop when both records
  // agree on which representation they use.
  if (IsReinterpret &&
      SrcRD->nullFieldOffsetIsZero() == DstRD->nullFieldOffsetIsZero())
    return Src;

  CGBuilderTy &Builder = CGF.Builder;

  // Branch past the conversion if Src is null.
  llvm::Value *IsNotNull = EmitMemberPointerIsNotNull(CGF, Src, SrcTy);
  llvm::Constant *DstNull = EmitNullMemberPointer(DstTy);

  // C++ 5.2.10p9: The null member pointer value is converted to the null member
  //   pointer value of the destination type.
  if (IsReinterpret) {
    // For reinterpret casts, sema ensures that src and dst are both functions
    // or data and have the same size, which means the LLVM types should match.
    // A select suffices: pass non-null values through, remap null.
    assert(Src->getType() == DstNull->getType());
    return Builder.CreateSelect(IsNotNull, Src, DstNull);
  }

  // Derived-to-base / base-to-derived: emit the adjustment only on the
  // non-null path, then merge with the destination null value via a PHI.
  llvm::BasicBlock *OriginalBB = Builder.GetInsertBlock();
  llvm::BasicBlock *ConvertBB = CGF.createBasicBlock("memptr.convert");
  llvm::BasicBlock *ContinueBB = CGF.createBasicBlock("memptr.converted");
  Builder.CreateCondBr(IsNotNull, ConvertBB, ContinueBB);
  CGF.EmitBlock(ConvertBB);

  llvm::Value *Dst = EmitNonNullMemberPointerConversion(
      SrcTy, DstTy, E->getCastKind(), E->path_begin(), E->path_end(), Src,
      Builder);

  Builder.CreateBr(ContinueBB);

  // In the continuation, choose between DstNull and Dst.
  CGF.EmitBlock(ContinueBB);
  llvm::PHINode *Phi = Builder.CreatePHI(DstNull->getType(), 2, "memptr.converted");
  Phi->addIncoming(DstNull, OriginalBB);
  Phi->addIncoming(Dst, ConvertBB);
  return Phi;
}
|
|
|
|
|
|
|
|
/// Convert a member pointer that is known to be non-null between two member
/// pointer types.  The source is decomposed into its (up to four) fields, the
/// non-virtual displacement and vbtable index are adjusted for the destination
/// class, and the destination representation is recomposed.  Works for both
/// constant and instruction operands; when \p Src is a constant the virtual
/// displacement map lookup is folded instead of emitting a load.
llvm::Value *MicrosoftCXXABI::EmitNonNullMemberPointerConversion(
    const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK,
    CastExpr::path_const_iterator PathBegin,
    CastExpr::path_const_iterator PathEnd, llvm::Value *Src,
    CGBuilderTy &Builder) {
  const CXXRecordDecl *SrcRD = SrcTy->getMostRecentCXXRecordDecl();
  const CXXRecordDecl *DstRD = DstTy->getMostRecentCXXRecordDecl();
  MSInheritanceModel SrcInheritance = SrcRD->getMSInheritanceModel();
  MSInheritanceModel DstInheritance = DstRD->getMSInheritanceModel();
  bool IsFunc = SrcTy->isMemberFunctionPointer();
  bool IsConstant = isa<llvm::Constant>(Src);

  // Decompose src.  Fields absent from the source model default to zero; the
  // extraction order below matches the aggregate layout: pointer/offset,
  // nvoffset, vbptr offset, vbtable offset.
  llvm::Value *FirstField = Src;
  llvm::Value *NonVirtualBaseAdjustment = getZeroInt();
  llvm::Value *VirtualBaseAdjustmentOffset = getZeroInt();
  llvm::Value *VBPtrOffset = getZeroInt();
  if (!inheritanceModelHasOnlyOneField(IsFunc, SrcInheritance)) {
    // We need to extract values.
    unsigned I = 0;
    FirstField = Builder.CreateExtractValue(Src, I++);
    if (inheritanceModelHasNVOffsetField(IsFunc, SrcInheritance))
      NonVirtualBaseAdjustment = Builder.CreateExtractValue(Src, I++);
    if (inheritanceModelHasVBPtrOffsetField(SrcInheritance))
      VBPtrOffset = Builder.CreateExtractValue(Src, I++);
    if (inheritanceModelHasVBTableOffsetField(SrcInheritance))
      VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(Src, I++);
  }

  // The nv adjustment is computed relative to whichever side of the cast is
  // the derived class.
  bool IsDerivedToBase = (CK == CK_DerivedToBaseMemberPointer);
  const MemberPointerType *DerivedTy = IsDerivedToBase ? SrcTy : DstTy;
  const CXXRecordDecl *DerivedClass = DerivedTy->getMostRecentCXXRecordDecl();

  // For data pointers, we adjust the field offset directly. For functions, we
  // have a separate field.
  llvm::Value *&NVAdjustField = IsFunc ? NonVirtualBaseAdjustment : FirstField;

  // The virtual inheritance model has a quirk: the virtual base table is always
  // referenced when dereferencing a member pointer even if the member pointer
  // is non-virtual. This is accounted for by adjusting the non-virtual offset
  // to point backwards to the top of the MDC from the first VBase. Undo this
  // adjustment to normalize the member pointer.
  llvm::Value *SrcVBIndexEqZero =
      Builder.CreateICmpEQ(VirtualBaseAdjustmentOffset, getZeroInt());
  if (SrcInheritance == MSInheritanceModel::Virtual) {
    if (int64_t SrcOffsetToFirstVBase =
            getContext().getOffsetOfBaseWithVBPtr(SrcRD).getQuantity()) {
      llvm::Value *UndoSrcAdjustment = Builder.CreateSelect(
          SrcVBIndexEqZero,
          llvm::ConstantInt::get(CGM.IntTy, SrcOffsetToFirstVBase),
          getZeroInt());
      NVAdjustField = Builder.CreateNSWAdd(NVAdjustField, UndoSrcAdjustment);
    }
  }

  // A non-zero vbindex implies that we are dealing with a source member in a
  // floating virtual base in addition to some non-virtual offset. If the
  // vbindex is zero, we are dealing with a source that exists in a non-virtual,
  // fixed, base. The difference between these two cases is that the vbindex +
  // nvoffset *always* point to the member regardless of what context they are
  // evaluated in so long as the vbindex is adjusted. A member inside a fixed
  // base requires explicit nv adjustment.
  llvm::Constant *BaseClassOffset = llvm::ConstantInt::get(
      CGM.IntTy,
      CGM.computeNonVirtualBaseClassOffset(DerivedClass, PathBegin, PathEnd)
          .getQuantity());

  // Derived-to-base subtracts the base offset; base-to-derived adds it back.
  llvm::Value *NVDisp;
  if (IsDerivedToBase)
    NVDisp = Builder.CreateNSWSub(NVAdjustField, BaseClassOffset, "adj");
  else
    NVDisp = Builder.CreateNSWAdd(NVAdjustField, BaseClassOffset, "adj");

  NVAdjustField = Builder.CreateSelect(SrcVBIndexEqZero, NVDisp, getZeroInt());

  // Update the vbindex to an appropriate value in the destination because
  // SrcRD's vbtable might not be a strict prefix of the one in DstRD.
  llvm::Value *DstVBIndexEqZero = SrcVBIndexEqZero;
  if (inheritanceModelHasVBTableOffsetField(DstInheritance) &&
      inheritanceModelHasVBTableOffsetField(SrcInheritance)) {
    if (llvm::GlobalVariable *VDispMap =
            getAddrOfVirtualDisplacementMap(SrcRD, DstRD)) {
      // The stored vbtable offset is a byte offset; entries are 4 bytes each
      // (see the 4-byte load below), so divide to form a map index.
      llvm::Value *VBIndex = Builder.CreateExactUDiv(
          VirtualBaseAdjustmentOffset, llvm::ConstantInt::get(CGM.IntTy, 4));
      if (IsConstant) {
        // Constant source: fold the map lookup at compile time.
        llvm::Constant *Mapping = VDispMap->getInitializer();
        VirtualBaseAdjustmentOffset =
            Mapping->getAggregateElement(cast<llvm::Constant>(VBIndex));
      } else {
        llvm::Value *Idxs[] = {getZeroInt(), VBIndex};
        VirtualBaseAdjustmentOffset =
            Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(VDispMap, Idxs),
                                      CharUnits::fromQuantity(4));
      }

      DstVBIndexEqZero =
          Builder.CreateICmpEQ(VirtualBaseAdjustmentOffset, getZeroInt());
    }
  }

  // Set the VBPtrOffset to zero if the vbindex is zero. Otherwise, initialize
  // it to the offset of the vbptr.
  if (inheritanceModelHasVBPtrOffsetField(DstInheritance)) {
    llvm::Value *DstVBPtrOffset = llvm::ConstantInt::get(
        CGM.IntTy,
        getContext().getASTRecordLayout(DstRD).getVBPtrOffset().getQuantity());
    VBPtrOffset =
        Builder.CreateSelect(DstVBIndexEqZero, getZeroInt(), DstVBPtrOffset);
  }

  // Likewise, apply a similar adjustment so that dereferencing the member
  // pointer correctly accounts for the distance between the start of the first
  // virtual base and the top of the MDC.
  if (DstInheritance == MSInheritanceModel::Virtual) {
    if (int64_t DstOffsetToFirstVBase =
            getContext().getOffsetOfBaseWithVBPtr(DstRD).getQuantity()) {
      llvm::Value *DoDstAdjustment = Builder.CreateSelect(
          DstVBIndexEqZero,
          llvm::ConstantInt::get(CGM.IntTy, DstOffsetToFirstVBase),
          getZeroInt());
      NVAdjustField = Builder.CreateNSWSub(NVAdjustField, DoDstAdjustment);
    }
  }

  // Recompose dst from the null struct and the adjusted fields from src.
  // Insertion order mirrors the extraction order used for the source above.
  llvm::Value *Dst;
  if (inheritanceModelHasOnlyOneField(IsFunc, DstInheritance)) {
    Dst = FirstField;
  } else {
    Dst = llvm::UndefValue::get(ConvertMemberPointerType(DstTy));
    unsigned Idx = 0;
    Dst = Builder.CreateInsertValue(Dst, FirstField, Idx++);
    if (inheritanceModelHasNVOffsetField(IsFunc, DstInheritance))
      Dst = Builder.CreateInsertValue(Dst, NonVirtualBaseAdjustment, Idx++);
    if (inheritanceModelHasVBPtrOffsetField(DstInheritance))
      Dst = Builder.CreateInsertValue(Dst, VBPtrOffset, Idx++);
    if (inheritanceModelHasVBTableOffsetField(DstInheritance))
      Dst = Builder.CreateInsertValue(Dst, VirtualBaseAdjustmentOffset, Idx++);
  }
  return Dst;
}
|
|
|
|
|
|
|
|
/// Lower a member-pointer cast whose operand is a constant by forwarding the
/// cast's source/destination member-pointer types, cast kind, and inheritance
/// path to the generic constant conversion routine.
llvm::Constant *
MicrosoftCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                             llvm::Constant *Src) {
  const auto *DstTy = E->getType()->castAs<MemberPointerType>();
  const auto *SrcTy =
      E->getSubExpr()->getType()->castAs<MemberPointerType>();
  return EmitMemberPointerConversion(SrcTy, DstTy, E->getCastKind(),
                                     E->path_begin(), E->path_end(), Src);
}
|
|
|
|
|
|
|
|
/// Convert a constant member pointer between two member-pointer types.
/// Null constants are rebuilt as the destination's null value, non-null
/// reinterpret casts are passed through unchanged, and everything else is
/// constant-folded through the generic non-null conversion path.
llvm::Constant *MicrosoftCXXABI::EmitMemberPointerConversion(
    const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK,
    CastExpr::path_const_iterator PathBegin,
    CastExpr::path_const_iterator PathEnd, llvm::Constant *Src) {
  assert(CK == CK_DerivedToBaseMemberPointer ||
         CK == CK_BaseToDerivedMemberPointer ||
         CK == CK_ReinterpretMemberPointer);

  // A null source cannot simply be returned: the destination type may use a
  // different null representation, so synthesize its null value instead.
  if (MemberPointerConstantIsNull(SrcTy, Src))
    return EmitNullMemberPointer(DstTy);

  // Non-null reinterpret casts are representation-preserving; we only reach
  // here when the two representations have the same size, so pass through.
  if (CK == CK_ReinterpretMemberPointer)
    return Src;

  // Fold the full conversion: with a constant operand, the builder below only
  // ever produces constants.
  CGBuilderTy Builder(CGM, CGM.getLLVMContext());
  return cast<llvm::Constant>(EmitNonNullMemberPointerConversion(
      SrcTy, DstTy, CK, PathBegin, PathEnd, Src, Builder));
}
|
|
|
|
|
2016-10-27 07:46:34 +08:00
|
|
|
/// Prepare a call through a member function pointer: decompose the member
/// pointer into its fields, apply any virtual and non-virtual 'this'
/// adjustments it carries (returned via \p ThisPtrForCall), and return a
/// CGCallee wrapping the extracted function pointer cast to the method's
/// LLVM function type.
CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
    CodeGenFunction &CGF, const Expr *E, Address This,
    llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  assert(MPT->isMemberFunctionPointer());
  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
  CGBuilderTy &Builder = CGF.Builder;

  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();

  // Extract the fields we need, regardless of model. We'll apply them if we
  // have them.  A non-struct MemPtr is the single-field representation and is
  // the function pointer itself.
  llvm::Value *FunctionPointer = MemPtr;
  llvm::Value *NonVirtualBaseAdjustment = nullptr;
  llvm::Value *VirtualBaseAdjustmentOffset = nullptr;
  llvm::Value *VBPtrOffset = nullptr;
  if (MemPtr->getType()->isStructTy()) {
    // We need to extract values.
    unsigned I = 0;
    FunctionPointer = Builder.CreateExtractValue(MemPtr, I++);
    if (inheritanceModelHasNVOffsetField(MPT, Inheritance))
      NonVirtualBaseAdjustment = Builder.CreateExtractValue(MemPtr, I++);
    if (inheritanceModelHasVBPtrOffsetField(Inheritance))
      VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
    if (inheritanceModelHasVBTableOffsetField(Inheritance))
      VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
  }

  // Virtual base adjustment happens first, starting from the original 'This'.
  if (VirtualBaseAdjustmentOffset) {
    ThisPtrForCall = AdjustVirtualBase(CGF, E, RD, This,
                                       VirtualBaseAdjustmentOffset, VBPtrOffset);
  } else {
    ThisPtrForCall = This.getPointer();
  }

  if (NonVirtualBaseAdjustment) {
    // Apply the adjustment and cast back to the original struct type.
    llvm::Value *Ptr = Builder.CreateBitCast(ThisPtrForCall, CGF.Int8PtrTy);
    Ptr = Builder.CreateInBoundsGEP(Ptr, NonVirtualBaseAdjustment);
    ThisPtrForCall = Builder.CreateBitCast(Ptr, ThisPtrForCall->getType(),
                                           "this.adjusted");
  }

  // Cast the raw function pointer to the prototype's LLVM type before
  // handing it back as a callee.
  FunctionPointer =
    Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo());
  CGCallee Callee(FPT, FunctionPointer);
  return Callee;
}
|
|
|
|
|
2010-08-16 11:33:14 +08:00
|
|
|
/// Factory entry point: heap-allocate the Microsoft C++ ABI implementation
/// for the given module.  The caller assumes ownership of the result.
CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
  auto *ABI = new MicrosoftCXXABI(CGM);
  return ABI;
}
|
2014-07-07 14:20:47 +08:00
|
|
|
|
|
|
|
// MS RTTI Overview:
|
|
|
|
// The run time type information emitted by cl.exe contains 5 distinct types of
|
|
|
|
// structures. Many of them reference each other.
|
|
|
|
//
|
|
|
|
// TypeInfo: Static classes that are returned by typeid.
|
|
|
|
//
|
|
|
|
// CompleteObjectLocator: Referenced by vftables. They contain information
|
|
|
|
// required for dynamic casting, including OffsetFromTop. They also contain
|
|
|
|
// a reference to the TypeInfo for the type and a reference to the
|
|
|
|
// CompleteHierarchyDescriptor for the type.
|
|
|
|
//
|
2018-07-20 02:59:38 +08:00
|
|
|
// ClassHierarchyDescriptor: Contains information about a class hierarchy.
|
2014-07-07 14:20:47 +08:00
|
|
|
// Used during dynamic_cast to walk a class hierarchy. References a base
|
|
|
|
// class array and the size of said array.
|
|
|
|
//
|
|
|
|
// BaseClassArray: Contains a list of classes in a hierarchy. BaseClassArray is
|
|
|
|
// somewhat of a misnomer because the most derived class is also in the list
|
|
|
|
// as well as multiple copies of virtual bases (if they occur multiple times
|
2018-04-06 23:14:32 +08:00
|
|
|
// in the hierarchy.) The BaseClassArray contains one BaseClassDescriptor for
|
2014-07-07 14:20:47 +08:00
|
|
|
// every path in the hierarchy, in pre-order depth first order. Note, we do
|
|
|
|
// not declare a specific llvm type for BaseClassArray, it's merely an array
|
|
|
|
// of BaseClassDescriptor pointers.
|
|
|
|
//
|
|
|
|
// BaseClassDescriptor: Contains information about a class in a class hierarchy.
|
|
|
|
// BaseClassDescriptor is also somewhat of a misnomer for the same reason that
|
|
|
|
// BaseClassArray is. It contains information about a class within a
|
|
|
|
// hierarchy such as: is this base is ambiguous and what is its offset in the
|
|
|
|
// vbtable. The names of the BaseClassDescriptors have all of their fields
|
|
|
|
// mangled into them so they can be aggressively deduplicated by the linker.
|
|
|
|
|
|
|
|
static llvm::GlobalVariable *getTypeInfoVTable(CodeGenModule &CGM) {
|
2018-03-17 04:36:49 +08:00
|
|
|
StringRef MangledName("??_7type_info@@6B@");
|
2014-07-07 14:20:47 +08:00
|
|
|
if (auto VTable = CGM.getModule().getNamedGlobal(MangledName))
|
|
|
|
return VTable;
|
|
|
|
return new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
|
2019-07-16 12:46:31 +08:00
|
|
|
/*isConstant=*/true,
|
2014-07-07 14:20:47 +08:00
|
|
|
llvm::GlobalVariable::ExternalLinkage,
|
|
|
|
/*Initializer=*/nullptr, MangledName);
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// A Helper struct that stores information about a class in a class
|
2014-07-07 14:20:47 +08:00
|
|
|
/// hierarchy. The information stored in these structs struct is used during
|
|
|
|
/// the generation of ClassHierarchyDescriptors and BaseClassDescriptors.
|
|
|
|
// During RTTI creation, MSRTTIClasses are stored in a contiguous array with
|
|
|
|
// implicit depth first pre-order tree connectivity. getFirstChild and
|
|
|
|
// getNextSibling allow us to walk the tree efficiently.
|
|
|
|
struct MSRTTIClass {
|
|
|
|
enum {
|
|
|
|
IsPrivateOnPath = 1 | 8,
|
|
|
|
IsAmbiguous = 2,
|
|
|
|
IsPrivate = 4,
|
|
|
|
IsVirtual = 16,
|
|
|
|
HasHierarchyDescriptor = 64
|
|
|
|
};
|
|
|
|
MSRTTIClass(const CXXRecordDecl *RD) : RD(RD) {}
|
|
|
|
uint32_t initialize(const MSRTTIClass *Parent,
|
|
|
|
const CXXBaseSpecifier *Specifier);
|
|
|
|
|
|
|
|
MSRTTIClass *getFirstChild() { return this + 1; }
|
|
|
|
static MSRTTIClass *getNextChild(MSRTTIClass *Child) {
|
|
|
|
return Child + 1 + Child->NumBases;
|
|
|
|
}
|
|
|
|
|
|
|
|
const CXXRecordDecl *RD, *VirtualRoot;
|
|
|
|
uint32_t Flags, NumBases, OffsetInVBase;
|
|
|
|
};
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Recursively initialize the base class array.
|
2014-07-07 14:20:47 +08:00
|
|
|
uint32_t MSRTTIClass::initialize(const MSRTTIClass *Parent,
|
|
|
|
const CXXBaseSpecifier *Specifier) {
|
|
|
|
Flags = HasHierarchyDescriptor;
|
|
|
|
if (!Parent) {
|
|
|
|
VirtualRoot = nullptr;
|
|
|
|
OffsetInVBase = 0;
|
|
|
|
} else {
|
|
|
|
if (Specifier->getAccessSpecifier() != AS_public)
|
|
|
|
Flags |= IsPrivate | IsPrivateOnPath;
|
|
|
|
if (Specifier->isVirtual()) {
|
|
|
|
Flags |= IsVirtual;
|
|
|
|
VirtualRoot = RD;
|
|
|
|
OffsetInVBase = 0;
|
|
|
|
} else {
|
|
|
|
if (Parent->Flags & IsPrivateOnPath)
|
|
|
|
Flags |= IsPrivateOnPath;
|
|
|
|
VirtualRoot = Parent->VirtualRoot;
|
|
|
|
OffsetInVBase = Parent->OffsetInVBase + RD->getASTContext()
|
|
|
|
.getASTRecordLayout(Parent->RD).getBaseClassOffset(RD).getQuantity();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
NumBases = 0;
|
|
|
|
MSRTTIClass *Child = getFirstChild();
|
|
|
|
for (const CXXBaseSpecifier &Base : RD->bases()) {
|
|
|
|
NumBases += Child->initialize(this, &Base) + 1;
|
|
|
|
Child = getNextChild(Child);
|
|
|
|
}
|
|
|
|
return NumBases;
|
|
|
|
}
|
|
|
|
|
|
|
|
static llvm::GlobalValue::LinkageTypes getLinkageForRTTI(QualType Ty) {
|
|
|
|
switch (Ty->getLinkage()) {
|
|
|
|
case NoLinkage:
|
|
|
|
case InternalLinkage:
|
|
|
|
case UniqueExternalLinkage:
|
|
|
|
return llvm::GlobalValue::InternalLinkage;
|
|
|
|
|
|
|
|
case VisibleNoLinkage:
|
2017-07-08 04:04:28 +08:00
|
|
|
case ModuleInternalLinkage:
|
|
|
|
case ModuleLinkage:
|
2014-07-07 14:20:47 +08:00
|
|
|
case ExternalLinkage:
|
|
|
|
return llvm::GlobalValue::LinkOnceODRLinkage;
|
|
|
|
}
|
|
|
|
llvm_unreachable("Invalid linkage!");
|
|
|
|
}
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// An ephemeral helper class for building MS RTTI types. It caches some
|
2014-07-07 14:20:47 +08:00
|
|
|
/// calls to the module and information about the most derived class in a
|
|
|
|
/// hierarchy.
|
|
|
|
struct MSRTTIBuilder {
|
|
|
|
enum {
|
|
|
|
HasBranchingHierarchy = 1,
|
|
|
|
HasVirtualBranchingHierarchy = 2,
|
|
|
|
HasAmbiguousBases = 4
|
|
|
|
};
|
|
|
|
|
2014-07-07 16:09:15 +08:00
|
|
|
MSRTTIBuilder(MicrosoftCXXABI &ABI, const CXXRecordDecl *RD)
|
|
|
|
: CGM(ABI.CGM), Context(CGM.getContext()),
|
|
|
|
VMContext(CGM.getLLVMContext()), Module(CGM.getModule()), RD(RD),
|
2014-07-07 14:20:47 +08:00
|
|
|
Linkage(getLinkageForRTTI(CGM.getContext().getTagDeclType(RD))),
|
2014-07-07 16:09:15 +08:00
|
|
|
ABI(ABI) {}
|
2014-07-07 14:20:47 +08:00
|
|
|
|
|
|
|
llvm::GlobalVariable *getBaseClassDescriptor(const MSRTTIClass &Classes);
|
|
|
|
llvm::GlobalVariable *
|
|
|
|
getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes);
|
|
|
|
llvm::GlobalVariable *getClassHierarchyDescriptor();
|
2016-10-11 00:26:29 +08:00
|
|
|
llvm::GlobalVariable *getCompleteObjectLocator(const VPtrInfo &Info);
|
2014-07-07 14:20:47 +08:00
|
|
|
|
|
|
|
CodeGenModule &CGM;
|
|
|
|
ASTContext &Context;
|
|
|
|
llvm::LLVMContext &VMContext;
|
|
|
|
llvm::Module &Module;
|
|
|
|
const CXXRecordDecl *RD;
|
|
|
|
llvm::GlobalVariable::LinkageTypes Linkage;
|
2014-07-07 16:09:15 +08:00
|
|
|
MicrosoftCXXABI &ABI;
|
2014-07-07 14:20:47 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Recursively serializes a class hierarchy in pre-order depth first
|
2014-07-07 14:20:47 +08:00
|
|
|
/// order.
|
|
|
|
static void serializeClassHierarchy(SmallVectorImpl<MSRTTIClass> &Classes,
|
|
|
|
const CXXRecordDecl *RD) {
|
|
|
|
Classes.push_back(MSRTTIClass(RD));
|
|
|
|
for (const CXXBaseSpecifier &Base : RD->bases())
|
|
|
|
serializeClassHierarchy(Classes, Base.getType()->getAsCXXRecordDecl());
|
|
|
|
}
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Find ambiguity among base classes.
|
2014-07-07 14:20:47 +08:00
|
|
|
static void
|
|
|
|
detectAmbiguousBases(SmallVectorImpl<MSRTTIClass> &Classes) {
|
|
|
|
llvm::SmallPtrSet<const CXXRecordDecl *, 8> VirtualBases;
|
|
|
|
llvm::SmallPtrSet<const CXXRecordDecl *, 8> UniqueBases;
|
|
|
|
llvm::SmallPtrSet<const CXXRecordDecl *, 8> AmbiguousBases;
|
|
|
|
for (MSRTTIClass *Class = &Classes.front(); Class <= &Classes.back();) {
|
|
|
|
if ((Class->Flags & MSRTTIClass::IsVirtual) &&
|
2014-11-19 15:49:47 +08:00
|
|
|
!VirtualBases.insert(Class->RD).second) {
|
2014-07-07 14:20:47 +08:00
|
|
|
Class = MSRTTIClass::getNextChild(Class);
|
|
|
|
continue;
|
|
|
|
}
|
2014-11-19 15:49:47 +08:00
|
|
|
if (!UniqueBases.insert(Class->RD).second)
|
2014-07-07 14:20:47 +08:00
|
|
|
AmbiguousBases.insert(Class->RD);
|
|
|
|
Class++;
|
|
|
|
}
|
|
|
|
if (AmbiguousBases.empty())
|
|
|
|
return;
|
|
|
|
for (MSRTTIClass &Class : Classes)
|
|
|
|
if (AmbiguousBases.count(Class.RD))
|
|
|
|
Class.Flags |= MSRTTIClass::IsAmbiguous;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() {
|
|
|
|
SmallString<256> MangledName;
|
|
|
|
{
|
|
|
|
llvm::raw_svector_ostream Out(MangledName);
|
2014-07-07 16:09:15 +08:00
|
|
|
ABI.getMangleContext().mangleCXXRTTIClassHierarchyDescriptor(RD, Out);
|
2014-07-07 14:20:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check to see if we've already declared this ClassHierarchyDescriptor.
|
|
|
|
if (auto CHD = Module.getNamedGlobal(MangledName))
|
|
|
|
return CHD;
|
|
|
|
|
|
|
|
// Serialize the class hierarchy and initialize the CHD Fields.
|
|
|
|
SmallVector<MSRTTIClass, 8> Classes;
|
|
|
|
serializeClassHierarchy(Classes, RD);
|
|
|
|
Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr);
|
|
|
|
detectAmbiguousBases(Classes);
|
|
|
|
int Flags = 0;
|
|
|
|
for (auto Class : Classes) {
|
|
|
|
if (Class.RD->getNumBases() > 1)
|
|
|
|
Flags |= HasBranchingHierarchy;
|
|
|
|
// Note: cl.exe does not calculate "HasAmbiguousBases" correctly. We
|
|
|
|
// believe the field isn't actually used.
|
|
|
|
if (Class.Flags & MSRTTIClass::IsAmbiguous)
|
|
|
|
Flags |= HasAmbiguousBases;
|
|
|
|
}
|
|
|
|
if ((Flags & HasBranchingHierarchy) && RD->getNumVBases() != 0)
|
|
|
|
Flags |= HasVirtualBranchingHierarchy;
|
|
|
|
// These gep indices are used to get the address of the first element of the
|
|
|
|
// base class array.
|
|
|
|
llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.IntTy, 0),
|
|
|
|
llvm::ConstantInt::get(CGM.IntTy, 0)};
|
|
|
|
|
|
|
|
// Forward-declare the class hierarchy descriptor
|
2014-07-07 16:09:15 +08:00
|
|
|
auto Type = ABI.getClassHierarchyDescriptorType();
|
2019-07-16 12:46:31 +08:00
|
|
|
auto CHD = new llvm::GlobalVariable(Module, Type, /*isConstant=*/true, Linkage,
|
2014-07-07 14:20:47 +08:00
|
|
|
/*Initializer=*/nullptr,
|
2015-12-01 16:14:39 +08:00
|
|
|
MangledName);
|
2015-01-17 03:23:42 +08:00
|
|
|
if (CHD->isWeakForLinker())
|
|
|
|
CHD->setComdat(CGM.getModule().getOrInsertComdat(CHD->getName()));
|
2014-07-07 14:20:47 +08:00
|
|
|
|
2015-04-03 02:55:21 +08:00
|
|
|
auto *Bases = getBaseClassArray(Classes);
|
|
|
|
|
2014-07-07 14:20:47 +08:00
|
|
|
// Initialize the base class ClassHierarchyDescriptor.
|
|
|
|
llvm::Constant *Fields[] = {
|
2017-01-02 03:16:02 +08:00
|
|
|
llvm::ConstantInt::get(CGM.IntTy, 0), // reserved by the runtime
|
2014-07-07 16:09:15 +08:00
|
|
|
llvm::ConstantInt::get(CGM.IntTy, Flags),
|
|
|
|
llvm::ConstantInt::get(CGM.IntTy, Classes.size()),
|
|
|
|
ABI.getImageRelativeConstant(llvm::ConstantExpr::getInBoundsGetElementPtr(
|
2015-04-03 02:55:21 +08:00
|
|
|
Bases->getValueType(), Bases,
|
2014-07-07 16:09:15 +08:00
|
|
|
llvm::ArrayRef<llvm::Value *>(GEPIndices))),
|
2014-07-07 14:20:47 +08:00
|
|
|
};
|
|
|
|
CHD->setInitializer(llvm::ConstantStruct::get(Type, Fields));
|
|
|
|
return CHD;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::GlobalVariable *
|
|
|
|
MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
|
|
|
|
SmallString<256> MangledName;
|
|
|
|
{
|
|
|
|
llvm::raw_svector_ostream Out(MangledName);
|
2014-07-07 16:09:15 +08:00
|
|
|
ABI.getMangleContext().mangleCXXRTTIBaseClassArray(RD, Out);
|
2014-07-07 14:20:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Forward-declare the base class array.
|
|
|
|
// cl.exe pads the base class array with 1 (in 32 bit mode) or 4 (in 64 bit
|
|
|
|
// mode) bytes of padding. We provide a pointer sized amount of padding by
|
|
|
|
// adding +1 to Classes.size(). The sections have pointer alignment and are
|
|
|
|
// marked pick-any so it shouldn't matter.
|
2014-07-07 23:29:10 +08:00
|
|
|
llvm::Type *PtrType = ABI.getImageRelativeType(
|
2014-07-07 16:09:15 +08:00
|
|
|
ABI.getBaseClassDescriptorType()->getPointerTo());
|
2014-07-07 23:29:10 +08:00
|
|
|
auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1);
|
2015-03-05 08:46:22 +08:00
|
|
|
auto *BCA =
|
|
|
|
new llvm::GlobalVariable(Module, ArrType,
|
2019-07-16 12:46:31 +08:00
|
|
|
/*isConstant=*/true, Linkage,
|
2015-12-01 16:14:39 +08:00
|
|
|
/*Initializer=*/nullptr, MangledName);
|
2015-01-17 03:23:42 +08:00
|
|
|
if (BCA->isWeakForLinker())
|
|
|
|
BCA->setComdat(CGM.getModule().getOrInsertComdat(BCA->getName()));
|
2014-07-07 14:20:47 +08:00
|
|
|
|
|
|
|
// Initialize the BaseClassArray.
|
|
|
|
SmallVector<llvm::Constant *, 8> BaseClassArrayData;
|
|
|
|
for (MSRTTIClass &Class : Classes)
|
|
|
|
BaseClassArrayData.push_back(
|
2014-07-07 16:09:15 +08:00
|
|
|
ABI.getImageRelativeConstant(getBaseClassDescriptor(Class)));
|
2014-07-07 14:20:47 +08:00
|
|
|
BaseClassArrayData.push_back(llvm::Constant::getNullValue(PtrType));
|
2014-07-07 23:29:10 +08:00
|
|
|
BCA->setInitializer(llvm::ConstantArray::get(ArrType, BaseClassArrayData));
|
2014-07-07 14:20:47 +08:00
|
|
|
return BCA;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::GlobalVariable *
|
|
|
|
MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) {
|
|
|
|
// Compute the fields for the BaseClassDescriptor. They are computed up front
|
|
|
|
// because they are mangled into the name of the object.
|
|
|
|
uint32_t OffsetInVBTable = 0;
|
|
|
|
int32_t VBPtrOffset = -1;
|
|
|
|
if (Class.VirtualRoot) {
|
|
|
|
auto &VTableContext = CGM.getMicrosoftVTableContext();
|
|
|
|
OffsetInVBTable = VTableContext.getVBTableIndex(RD, Class.VirtualRoot) * 4;
|
|
|
|
VBPtrOffset = Context.getASTRecordLayout(RD).getVBPtrOffset().getQuantity();
|
|
|
|
}
|
|
|
|
|
|
|
|
SmallString<256> MangledName;
|
|
|
|
{
|
|
|
|
llvm::raw_svector_ostream Out(MangledName);
|
2014-07-07 16:09:15 +08:00
|
|
|
ABI.getMangleContext().mangleCXXRTTIBaseClassDescriptor(
|
|
|
|
Class.RD, Class.OffsetInVBase, VBPtrOffset, OffsetInVBTable,
|
|
|
|
Class.Flags, Out);
|
2014-07-07 14:20:47 +08:00
|
|
|
}
|
|
|
|
|
2014-07-07 23:29:10 +08:00
|
|
|
// Check to see if we've already declared this object.
|
2014-07-07 14:20:47 +08:00
|
|
|
if (auto BCD = Module.getNamedGlobal(MangledName))
|
|
|
|
return BCD;
|
|
|
|
|
|
|
|
// Forward-declare the base class descriptor.
|
2014-07-07 16:09:15 +08:00
|
|
|
auto Type = ABI.getBaseClassDescriptorType();
|
2015-03-05 08:46:22 +08:00
|
|
|
auto BCD =
|
2019-07-16 12:46:31 +08:00
|
|
|
new llvm::GlobalVariable(Module, Type, /*isConstant=*/true, Linkage,
|
2015-12-01 16:14:39 +08:00
|
|
|
/*Initializer=*/nullptr, MangledName);
|
2015-01-17 03:23:42 +08:00
|
|
|
if (BCD->isWeakForLinker())
|
|
|
|
BCD->setComdat(CGM.getModule().getOrInsertComdat(BCD->getName()));
|
2014-07-07 14:20:47 +08:00
|
|
|
|
|
|
|
// Initialize the BaseClassDescriptor.
|
|
|
|
llvm::Constant *Fields[] = {
|
2014-07-07 16:09:15 +08:00
|
|
|
ABI.getImageRelativeConstant(
|
2015-03-18 04:35:00 +08:00
|
|
|
ABI.getAddrOfRTTIDescriptor(Context.getTypeDeclType(Class.RD))),
|
2014-07-07 16:09:15 +08:00
|
|
|
llvm::ConstantInt::get(CGM.IntTy, Class.NumBases),
|
|
|
|
llvm::ConstantInt::get(CGM.IntTy, Class.OffsetInVBase),
|
|
|
|
llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
|
|
|
|
llvm::ConstantInt::get(CGM.IntTy, OffsetInVBTable),
|
|
|
|
llvm::ConstantInt::get(CGM.IntTy, Class.Flags),
|
|
|
|
ABI.getImageRelativeConstant(
|
|
|
|
MSRTTIBuilder(ABI, Class.RD).getClassHierarchyDescriptor()),
|
2014-07-07 14:20:47 +08:00
|
|
|
};
|
|
|
|
BCD->setInitializer(llvm::ConstantStruct::get(Type, Fields));
|
|
|
|
return BCD;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::GlobalVariable *
|
2016-10-11 00:26:29 +08:00
|
|
|
MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo &Info) {
|
2014-07-07 14:20:47 +08:00
|
|
|
SmallString<256> MangledName;
|
|
|
|
{
|
|
|
|
llvm::raw_svector_ostream Out(MangledName);
|
2016-10-11 00:26:29 +08:00
|
|
|
ABI.getMangleContext().mangleCXXRTTICompleteObjectLocator(RD, Info.MangledPath, Out);
|
2014-07-07 14:20:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check to see if we've already computed this complete object locator.
|
|
|
|
if (auto COL = Module.getNamedGlobal(MangledName))
|
|
|
|
return COL;
|
|
|
|
|
|
|
|
// Compute the fields of the complete object locator.
|
2016-10-11 00:26:29 +08:00
|
|
|
int OffsetToTop = Info.FullOffsetInMDC.getQuantity();
|
2014-07-07 14:20:47 +08:00
|
|
|
int VFPtrOffset = 0;
|
|
|
|
// The offset includes the vtordisp if one exists.
|
2016-10-11 00:26:29 +08:00
|
|
|
if (const CXXRecordDecl *VBase = Info.getVBaseWithVPtr())
|
2014-07-07 14:20:47 +08:00
|
|
|
if (Context.getASTRecordLayout(RD)
|
|
|
|
.getVBaseOffsetsMap()
|
|
|
|
.find(VBase)
|
|
|
|
->second.hasVtorDisp())
|
2016-10-11 00:26:29 +08:00
|
|
|
VFPtrOffset = Info.NonVirtualOffset.getQuantity() + 4;
|
2014-07-07 14:20:47 +08:00
|
|
|
|
|
|
|
// Forward-declare the complete object locator.
|
2014-07-07 16:09:15 +08:00
|
|
|
llvm::StructType *Type = ABI.getCompleteObjectLocatorType();
|
2019-07-16 12:46:31 +08:00
|
|
|
auto COL = new llvm::GlobalVariable(Module, Type, /*isConstant=*/true, Linkage,
|
2015-12-01 16:14:39 +08:00
|
|
|
/*Initializer=*/nullptr, MangledName);
|
2014-07-07 14:20:47 +08:00
|
|
|
|
|
|
|
// Initialize the CompleteObjectLocator.
|
|
|
|
llvm::Constant *Fields[] = {
|
2014-07-07 16:09:15 +08:00
|
|
|
llvm::ConstantInt::get(CGM.IntTy, ABI.isImageRelative()),
|
|
|
|
llvm::ConstantInt::get(CGM.IntTy, OffsetToTop),
|
|
|
|
llvm::ConstantInt::get(CGM.IntTy, VFPtrOffset),
|
|
|
|
ABI.getImageRelativeConstant(
|
|
|
|
CGM.GetAddrOfRTTIDescriptor(Context.getTypeDeclType(RD))),
|
|
|
|
ABI.getImageRelativeConstant(getClassHierarchyDescriptor()),
|
|
|
|
ABI.getImageRelativeConstant(COL),
|
2014-07-07 14:20:47 +08:00
|
|
|
};
|
|
|
|
llvm::ArrayRef<llvm::Constant *> FieldsRef(Fields);
|
2014-07-07 16:09:15 +08:00
|
|
|
if (!ABI.isImageRelative())
|
|
|
|
FieldsRef = FieldsRef.drop_back();
|
2014-07-07 14:20:47 +08:00
|
|
|
COL->setInitializer(llvm::ConstantStruct::get(Type, FieldsRef));
|
2015-01-17 03:23:42 +08:00
|
|
|
if (COL->isWeakForLinker())
|
|
|
|
COL->setComdat(CGM.getModule().getOrInsertComdat(COL->getName()));
|
2014-07-07 14:20:47 +08:00
|
|
|
return COL;
|
|
|
|
}
|
|
|
|
|
2015-03-15 15:10:01 +08:00
|
|
|
static QualType decomposeTypeForEH(ASTContext &Context, QualType T,
|
2016-07-12 12:42:50 +08:00
|
|
|
bool &IsConst, bool &IsVolatile,
|
|
|
|
bool &IsUnaligned) {
|
2015-03-15 15:10:01 +08:00
|
|
|
T = Context.getExceptionObjectType(T);
|
|
|
|
|
|
|
|
// C++14 [except.handle]p3:
|
|
|
|
// A handler is a match for an exception object of type E if [...]
|
|
|
|
// - the handler is of type cv T or const T& where T is a pointer type and
|
|
|
|
// E is a pointer type that can be converted to T by [...]
|
|
|
|
// - a qualification conversion
|
|
|
|
IsConst = false;
|
|
|
|
IsVolatile = false;
|
2016-07-12 12:42:50 +08:00
|
|
|
IsUnaligned = false;
|
2015-03-15 15:10:01 +08:00
|
|
|
QualType PointeeType = T->getPointeeType();
|
|
|
|
if (!PointeeType.isNull()) {
|
|
|
|
IsConst = PointeeType.isConstQualified();
|
|
|
|
IsVolatile = PointeeType.isVolatileQualified();
|
2016-07-12 12:42:50 +08:00
|
|
|
IsUnaligned = PointeeType.getQualifiers().hasUnaligned();
|
2015-03-15 15:10:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Member pointer types like "const int A::*" are represented by having RTTI
|
|
|
|
// for "int A::*" and separately storing the const qualifier.
|
|
|
|
if (const auto *MPTy = T->getAs<MemberPointerType>())
|
|
|
|
T = Context.getMemberPointerType(PointeeType.getUnqualifiedType(),
|
|
|
|
MPTy->getClass());
|
|
|
|
|
|
|
|
// Pointer types like "const int * const *" are represented by having RTTI
|
|
|
|
// for "const int **" and separately storing the const qualifier.
|
|
|
|
if (T->isPointerType())
|
|
|
|
T = Context.getPointerType(PointeeType.getUnqualifiedType());
|
|
|
|
|
|
|
|
return T;
|
|
|
|
}
|
|
|
|
|
2015-09-17 04:15:55 +08:00
|
|
|
CatchTypeInfo
|
2015-03-30 05:55:10 +08:00
|
|
|
MicrosoftCXXABI::getAddrOfCXXCatchHandlerType(QualType Type,
|
|
|
|
QualType CatchHandlerType) {
|
2015-03-18 04:35:05 +08:00
|
|
|
// TypeDescriptors for exceptions never have qualified pointer types,
|
2017-03-30 22:13:19 +08:00
|
|
|
// qualifiers are stored separately in order to support qualification
|
2015-03-15 15:10:01 +08:00
|
|
|
// conversions.
|
2016-07-12 12:42:50 +08:00
|
|
|
bool IsConst, IsVolatile, IsUnaligned;
|
|
|
|
Type =
|
|
|
|
decomposeTypeForEH(getContext(), Type, IsConst, IsVolatile, IsUnaligned);
|
2015-03-15 15:10:01 +08:00
|
|
|
|
2015-03-18 04:35:05 +08:00
|
|
|
bool IsReference = CatchHandlerType->isReferenceType();
|
|
|
|
|
|
|
|
uint32_t Flags = 0;
|
|
|
|
if (IsConst)
|
|
|
|
Flags |= 1;
|
|
|
|
if (IsVolatile)
|
|
|
|
Flags |= 2;
|
2016-07-12 12:42:50 +08:00
|
|
|
if (IsUnaligned)
|
|
|
|
Flags |= 4;
|
2015-03-18 04:35:05 +08:00
|
|
|
if (IsReference)
|
|
|
|
Flags |= 8;
|
|
|
|
|
2015-09-17 04:15:55 +08:00
|
|
|
return CatchTypeInfo{getAddrOfRTTIDescriptor(Type)->stripPointerCasts(),
|
|
|
|
Flags};
|
2015-03-18 04:35:00 +08:00
|
|
|
}
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Gets a TypeDescriptor. Returns a llvm::Constant * rather than a
|
2015-03-18 04:35:00 +08:00
|
|
|
/// llvm::GlobalVariable * because different type descriptors have different
|
|
|
|
/// types, and need to be abstracted. They are abstracting by casting the
|
|
|
|
/// address to an Int8PtrTy.
|
|
|
|
llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
|
2015-03-18 04:35:05 +08:00
|
|
|
SmallString<256> MangledName;
|
2014-07-07 14:20:47 +08:00
|
|
|
{
|
|
|
|
llvm::raw_svector_ostream Out(MangledName);
|
2014-07-07 16:09:15 +08:00
|
|
|
getMangleContext().mangleCXXRTTI(Type, Out);
|
2014-07-07 14:20:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check to see if we've already declared this TypeDescriptor.
|
|
|
|
if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
|
|
|
|
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
|
|
|
|
|
2017-06-01 16:04:05 +08:00
|
|
|
// Note for the future: If we would ever like to do deferred emission of
|
|
|
|
// RTTI, check if emitting vtables opportunistically need any adjustment.
|
|
|
|
|
2014-07-07 14:20:47 +08:00
|
|
|
// Compute the fields for the TypeDescriptor.
|
2015-03-18 04:35:05 +08:00
|
|
|
SmallString<256> TypeInfoString;
|
2014-07-07 14:20:47 +08:00
|
|
|
{
|
|
|
|
llvm::raw_svector_ostream Out(TypeInfoString);
|
2014-07-07 16:09:15 +08:00
|
|
|
getMangleContext().mangleCXXRTTIName(Type, Out);
|
2014-07-07 14:20:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Declare and initialize the TypeDescriptor.
|
|
|
|
llvm::Constant *Fields[] = {
|
|
|
|
getTypeInfoVTable(CGM), // VFPtr
|
|
|
|
llvm::ConstantPointerNull::get(CGM.Int8PtrTy), // Runtime data
|
|
|
|
llvm::ConstantDataArray::getString(CGM.getLLVMContext(), TypeInfoString)};
|
|
|
|
llvm::StructType *TypeDescriptorType =
|
2014-07-07 16:09:15 +08:00
|
|
|
getTypeDescriptorType(TypeInfoString);
|
2015-01-17 03:23:42 +08:00
|
|
|
auto *Var = new llvm::GlobalVariable(
|
2019-07-16 12:46:31 +08:00
|
|
|
CGM.getModule(), TypeDescriptorType, /*isConstant=*/false,
|
2015-01-17 03:23:42 +08:00
|
|
|
getLinkageForRTTI(Type),
|
|
|
|
llvm::ConstantStruct::get(TypeDescriptorType, Fields),
|
2015-12-01 16:14:39 +08:00
|
|
|
MangledName);
|
2015-01-17 03:23:42 +08:00
|
|
|
if (Var->isWeakForLinker())
|
|
|
|
Var->setComdat(CGM.getModule().getOrInsertComdat(Var->getName()));
|
|
|
|
return llvm::ConstantExpr::getBitCast(Var, CGM.Int8PtrTy);
|
2014-07-07 14:20:47 +08:00
|
|
|
}
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Gets or a creates a Microsoft CompleteObjectLocator.
|
2014-07-07 14:20:47 +08:00
|
|
|
llvm::GlobalVariable *
|
|
|
|
MicrosoftCXXABI::getMSCompleteObjectLocator(const CXXRecordDecl *RD,
|
2016-10-11 00:26:29 +08:00
|
|
|
const VPtrInfo &Info) {
|
2014-07-07 16:09:15 +08:00
|
|
|
return MSRTTIBuilder(*this, RD).getCompleteObjectLocator(Info);
|
2014-07-07 14:20:47 +08:00
|
|
|
}
|
2014-09-16 03:20:10 +08:00
|
|
|
|
2019-03-23 07:05:10 +08:00
|
|
|
void MicrosoftCXXABI::emitCXXStructor(GlobalDecl GD) {
|
|
|
|
if (auto *ctor = dyn_cast<CXXConstructorDecl>(GD.getDecl())) {
|
|
|
|
// There are no constructor variants, always emit the complete destructor.
|
|
|
|
llvm::Function *Fn =
|
|
|
|
CGM.codegenCXXStructor(GD.getWithCtorType(Ctor_Complete));
|
|
|
|
CGM.maybeSetTrivialComdat(*ctor, *Fn);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
auto *dtor = cast<CXXDestructorDecl>(GD.getDecl());
|
2014-09-16 03:20:10 +08:00
|
|
|
|
2018-03-17 03:40:50 +08:00
|
|
|
// Emit the base destructor if the base and complete (vbase) destructors are
|
|
|
|
// equivalent. This effectively implements -mconstructor-aliases as part of
|
|
|
|
// the ABI.
|
2019-03-23 07:05:10 +08:00
|
|
|
if (GD.getDtorType() == Dtor_Complete &&
|
2018-03-17 03:40:50 +08:00
|
|
|
dtor->getParent()->getNumVBases() == 0)
|
2019-03-23 07:05:10 +08:00
|
|
|
GD = GD.getWithDtorType(Dtor_Base);
|
2014-09-16 03:20:10 +08:00
|
|
|
|
|
|
|
// The base destructor is equivalent to the base destructor of its
|
|
|
|
// base class if there is exactly one non-virtual base class with a
|
|
|
|
// non-trivial destructor, there are no fields with a non-trivial
|
|
|
|
// destructor, and the body of the destructor is trivial.
|
2019-03-23 07:05:10 +08:00
|
|
|
if (GD.getDtorType() == Dtor_Base && !CGM.TryEmitBaseDestructorAsAlias(dtor))
|
2014-09-16 03:20:10 +08:00
|
|
|
return;
|
|
|
|
|
2019-03-23 07:05:10 +08:00
|
|
|
llvm::Function *Fn = CGM.codegenCXXStructor(GD);
|
2015-01-17 09:47:39 +08:00
|
|
|
if (Fn->isWeakForLinker())
|
|
|
|
Fn->setComdat(CGM.getModule().getOrInsertComdat(Fn->getName()));
|
2014-09-16 03:20:10 +08:00
|
|
|
}
|
|
|
|
|
2015-03-12 02:36:39 +08:00
|
|
|
llvm::Function *
|
2015-03-14 06:36:55 +08:00
|
|
|
MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
|
|
|
|
CXXCtorType CT) {
|
|
|
|
assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
|
|
|
|
|
2015-03-12 02:36:39 +08:00
|
|
|
// Calculate the mangled name.
|
|
|
|
SmallString<256> ThunkName;
|
|
|
|
llvm::raw_svector_ostream Out(ThunkName);
|
2020-03-06 01:02:13 +08:00
|
|
|
getMangleContext().mangleName(GlobalDecl(CD, CT), Out);
|
2015-03-12 02:36:39 +08:00
|
|
|
|
|
|
|
// If the thunk has been generated previously, just return it.
|
|
|
|
if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
|
|
|
|
return cast<llvm::Function>(GV);
|
|
|
|
|
|
|
|
// Create the llvm::Function.
|
2015-03-14 06:36:55 +08:00
|
|
|
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSCtorClosure(CD, CT);
|
2015-03-12 02:36:39 +08:00
|
|
|
llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
|
|
|
|
const CXXRecordDecl *RD = CD->getParent();
|
|
|
|
QualType RecordTy = getContext().getRecordType(RD);
|
|
|
|
llvm::Function *ThunkFn = llvm::Function::Create(
|
|
|
|
ThunkTy, getLinkageForRTTI(RecordTy), ThunkName.str(), &CGM.getModule());
|
2015-03-18 03:00:50 +08:00
|
|
|
ThunkFn->setCallingConv(static_cast<llvm::CallingConv::ID>(
|
|
|
|
FnInfo.getEffectiveCallingConvention()));
|
2015-07-01 05:23:51 +08:00
|
|
|
if (ThunkFn->isWeakForLinker())
|
|
|
|
ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
|
2015-03-14 06:36:55 +08:00
|
|
|
bool IsCopy = CT == Ctor_CopyingClosure;
|
2015-03-12 02:36:39 +08:00
|
|
|
|
|
|
|
// Start codegen.
|
|
|
|
CodeGenFunction CGF(CGM);
|
|
|
|
CGF.CurGD = GlobalDecl(CD, Ctor_Complete);
|
|
|
|
|
|
|
|
// Build FunctionArgs.
|
|
|
|
FunctionArgList FunctionArgs;
|
|
|
|
|
2015-03-14 06:36:55 +08:00
|
|
|
// A constructor always starts with a 'this' pointer as its first argument.
|
2015-03-12 02:36:39 +08:00
|
|
|
buildThisParam(CGF, FunctionArgs);
|
|
|
|
|
|
|
|
// Following the 'this' pointer is a reference to the source object that we
|
|
|
|
// are copying from.
|
|
|
|
ImplicitParamDecl SrcParam(
|
2017-06-09 21:40:18 +08:00
|
|
|
getContext(), /*DC=*/nullptr, SourceLocation(),
|
|
|
|
&getContext().Idents.get("src"),
|
2015-03-12 02:36:39 +08:00
|
|
|
getContext().getLValueReferenceType(RecordTy,
|
2017-06-09 21:40:18 +08:00
|
|
|
/*SpelledAsLValue=*/true),
|
|
|
|
ImplicitParamDecl::Other);
|
2015-03-14 06:36:55 +08:00
|
|
|
if (IsCopy)
|
|
|
|
FunctionArgs.push_back(&SrcParam);
|
2015-03-12 02:36:39 +08:00
|
|
|
|
2015-03-14 06:36:55 +08:00
|
|
|
// Constructors for classes which utilize virtual bases have an additional
|
|
|
|
// parameter which indicates whether or not it is being delegated to by a more
|
|
|
|
// derived constructor.
|
2017-06-09 21:40:18 +08:00
|
|
|
ImplicitParamDecl IsMostDerived(getContext(), /*DC=*/nullptr,
|
|
|
|
SourceLocation(),
|
2015-03-12 02:36:39 +08:00
|
|
|
&getContext().Idents.get("is_most_derived"),
|
2017-06-09 21:40:18 +08:00
|
|
|
getContext().IntTy, ImplicitParamDecl::Other);
|
2018-04-06 23:14:32 +08:00
|
|
|
// Only add the parameter to the list if the class has virtual bases.
|
2015-03-12 02:36:39 +08:00
|
|
|
if (RD->getNumVBases() > 0)
|
|
|
|
FunctionArgs.push_back(&IsMostDerived);
|
|
|
|
|
|
|
|
// Start defining the function.
|
2016-11-17 02:49:47 +08:00
|
|
|
auto NL = ApplyDebugLocation::CreateEmpty(CGF);
|
2015-03-12 02:36:39 +08:00
|
|
|
CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
|
|
|
|
FunctionArgs, CD->getLocation(), SourceLocation());
|
2016-11-17 02:49:47 +08:00
|
|
|
// Create a scope with an artificial location for the body of this function.
|
|
|
|
auto AL = ApplyDebugLocation::CreateArtificial(CGF);
|
[MS] Apply adjustments after storing 'this'
Summary:
The MS ABI convention is that the 'this' pointer on entry is the address
of the vfptr that was used to make the virtual method call. In other
words, the pointer on entry always points to the base subobject that
introduced the virtual method. Consider this hierarchy:
struct A { virtual void f() = 0; };
struct B { virtual void g() = 0; };
struct C : A, B {
void f() override;
void g() override;
};
On entry to C::g, [ER]CX will contain the address of C's B subobject,
and C::g will have to subtract sizeof(A) to recover a pointer to C.
Before this change, we applied this adjustment in the prologue and
stored the new value into the "this" local variable alloca used for
debug info. However, MSVC does not do this, presumably because it is
often profitable to fold the adjustment into later field accesses. This
creates a problem, because the debugger expects the variable to be
unadjusted. Unfortunately, CodeView doesn't have anything like DWARF
expressions for computing variables that aren't in the program anymore,
so we have to declare 'this' to be the unadjusted value if we want the
debugger to see the right value.
This has the side benefit that, in optimized builds, the 'this' pointer
will usually be available on function entry because it doesn't require
any adjustment.
Reviewers: hans
Subscribers: aprantl, cfe-commits
Differential Revision: https://reviews.llvm.org/D40109
llvm-svn: 318440
2017-11-17 03:09:36 +08:00
|
|
|
setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
|
2015-03-12 02:36:39 +08:00
|
|
|
llvm::Value *This = getThisValue(CGF);
|
|
|
|
|
|
|
|
llvm::Value *SrcVal =
|
2015-03-14 06:36:55 +08:00
|
|
|
IsCopy ? CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&SrcParam), "src")
|
|
|
|
: nullptr;
|
2015-03-12 02:36:39 +08:00
|
|
|
|
|
|
|
CallArgList Args;
|
|
|
|
|
|
|
|
// Push the this ptr.
|
2019-01-11 09:54:53 +08:00
|
|
|
Args.add(RValue::get(This), CD->getThisType());
|
2015-03-12 02:36:39 +08:00
|
|
|
|
|
|
|
// Push the src ptr.
|
2015-03-14 06:36:55 +08:00
|
|
|
if (SrcVal)
|
|
|
|
Args.add(RValue::get(SrcVal), SrcParam.getType());
|
2015-03-12 02:36:39 +08:00
|
|
|
|
|
|
|
// Add the rest of the default arguments.
|
2016-11-24 00:51:30 +08:00
|
|
|
SmallVector<const Stmt *, 4> ArgVec;
|
|
|
|
ArrayRef<ParmVarDecl *> params = CD->parameters().drop_front(IsCopy ? 1 : 0);
|
|
|
|
for (const ParmVarDecl *PD : params) {
|
|
|
|
assert(PD->hasDefaultArg() && "ctor closure lacks default args");
|
|
|
|
ArgVec.push_back(PD->getDefaultArg());
|
2015-03-18 05:51:43 +08:00
|
|
|
}
|
2015-03-12 02:36:39 +08:00
|
|
|
|
|
|
|
CodeGenFunction::RunCleanupsScope Cleanups(CGF);
|
|
|
|
|
|
|
|
const auto *FPT = CD->getType()->castAs<FunctionProtoType>();
|
2015-07-22 02:37:18 +08:00
|
|
|
CGF.EmitCallArgs(Args, FPT, llvm::makeArrayRef(ArgVec), CD, IsCopy ? 1 : 0);
|
2015-03-12 02:36:39 +08:00
|
|
|
|
|
|
|
// Insert any ABI-specific implicit constructor arguments.
|
2020-05-19 14:43:46 +08:00
|
|
|
AddedStructorArgCounts ExtraArgs =
|
2017-02-23 04:28:02 +08:00
|
|
|
addImplicitConstructorArgs(CGF, CD, Ctor_Complete,
|
|
|
|
/*ForVirtualBase=*/false,
|
|
|
|
/*Delegating=*/false, Args);
|
2015-03-12 02:36:39 +08:00
|
|
|
// Call the destructor with our arguments.
|
2016-10-27 07:46:34 +08:00
|
|
|
llvm::Constant *CalleePtr =
|
2019-03-23 07:05:10 +08:00
|
|
|
CGM.getAddrOfCXXStructor(GlobalDecl(CD, Ctor_Complete));
|
2018-11-13 23:48:08 +08:00
|
|
|
CGCallee Callee =
|
|
|
|
CGCallee::forDirect(CalleePtr, GlobalDecl(CD, Ctor_Complete));
|
2015-03-12 02:36:39 +08:00
|
|
|
const CGFunctionInfo &CalleeInfo = CGM.getTypes().arrangeCXXConstructorCall(
|
2017-02-24 06:07:35 +08:00
|
|
|
Args, CD, Ctor_Complete, ExtraArgs.Prefix, ExtraArgs.Suffix);
|
2016-10-27 07:46:34 +08:00
|
|
|
CGF.EmitCall(CalleeInfo, Callee, ReturnValueSlot(), Args);
|
2015-03-12 02:36:39 +08:00
|
|
|
|
|
|
|
Cleanups.ForceCleanup();
|
|
|
|
|
|
|
|
// Emit the ret instruction, remove any temporary instructions created for the
|
|
|
|
// aid of CodeGen.
|
|
|
|
CGF.FinishFunction(SourceLocation());
|
|
|
|
|
|
|
|
return ThunkFn;
|
|
|
|
}
|
|
|
|
|
2015-03-05 08:46:22 +08:00
|
|
|
/// Emit (or reuse) the CatchableType descriptor for \p T.
///
/// A CatchableType describes one type that an exception object may be caught
/// as.  \p NVOffset, \p VBPtrOffset and \p VBIndex describe how to adjust a
/// pointer to the exception object into a pointer to the corresponding (base)
/// subobject.  Returns an image-relative reference to the global.
llvm::Constant *MicrosoftCXXABI::getCatchableType(QualType T,
                                                  uint32_t NVOffset,
                                                  int32_t VBPtrOffset,
                                                  uint32_t VBIndex) {
  // Reference types are decomposed before we get here.
  assert(!T->isReferenceType());

  // For class types, the runtime may need the copy constructor to copy the
  // exception object when it is caught by value.
  CXXRecordDecl *RD = T->getAsCXXRecordDecl();
  const CXXConstructorDecl *CD =
      RD ? CGM.getContext().getCopyConstructorForExceptionObject(RD) : nullptr;
  CXXCtorType CT = Ctor_Complete;
  // If the copy constructor doesn't use the default calling convention or
  // takes anything other than exactly one parameter, the runtime cannot call
  // it directly; emit a "copying closure" thunk with the expected signature
  // instead.
  if (CD)
    if (!hasDefaultCXXMethodCC(getContext(), CD) || CD->getNumParams() != 1)
      CT = Ctor_CopyingClosure;

  uint32_t Size = getContext().getTypeSizeInChars(T).getQuantity();
  // The mangled name encodes every field that distinguishes this entry, so it
  // doubles as the cache key for the module-level lookup below.
  SmallString<256> MangledName;
  {
    llvm::raw_svector_ostream Out(MangledName);
    getMangleContext().mangleCXXCatchableType(T, CD, CT, Size, NVOffset,
                                              VBPtrOffset, VBIndex, Out);
  }
  // Reuse a previously emitted CatchableType with the same mangling.
  if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
    return getImageRelativeConstant(GV);

  // The TypeDescriptor is used by the runtime to determine if a catch handler
  // is appropriate for the exception object.
  llvm::Constant *TD = getImageRelativeConstant(getAddrOfRTTIDescriptor(T));

  // The runtime is responsible for calling the copy constructor if the
  // exception is caught by value.  Null means no copy constructor is needed.
  llvm::Constant *CopyCtor;
  if (CD) {
    if (CT == Ctor_CopyingClosure)
      CopyCtor = getAddrOfCXXCtorClosure(CD, Ctor_CopyingClosure);
    else
      CopyCtor = CGM.getAddrOfCXXStructor(GlobalDecl(CD, Ctor_Complete));

    CopyCtor = llvm::ConstantExpr::getBitCast(CopyCtor, CGM.Int8PtrTy);
  } else {
    CopyCtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
  }
  CopyCtor = getImageRelativeConstant(CopyCtor);

  bool IsScalar = !RD;
  bool HasVirtualBases = false;
  bool IsStdBadAlloc = false; // std::bad_alloc is special for some reason.
  // For pointer types, the properties below are those of the pointee record.
  QualType PointeeType = T;
  if (T->isPointerType())
    PointeeType = T->getPointeeType();
  if (const CXXRecordDecl *RD = PointeeType->getAsCXXRecordDecl()) {
    HasVirtualBases = RD->getNumVBases() > 0;
    if (IdentifierInfo *II = RD->getIdentifier())
      IsStdBadAlloc = II->isStr("bad_alloc") && RD->isInStdNamespace();
  }

  // Encode the relevant CatchableType properties into the Flags bitfield.
  // FIXME: Figure out how bits 2 or 8 can get set.
  uint32_t Flags = 0;
  if (IsScalar)
    Flags |= 1;
  if (HasVirtualBases)
    Flags |= 4;
  if (IsStdBadAlloc)
    Flags |= 16;

  llvm::Constant *Fields[] = {
      llvm::ConstantInt::get(CGM.IntTy, Flags),       // Flags
      TD,                                             // TypeDescriptor
      llvm::ConstantInt::get(CGM.IntTy, NVOffset),    // NonVirtualAdjustment
      llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset), // OffsetToVBPtr
      llvm::ConstantInt::get(CGM.IntTy, VBIndex),     // VBTableIndex
      llvm::ConstantInt::get(CGM.IntTy, Size),        // Size
      CopyCtor                                        // CopyCtor
  };
  llvm::StructType *CTType = getCatchableTypeType();
  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), CTType, /*isConstant=*/true, getLinkageForRTTI(T),
      llvm::ConstantStruct::get(CTType, Fields), MangledName);
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  // EH metadata lives in .xdata, matching MSVC's layout.
  GV->setSection(".xdata");
  if (GV->isWeakForLinker())
    GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName()));
  return getImageRelativeConstant(GV);
}
|
|
|
|
|
|
|
|
/// Emit (or reuse) the CatchableTypeArray for \p T: the full list of types an
/// exception object of type \p T may be caught as (itself, its unambiguous
/// public bases, and pointer conversions).  Results are memoized per type in
/// CatchableTypeArrays.
llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
  // Reference types are decomposed before we get here.
  assert(!T->isReferenceType());

  // See if we've already generated a CatchableTypeArray for this type before.
  llvm::GlobalVariable *&CTA = CatchableTypeArrays[T];
  if (CTA)
    return CTA;

  // Ensure that we don't have duplicate entries in our CatchableTypeArray by
  // using a SmallSetVector.  Duplicates may arise due to virtual bases
  // occurring more than once in the hierarchy.
  llvm::SmallSetVector<llvm::Constant *, 2> CatchableTypes;

  // C++14 [except.handle]p3:
  //   A handler is a match for an exception object of type E if [...]
  //     - the handler is of type cv T or cv T& and T is an unambiguous public
  //       base class of E, or
  //     - the handler is of type cv T or const T& where T is a pointer type
  //       and E is a pointer type that can be converted to T by [...]
  //         - a standard pointer conversion (4.10) not involving conversions
  //           to pointers to private or protected or ambiguous classes
  //
  // For a thrown pointer, base-class matching applies to the pointee record.
  const CXXRecordDecl *MostDerivedClass = nullptr;
  bool IsPointer = T->isPointerType();
  if (IsPointer)
    MostDerivedClass = T->getPointeeType()->getAsCXXRecordDecl();
  else
    MostDerivedClass = T->getAsCXXRecordDecl();

  // Collect all the unambiguous public bases of the MostDerivedClass.
  if (MostDerivedClass) {
    const ASTContext &Context = getContext();
    const ASTRecordLayout &MostDerivedLayout =
        Context.getASTRecordLayout(MostDerivedClass);
    MicrosoftVTableContext &VTableContext = CGM.getMicrosoftVTableContext();
    SmallVector<MSRTTIClass, 8> Classes;
    serializeClassHierarchy(Classes, MostDerivedClass);
    Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr);
    detectAmbiguousBases(Classes);
    for (const MSRTTIClass &Class : Classes) {
      // Skip any ambiguous or private bases.
      if (Class.Flags &
          (MSRTTIClass::IsPrivateOnPath | MSRTTIClass::IsAmbiguous))
        continue;
      // Write down how to convert from a derived pointer to a base pointer.
      // A virtual base needs an extra hop through the vbtable: record the
      // vbptr offset and the byte offset of the base's vbtable slot.
      uint32_t OffsetInVBTable = 0;
      int32_t VBPtrOffset = -1;
      if (Class.VirtualRoot) {
        OffsetInVBTable =
          VTableContext.getVBTableIndex(MostDerivedClass, Class.VirtualRoot)*4;
        VBPtrOffset = MostDerivedLayout.getVBPtrOffset().getQuantity();
      }

      // Turn our record back into a pointer if the exception object is a
      // pointer.
      QualType RTTITy = QualType(Class.RD->getTypeForDecl(), 0);
      if (IsPointer)
        RTTITy = Context.getPointerType(RTTITy);
      CatchableTypes.insert(getCatchableType(RTTITy, Class.OffsetInVBase,
                                             VBPtrOffset, OffsetInVBTable));
    }
  }

  // C++14 [except.handle]p3:
  //   A handler is a match for an exception object of type E if
  //     - The handler is of type cv T or cv T& and E and T are the same type
  //       (ignoring the top-level cv-qualifiers)
  CatchableTypes.insert(getCatchableType(T));

  // C++14 [except.handle]p3:
  //   A handler is a match for an exception object of type E if
  //     - the handler is of type cv T or const T& where T is a pointer type
  //       and E is a pointer type that can be converted to T by [...]
  //         - a standard pointer conversion (4.10) not involving conversions
  //           to pointers to private or protected or ambiguous classes
  //
  // C++14 [conv.ptr]p2:
  //   A prvalue of type "pointer to cv T," where T is an object type, can be
  //   converted to a prvalue of type "pointer to cv void".
  if (IsPointer && T->getPointeeType()->isObjectType())
    CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));

  // C++14 [except.handle]p3:
  //   A handler is a match for an exception object of type E if [...]
  //     - the handler is of type cv T or const T& where T is a pointer or
  //       pointer to member type and E is std::nullptr_t.
  //
  // We cannot possibly list all possible pointer types here, making this
  // implementation incompatible with the standard.  However, MSVC includes an
  // entry for pointer-to-void in this case.  Let's do the same.
  if (T->isNullPtrType())
    CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));

  // Lay out the array: a leading count followed by the image-relative
  // CatchableType pointers collected above.
  uint32_t NumEntries = CatchableTypes.size();
  llvm::Type *CTType =
      getImageRelativeType(getCatchableTypeType()->getPointerTo());
  llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries);
  llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries);
  llvm::Constant *Fields[] = {
      llvm::ConstantInt::get(CGM.IntTy, NumEntries), // NumEntries
      llvm::ConstantArray::get(
          AT, llvm::makeArrayRef(CatchableTypes.begin(),
                                 CatchableTypes.end())) // CatchableTypes
  };
  SmallString<256> MangledName;
  {
    llvm::raw_svector_ostream Out(MangledName);
    getMangleContext().mangleCXXCatchableTypeArray(T, NumEntries, Out);
  }
  CTA = new llvm::GlobalVariable(
      CGM.getModule(), CTAType, /*isConstant=*/true, getLinkageForRTTI(T),
      llvm::ConstantStruct::get(CTAType, Fields), MangledName);
  CTA->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  // EH metadata lives in .xdata, matching MSVC's layout.
  CTA->setSection(".xdata");
  if (CTA->isWeakForLinker())
    CTA->setComdat(CGM.getModule().getOrInsertComdat(CTA->getName()));
  return CTA;
}
|
|
|
|
|
|
|
|
/// Emit (or reuse) the ThrowInfo global for a throw of type \p T.  The
/// ThrowInfo records the thrown type's CV qualifiers, its cleanup function
/// (destructor), and a pointer to its CatchableTypeArray.
llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) {
  // Strip references and peel off const/volatile/__unaligned; the qualifiers
  // are re-encoded into the Flags bitfield below.
  bool IsConst, IsVolatile, IsUnaligned;
  T = decomposeTypeForEH(getContext(), T, IsConst, IsVolatile, IsUnaligned);

  // The CatchableTypeArray enumerates the various (CV-unqualified) types that
  // the exception object may be caught as.
  llvm::GlobalVariable *CTA = getCatchableTypeArray(T);
  // The first field in a CatchableTypeArray is the number of CatchableTypes.
  // This is used as a component of the mangled name which means that we need
  // to know what it is in order to see if we have previously generated the
  // ThrowInfo.
  uint32_t NumEntries =
      cast<llvm::ConstantInt>(CTA->getInitializer()->getAggregateElement(0U))
          ->getLimitedValue();

  SmallString<256> MangledName;
  {
    llvm::raw_svector_ostream Out(MangledName);
    getMangleContext().mangleCXXThrowInfo(T, IsConst, IsVolatile, IsUnaligned,
                                          NumEntries, Out);
  }

  // Reuse a previously generated ThrowInfo if we have generated an
  // appropriate one before.
  if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
    return GV;

  // The RTTI TypeDescriptor uses an unqualified type but catch clauses must
  // be at least as CV qualified.  Encode this requirement into the Flags
  // bitfield.
  uint32_t Flags = 0;
  if (IsConst)
    Flags |= 1;
  if (IsVolatile)
    Flags |= 2;
  if (IsUnaligned)
    Flags |= 4;

  // The cleanup-function (a destructor) must be called when the exception
  // object's lifetime ends.  Null if the destructor is trivial or absent.
  llvm::Constant *CleanupFn = llvm::Constant::getNullValue(CGM.Int8PtrTy);
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    if (CXXDestructorDecl *DtorD = RD->getDestructor())
      if (!DtorD->isTrivial())
        CleanupFn = llvm::ConstantExpr::getBitCast(
            CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete)),
            CGM.Int8PtrTy);
  // This is unused as far as we can tell, initialize it to null.
  llvm::Constant *ForwardCompat =
      getImageRelativeConstant(llvm::Constant::getNullValue(CGM.Int8PtrTy));
  llvm::Constant *PointerToCatchableTypes = getImageRelativeConstant(
      llvm::ConstantExpr::getBitCast(CTA, CGM.Int8PtrTy));
  llvm::StructType *TIType = getThrowInfoType();
  llvm::Constant *Fields[] = {
      llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags
      getImageRelativeConstant(CleanupFn),      // CleanupFn
      ForwardCompat,                            // ForwardCompat
      PointerToCatchableTypes                   // CatchableTypeArray
  };
  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), TIType, /*isConstant=*/true, getLinkageForRTTI(T),
      llvm::ConstantStruct::get(TIType, Fields), StringRef(MangledName));
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  // EH metadata lives in .xdata, matching MSVC's layout.
  GV->setSection(".xdata");
  if (GV->isWeakForLinker())
    GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName()));
  return GV;
}
|
|
|
|
|
|
|
|
/// Lower a 'throw' expression for the Microsoft ABI: materialize the thrown
/// value into a stack temporary and hand its address, along with the
/// corresponding ThrowInfo, to the runtime's throw entry point.
void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  const Expr *ThrownExpr = E->getSubExpr();
  QualType ThrownTy = ThrownExpr->getType();

  // The exception object lives on the stack and its address is passed to the
  // runtime function.
  Address ExceptionObj = CGF.CreateMemTemp(ThrownTy);
  CGF.EmitAnyExprToMem(ThrownExpr, ExceptionObj, ThrownTy.getQualifiers(),
                       /*IsInit=*/true);

  // The so-called ThrowInfo describes how the exception object may be caught.
  llvm::GlobalVariable *ThrowInfo = getThrowInfo(ThrownTy);

  // Call into the runtime to throw the exception; the call never returns
  // normally.
  llvm::Value *RuntimeArgs[] = {
      CGF.Builder.CreateBitCast(ExceptionObj.getPointer(), CGM.Int8PtrTy),
      ThrowInfo};
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), RuntimeArgs);
}
|
2017-12-14 05:53:04 +08:00
|
|
|
|
|
|
|
/// Load the vtable pointer for \p This.  The address is first adjusted via
/// performBaseAdjustment to the subobject that actually holds the vfptr; the
/// (possibly updated) record decl is returned alongside the loaded pointer.
std::pair<llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
                               const CXXRecordDecl *RD) {
  QualType RecordTy(RD->getTypeForDecl(), 0);
  // Note: both 'This' and 'RD' may be rewritten by the adjustment.
  std::tie(This, std::ignore, RD) =
      performBaseAdjustment(CGF, This, RecordTy);
  llvm::Value *VTable = CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD);
  return {VTable, RD};
}
|