//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of virtual tables.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CodeGenModule.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
#include <cstdio>

using namespace clang;
using namespace CodeGen;

CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
    : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}

llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
                                              const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // Compute the mangled name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
    getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
                                                      Thunk.This, Out);
  else
    getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);

  llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
  return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true,
                                 /*DontDefer=*/true, /*IsThunk=*/true);
}

static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
                               const ThunkInfo &Thunk, llvm::Function *Fn) {
  CGM.setGlobalVisibility(Fn, MD);
}

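// Added commentary (not in the original source): setThunkProperties gives the
// thunk the linkage and visibility of the method it forwards to, and placing
// a weak thunk in a COMDAT group keyed on its own name lets the linker fold
// the duplicate copies that every TU referencing the vtable may emit.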
static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk,
                               llvm::Function *ThunkFn, bool ForVTable,
                               GlobalDecl GD) {
  CGM.setFunctionLinkage(GD, ThunkFn);
  CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
                                  !Thunk.Return.isEmpty());

  // Set the right visibility.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  setThunkVisibility(CGM, MD, Thunk, ThunkFn);

  if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker())
    ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
}

#ifndef NDEBUG
static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
                    const ABIArgInfo &infoR, CanQualType typeR) {
  return (infoL.getKind() == infoR.getKind() &&
          (typeL == typeR ||
           (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
           (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
}
#endif

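// Added commentary: a return adjustment is needed for covariant return
// types, e.g.
//
//   struct A { virtual A *clone(); };
//   struct B { virtual ~B(); };
//   struct C : B, A { C *clone() override; };
//
// The thunk for A::clone in C's vtable calls C::clone and must convert the
// returned C* to A*. The null check below keeps a null C* from being offset
// into a non-null but invalid A*.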
static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
                                      QualType ResultType, RValue RV,
                                      const ThunkInfo &Thunk) {
  // Emit the return adjustment.
  bool NullCheckValue = !ResultType->isReferenceType();

  llvm::BasicBlock *AdjustNull = nullptr;
  llvm::BasicBlock *AdjustNotNull = nullptr;
  llvm::BasicBlock *AdjustEnd = nullptr;

  llvm::Value *ReturnValue = RV.getScalarVal();

  if (NullCheckValue) {
    AdjustNull = CGF.createBasicBlock("adjust.null");
    AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
    AdjustEnd = CGF.createBasicBlock("adjust.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
    CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
    CGF.EmitBlock(AdjustNotNull);
  }

  auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
  auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl);
  ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF,
                                            Address(ReturnValue, ClassAlign),
                                            Thunk.Return);

  if (NullCheckValue) {
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustNull);
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustEnd);

    llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
    PHI->addIncoming(ReturnValue, AdjustNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
                     AdjustNull);
    ReturnValue = PHI;
  }

  return RValue::get(ReturnValue);
}

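// Added commentary: the clone-based approach described below exists because a
// variadic virtual such as
//
//   struct A { virtual void log(const char *Fmt, ...); };
//
// cannot be forwarded with an ordinary call: the callee's va_start must see
// the caller's original argument state, so the thunk has to *be* the function
// body rather than call into it.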
// This function does roughly the same thing as GenerateThunk, but in a
// very different way, so that va_start and va_end work correctly.
// FIXME: This function assumes "this" is the first non-sret LLVM argument of
//        a function, and that there is an alloca built in the entry block
//        for all accesses to "this".
// FIXME: This function assumes there is only one "ret" statement per function.
// FIXME: Cloning isn't correct in the presence of indirect goto!
// FIXME: This implementation of thunks bloats codesize by duplicating the
//        function definition. There are alternatives:
//        1. Add some sort of stub support to LLVM for cases where we can
//           do a this adjustment, then a sibcall.
//        2. We could transform the definition to take a va_list instead of an
//           actual variable argument list, then have the thunks (including a
//           no-op thunk for the regular definition) call va_start/va_end.
//           There's a bit of per-call overhead for this solution, but it's
//           better for codesize if the definition is long.
llvm::Function *
CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
                                      const CGFunctionInfo &FnInfo,
                                      GlobalDecl GD, const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getReturnType();

  // Get the original function.
  assert(FnInfo.isVariadic());
  llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
  llvm::Function *BaseFn = cast<llvm::Function>(Callee);

  // Clone to thunk.
  llvm::ValueToValueMapTy VMap;
  llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap,
                                              /*ModuleLevelChanges=*/false);
  CGM.getModule().getFunctionList().push_back(NewFn);
  Fn->replaceAllUsesWith(NewFn);
  NewFn->takeName(Fn);
  Fn->eraseFromParent();
  Fn = NewFn;

  // "Initialize" CGF (minimally).
  CurFn = Fn;

  // Get the "this" value.
  llvm::Function::arg_iterator AI = Fn->arg_begin();
  if (CGM.ReturnTypeUsesSRet(FnInfo))
    ++AI;

  // Find the first store of "this", which will be to the alloca associated
  // with "this".
  Address ThisPtr(&*AI, CGM.getClassPointerAlignment(MD->getParent()));
  llvm::BasicBlock *EntryBB = &Fn->front();
  llvm::BasicBlock::iterator ThisStore =
      std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) {
        return isa<llvm::StoreInst>(I) &&
               I.getOperand(0) == ThisPtr.getPointer();
      });
  assert(ThisStore != EntryBB->end() &&
         "Store of this should be in entry block?");
  // Adjust "this", if necessary.
  Builder.SetInsertPoint(&*ThisStore);
  llvm::Value *AdjustedThisPtr =
      CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
  ThisStore->setOperand(0, AdjustedThisPtr);

  if (!Thunk.Return.isEmpty()) {
    // Fix up the returned value, if necessary.
    for (llvm::BasicBlock &BB : *Fn) {
      llvm::Instruction *T = BB.getTerminator();
      if (isa<llvm::ReturnInst>(T)) {
        RValue RV = RValue::get(T->getOperand(0));
        T->eraseFromParent();
        Builder.SetInsertPoint(&BB);
        RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
        Builder.CreateRet(RV.getScalarVal());
        break;
      }
    }
  }

  return Fn;
}

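// Added commentary (hedged): in StartThunk below, HasThisReturn covers ABIs
// where constructors and destructors return 'this' (e.g. ARM), and
// hasMostDerivedReturn covers the Microsoft ABI's deleting destructor, which
// returns the address of the most-derived object as a void*; the thunk's own
// return type must match whichever convention applies.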
void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
                                 const CGFunctionInfo &FnInfo) {
  assert(!CurGD.getDecl() && "CurGD was already set!");
  CurGD = GD;
  CurFuncIsThunk = true;

  // Build FunctionArgs.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  QualType ThisType = MD->getThisType(getContext());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = CGM.getCXXABI().HasThisReturn(GD)
                            ? ThisType
                            : CGM.getCXXABI().hasMostDerivedReturn(GD)
                                  ? CGM.getContext().VoidPtrTy
                                  : FPT->getReturnType();
  FunctionArgList FunctionArgs;

  // Create the implicit 'this' parameter declaration.
  CGM.getCXXABI().buildThisParam(*this, FunctionArgs);

  // Add the rest of the parameters.
  FunctionArgs.append(MD->param_begin(), MD->param_end());

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);

  // Start defining the function.
  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                MD->getLocation(), MD->getLocation());

  // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;
  CurCodeDecl = MD;
  CurFuncDecl = MD;
}

void CodeGenFunction::FinishThunk() {
  // Clear these to restore the invariants expected by
  // StartFunction/FinishFunction.
  CurCodeDecl = nullptr;
  CurFuncDecl = nullptr;

  FinishFunction();
}

void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
                                                const ThunkInfo *Thunk) {
  assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
         "Please use a new CGF for this thunk");
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr =
      Thunk ? CGM.getCXXABI().performThisAdjustment(
                  *this, LoadCXXThisAddress(), Thunk->This)
            : LoadCXXThis();

  if (CurFnInfo->usesInAlloca()) {
    // We don't handle return adjusting thunks, because they require us to call
    // the copy constructor. For now, fall through and pretend the return
    // adjustment was empty so we don't crash.
    if (Thunk && !Thunk->Return.isEmpty()) {
      CGM.ErrorUnsupported(
          MD, "non-trivial argument copy for return-adjusting thunk");
    }
    EmitMustTailThunk(MD, AdjustedThisPtr, Callee);
    return;
  }

  // Start building CallArgs.
  CallArgList CallArgs;
  QualType ThisType = MD->getThisType(getContext());
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs);

  // Add the rest of the arguments.
  for (const ParmVarDecl *PD : MD->params())
    EmitDelegateCallArg(CallArgs, PD, PD->getLocStart());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo =
    CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                       RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
         CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
         CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
  assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
  for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   CurFnInfo->arg_begin()[i].info,
                   CurFnInfo->arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  QualType ResultType = CGM.getCXXABI().HasThisReturn(CurGD)
                            ? ThisType
                            : CGM.getCXXABI().hasMostDerivedReturn(CurGD)
                                  ? CGM.getContext().VoidPtrTy
                                  : FPT->getReturnType();
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  llvm::Instruction *CallOrInvoke;
  RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD, &CallOrInvoke);

  // Consider return adjustment if we have ThunkInfo.
  if (Thunk && !Thunk->Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);
  else if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CallOrInvoke))
    Call->setTailCallKind(llvm::CallInst::TCK_Tail);

  // Emit return.
  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishThunk();
}

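// Added commentary (a sketch only; the exact shape is target-dependent): the
// musttail path below aims for IR of roughly this form for an x86 thiscall
// method:
//
//   %adj = <this-adjustment of %this>
//   %r = musttail call x86_thiscallcc i32 @callee(%struct.C* %adj, ...)
//   ret i32 %r
//
// 'musttail' guarantees tail-call lowering, which is what makes forwarding
// the incoming arguments byte-for-byte (including inalloca packs) sound.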
void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
                                        llvm::Value *AdjustedThisPtr,
                                        llvm::Value *Callee) {
  // Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
  // to translate AST arguments into LLVM IR arguments. For thunks, we know
  // that the caller prototype more or less matches the callee prototype with
  // the exception of 'this'.
  SmallVector<llvm::Value *, 8> Args;
  for (llvm::Argument &A : CurFn->args())
    Args.push_back(&A);

  // Set the adjusted 'this' pointer.
  const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info;
  if (ThisAI.isDirect()) {
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    int ThisArgNo = RetAI.isIndirect() && !RetAI.isSRetAfterThis() ? 1 : 0;
    llvm::Type *ThisType = Args[ThisArgNo]->getType();
    if (ThisType != AdjustedThisPtr->getType())
      AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
    Args[ThisArgNo] = AdjustedThisPtr;
  } else {
    assert(ThisAI.isInAlloca() && "this is passed directly or inalloca");
    Address ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
    llvm::Type *ThisType = ThisAddr.getElementType();
    if (ThisType != AdjustedThisPtr->getType())
      AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
    Builder.CreateStore(AdjustedThisPtr, ThisAddr);
  }

  // Emit the musttail call manually. Even if the prologue pushed cleanups, we
  // don't actually want to run them.
  llvm::CallInst *Call = Builder.CreateCall(Callee, Args);
  Call->setTailCallKind(llvm::CallInst::TCK_MustTail);

  // Apply the standard set of call attributes.
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(Callee->getName(), *CurFnInfo, MD, AttributeList,
                             CallingConv, /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs =
      llvm::AttributeSet::get(getLLVMContext(), AttributeList);
  Call->setAttributes(Attrs);
  Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  if (Call->getType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Call);

  // Finish the function to maintain CodeGenFunction invariants.
  // FIXME: Don't emit unreachable code.
  EmitBlock(createBasicBlock());
  FinishFunction();
}

void CodeGenFunction::generateThunk(llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    GlobalDecl GD, const ThunkInfo &Thunk) {
  StartThunk(Fn, GD, FnInfo);

  // Get our callee.
  llvm::Type *Ty =
      CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);

  // Make the call and return the result.
  EmitCallAndReturnForThunk(Callee, &Thunk);
}

void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
                               bool ForVTable) {
  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);

  // FIXME: re-use FnInfo in this computation.
  llvm::Constant *C = CGM.GetAddrOfThunk(GD, Thunk);
  llvm::GlobalValue *Entry;

  // Strip off a bitcast if we got one back.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(C)) {
    assert(CE->getOpcode() == llvm::Instruction::BitCast);
    Entry = cast<llvm::GlobalValue>(CE->getOperand(0));
  } else {
    Entry = cast<llvm::GlobalValue>(C);
  }

  // There's already a declaration with the same name, check if it has the same
  // type or if we need to replace it.
  if (Entry->getType()->getElementType() !=
      CGM.getTypes().GetFunctionTypeForVTable(GD)) {
    llvm::GlobalValue *OldThunkFn = Entry;

    // If the types mismatch then we have to rewrite the definition.
    assert(OldThunkFn->isDeclaration() &&
           "Shouldn't replace non-declaration");

    // Remove the name from the old thunk function and get a new thunk.
    OldThunkFn->setName(StringRef());
    Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk));

    // If needed, replace the old thunk with a bitcast.
    if (!OldThunkFn->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
          llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
      OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
    }

    // Remove the old thunk.
    OldThunkFn->eraseFromParent();
  }

  llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
  bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
  bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions;

  if (!ThunkFn->isDeclaration()) {
    if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) {
      // There is already a thunk emitted for this function, do nothing.
      return;
    }

    setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
    return;
  }

  CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);

  if (ThunkFn->isVarArg()) {
    // Varargs thunks are special; we can't just generate a call because
    // we can't copy the varargs. Our implementation is rather
    // expensive/sucky at the moment, so don't generate the thunk unless
    // we have to.
    // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
    if (UseAvailableExternallyLinkage)
      return;
    ThunkFn =
        CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
  } else {
    // Normal thunk body generation.
    CodeGenFunction(CGM).generateThunk(ThunkFn, FnInfo, GD, Thunk);
  }

  setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
}

void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
                                             const ThunkInfo &Thunk) {
  // If the ABI has key functions, only the TU with the key function should emit
  // the thunk. However, we can allow inlining of thunks if we emit them with
  // available_externally linkage together with vtables when optimizations are
  // enabled.
  if (CGM.getTarget().getCXXABI().hasKeyFunctions() &&
      !CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // We can't emit thunks for member functions with incomplete types.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  if (!CGM.getTypes().isFuncTypeConvertible(
          MD->getType()->castAs<FunctionType>()))
    return;

  emitThunk(GD, Thunk, /*ForVTable=*/true);
}

void CodeGenVTables::EmitThunks(GlobalDecl GD)
{
  const CXXMethodDecl *MD =
      cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();

  // We don't need to generate thunks for the base destructor.
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
    return;

  const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector =
      VTContext->getThunkInfo(GD);

  if (!ThunkInfoVector)
    return;

  for (const ThunkInfo &Thunk : *ThunkInfoVector)
    emitThunk(GD, Thunk, /*ForVTable=*/false);
}

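// Added commentary: under the Itanium ABI, each vtable group laid out below
// has the shape
//
//   [ vcall/vbase offsets... | offset-to-top | RTTI | virtual function ptrs ]
//
// with the address point just past the RTTI slot. Every component is emitted
// as an i8*, so the whole initializer is a flat array of pointers.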
llvm::Constant *CodeGenVTables::CreateVTableInitializer(
    const CXXRecordDecl *RD, const VTableComponent *Components,
    unsigned NumComponents, const VTableLayout::VTableThunkTy *VTableThunks,
    unsigned NumVTableThunks, llvm::Constant *RTTI) {
  SmallVector<llvm::Constant *, 64> Inits;

  llvm::Type *Int8PtrTy = CGM.Int8PtrTy;

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  unsigned NextVTableThunkIndex = 0;

  llvm::Constant *PureVirtualFn = nullptr, *DeletedVirtualFn = nullptr;

  for (unsigned I = 0; I != NumComponents; ++I) {
    VTableComponent Component = Components[I];

    llvm::Constant *Init = nullptr;

    switch (Component.getKind()) {
    case VTableComponent::CK_VCallOffset:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getVCallOffset().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_VBaseOffset:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getVBaseOffset().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_OffsetToTop:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getOffsetToTop().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_RTTI:
      Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
      break;
    case VTableComponent::CK_FunctionPointer:
    case VTableComponent::CK_CompleteDtorPointer:
    case VTableComponent::CK_DeletingDtorPointer: {
      GlobalDecl GD;

      // Get the right global decl.
      switch (Component.getKind()) {
      default:
        llvm_unreachable("Unexpected vtable component kind");
      case VTableComponent::CK_FunctionPointer:
        GD = Component.getFunctionDecl();
        break;
      case VTableComponent::CK_CompleteDtorPointer:
        GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
        break;
      case VTableComponent::CK_DeletingDtorPointer:
        GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
        break;
      }

      if (CGM.getLangOpts().CUDA) {
        // Emit NULL for methods we can't codegen on this
        // side. Otherwise we'd end up with vtable with unresolved
        // references.
        const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
        // OK on device side: functions w/ __device__ attribute
        // OK on host side: anything except __device__-only functions.
        bool CanEmitMethod = CGM.getLangOpts().CUDAIsDevice
                                 ? MD->hasAttr<CUDADeviceAttr>()
                                 : (MD->hasAttr<CUDAHostAttr>() ||
                                    !MD->hasAttr<CUDADeviceAttr>());
        if (!CanEmitMethod) {
          Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
          break;
        }
        // Method is acceptable, continue processing as usual.
      }

      if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
        // We have a pure virtual member function.
        if (!PureVirtualFn) {
          llvm::FunctionType *Ty =
              llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
          StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
          PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
          if (auto *F = dyn_cast<llvm::Function>(PureVirtualFn))
            F->setUnnamedAddr(true);
          PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
                                                         CGM.Int8PtrTy);
        }
        Init = PureVirtualFn;
      } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
        if (!DeletedVirtualFn) {
          llvm::FunctionType *Ty =
              llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
          StringRef DeletedCallName =
              CGM.getCXXABI().GetDeletedVirtualCallName();
          DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName);
          if (auto *F = dyn_cast<llvm::Function>(DeletedVirtualFn))
            F->setUnnamedAddr(true);
          DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn,
                                                            CGM.Int8PtrTy);
        }
        Init = DeletedVirtualFn;
      } else {
        // Check if we should use a thunk.
        if (NextVTableThunkIndex < NumVTableThunks &&
            VTableThunks[NextVTableThunkIndex].first == I) {
          const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;

          maybeEmitThunkForVTable(GD, Thunk);
          Init = CGM.GetAddrOfThunk(GD, Thunk);

          NextVTableThunkIndex++;
        } else {
          llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);

          Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
        }

        Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
      }
      break;
    }

    case VTableComponent::CK_UnusedFunctionPointer:
      Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
      break;
    }

    Inits.push_back(Init);
  }

  llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
  return llvm::ConstantArray::get(ArrayType, Inits);
}

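// Added commentary: construction vtables are the Itanium ABI's mechanism for
// virtual calls made while a base-class subobject of a class with virtual
// bases is being constructed or destroyed; the VTT of the most-derived class
// points at them so (cde)tors can install semantics that match the current
// stage of construction.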
llvm::GlobalVariable *
CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
                                           const BaseSubobject &Base,
                                           bool BaseIsVirtual,
                                           llvm::GlobalVariable::LinkageTypes Linkage,
                                           VTableAddressPointsMapTy& AddressPoints) {
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(Base.getBase());

  std::unique_ptr<VTableLayout> VTLayout(
      getItaniumVTableContext().createConstructionVTableLayout(
          Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD));

  // Add the address points.
  AddressPoints = VTLayout->getAddressPoints();

  // Get the mangled construction vtable name.
  SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
      .mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(),
                           Base.getBase(), Out);
  StringRef Name = OutName.str();

  llvm::ArrayType *ArrayType =
      llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());

  // Construction vtable symbols are not part of the Itanium ABI, so we cannot
  // guarantee that they actually will be available externally. Instead, when
  // emitting an available_externally VTT, we provide references to an internal
  // linkage construction vtable. The ABI only requires complete-object vtables
  // to be the same for all instances of a type, not construction vtables.
  if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
    Linkage = llvm::GlobalVariable::InternalLinkage;

  // Create the variable that will hold the construction vtable.
  llvm::GlobalVariable *VTable =
      CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
  CGM.setGlobalVisibility(VTable, RD);

  // V-tables are always unnamed_addr.
  VTable->setUnnamedAddr(true);

  llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(
      CGM.getContext().getTagDeclType(Base.getBase()));

  // Create and set the initializer.
  llvm::Constant *Init = CreateVTableInitializer(
      Base.getBase(), VTLayout->vtable_component_begin(),
      VTLayout->getNumVTableComponents(), VTLayout->vtable_thunk_begin(),
      VTLayout->getNumVTableThunks(), RTTI);
  VTable->setInitializer(Init);

  CGM.EmitVTableBitSetEntries(VTable, *VTLayout.get());

  return VTable;
}

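// Added commentary: emitting a vtable available_externally gives the
// optimizer a concrete initializer to look through, enabling
// devirtualization of calls in this TU, while still leaving the real
// definition to the TU that owns it.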
static bool shouldEmitAvailableExternallyVTable(const CodeGenModule &CGM,
                                                const CXXRecordDecl *RD) {
  return CGM.getCodeGenOpts().OptimizationLevel > 0 &&
         CGM.getCXXABI().canSpeculativelyEmitVTable(RD);
}

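// Added commentary: under the Itanium ABI the key function is the first
// non-pure, non-inline virtual member function defined outside the class;
// the TU containing its definition is the one required to emit the vtable,
// which is what the key-function branch below relies on.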
/// Compute the required linkage of the vtable for the given class.
|
2013-01-26 06:31:03 +08:00
|
|
|
///
|
|
|
|
/// Note that we only call this at the end of the translation unit.
|
|
|
|
llvm::GlobalVariable::LinkageTypes
|
|
|
|
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
|
2013-05-13 08:12:11 +08:00
|
|
|
if (!RD->isExternallyVisible())
|
2013-01-26 06:31:03 +08:00
|
|
|
return llvm::GlobalVariable::InternalLinkage;
|
|
|
|
|
|
|
|
// We're at the end of the translation unit, so the current key
|
|
|
|
// function is fully correct.
|
2014-10-24 06:40:46 +08:00
|
|
|
const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD);
|
|
|
|
if (keyFunction && !RD->hasAttr<DLLImportAttr>()) {
|
2013-01-26 06:31:03 +08:00
|
|
|
// If this class has a key function, use that to determine the
|
|
|
|
// linkage of the vtable.
|
2014-05-21 13:09:00 +08:00
|
|
|
const FunctionDecl *def = nullptr;
|
2013-01-26 06:31:03 +08:00
|
|
|
if (keyFunction->hasBody(def))
|
|
|
|
keyFunction = cast<CXXMethodDecl>(def);
|
|
|
|
|
|
|
|
switch (keyFunction->getTemplateSpecializationKind()) {
|
|
|
|
case TSK_Undeclared:
|
|
|
|
case TSK_ExplicitSpecialization:
|
2015-07-24 12:04:49 +08:00
|
|
|
assert((def || CodeGenOpts.OptimizationLevel > 0) &&
|
|
|
|
"Shouldn't query vtable linkage without key function or "
|
|
|
|
"optimizations");
|
|
|
|
if (!def && CodeGenOpts.OptimizationLevel > 0)
|
|
|
|
return llvm::GlobalVariable::AvailableExternallyLinkage;
|
|
|
|
|
2013-01-26 06:31:03 +08:00
|
|
|
if (keyFunction->isInlined())
|
|
|
|
return !Context.getLangOpts().AppleKext ?
|
|
|
|
llvm::GlobalVariable::LinkOnceODRLinkage :
|
|
|
|
llvm::Function::InternalLinkage;
|
|
|
|
|
|
|
|
return llvm::GlobalVariable::ExternalLinkage;
|
2015-07-02 22:44:35 +08:00
|
|
|
|
2013-01-26 06:31:03 +08:00
|
|
|
case TSK_ImplicitInstantiation:
|
|
|
|
return !Context.getLangOpts().AppleKext ?
|
|
|
|
llvm::GlobalVariable::LinkOnceODRLinkage :
|
|
|
|
llvm::Function::InternalLinkage;
|
|
|
|
|
|
|
|
case TSK_ExplicitInstantiationDefinition:
|
|
|
|
return !Context.getLangOpts().AppleKext ?
|
|
|
|
llvm::GlobalVariable::WeakODRLinkage :
|
|
|
|
llvm::Function::InternalLinkage;
|
|
|
|
|
|
|
|
case TSK_ExplicitInstantiationDeclaration:
|
2013-09-04 05:05:13 +08:00
|
|
|
llvm_unreachable("Should not have been asked to emit this");
|
2013-01-26 06:31:03 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// -fapple-kext mode does not support weak linkage, so we must use
|
|
|
|
// internal linkage.
|
|
|
|
if (Context.getLangOpts().AppleKext)
|
|
|
|
return llvm::Function::InternalLinkage;
|
2014-05-31 00:59:42 +08:00
|
|
|
|
|
|
|
llvm::GlobalVariable::LinkageTypes DiscardableODRLinkage =
|
|
|
|
llvm::GlobalValue::LinkOnceODRLinkage;
|
|
|
|
llvm::GlobalVariable::LinkageTypes NonDiscardableODRLinkage =
|
|
|
|
llvm::GlobalValue::WeakODRLinkage;
|
|
|
|
if (RD->hasAttr<DLLExportAttr>()) {
|
|
|
|
// Cannot discard exported vtables.
|
|
|
|
DiscardableODRLinkage = NonDiscardableODRLinkage;
|
|
|
|
} else if (RD->hasAttr<DLLImportAttr>()) {
|
|
|
|
// Imported vtables are available externally.
|
|
|
|
DiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
|
|
|
|
NonDiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
|
|
|
|
}
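
  // For example (a sketch assuming a Windows target):
  //   struct __declspec(dllexport) S { virtual void f() {} };
  // must keep its vtable in the exporting DLL (non-discardable), while a
  // dllimport of the same class can treat the vtable as available_externally
  // and reference the copy in the exporting DLL.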

  switch (RD->getTemplateSpecializationKind()) {
    case TSK_Undeclared:
    case TSK_ExplicitSpecialization:
    case TSK_ImplicitInstantiation:
      return DiscardableODRLinkage;

    case TSK_ExplicitInstantiationDeclaration:
      return shouldEmitAvailableExternallyVTable(*this, RD)
                 ? llvm::GlobalVariable::AvailableExternallyLinkage
                 : llvm::GlobalVariable::ExternalLinkage;

    case TSK_ExplicitInstantiationDefinition:
      return NonDiscardableODRLinkage;
  }

  llvm_unreachable("Invalid TemplateSpecializationKind!");
}
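
// A rough summary of the mapping above (an assumed sketch, Itanium ABI,
// no -fapple-kext and no DLL attributes):
//   struct A { virtual void f(); };      // A::f is the key function
//     A::f defined out-of-line in this TU     -> ExternalLinkage
//     A::f turns out to be defined inline     -> LinkOnceODRLinkage
//   explicit template instantiation definition -> WeakODRLinkage
//   no key function (e.g. all-inline class)    -> LinkOnceODRLinkage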

/// This is a callback from Sema to tell us that a particular vtable is
/// required to be emitted in this translation unit.
///
/// This is only called for vtables that _must_ be emitted (mainly due to key
/// functions). For weak vtables, CodeGen tracks when they are needed and
/// emits them as-needed.
void CodeGenModule::EmitVTable(CXXRecordDecl *theClass) {
  VTables.GenerateClassData(theClass);
}

void
CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(RD);

  if (RD->getNumVBases())
    CGM.getCXXABI().emitVirtualInheritanceTables(RD);

  CGM.getCXXABI().emitVTableDefinitions(*this, RD);
}

/// At this point in the translation unit, does it appear that we can
/// rely on the vtable being defined elsewhere in the program?
///
/// The response is really only definitive when called at the end of
/// the translation unit.
///
/// The only semantic restriction here is that the object file should
/// not contain a vtable definition when that vtable is defined
/// strongly elsewhere. Otherwise, we'd just like to avoid emitting
/// vtables when unnecessary.
bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
  assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");

  // We always synthesize vtables on the import side, regardless of whether
  // or not the class has an explicit instantiation declaration.
  if (CGM.getTarget().getCXXABI().isMicrosoft() && RD->hasAttr<DLLImportAttr>())
    return false;

  // If we have an explicit instantiation declaration (and not a
  // definition), the vtable is defined elsewhere.
  TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
  if (TSK == TSK_ExplicitInstantiationDeclaration)
    return true;

  // Otherwise, if the class is an instantiated template, the
  // vtable must be defined here.
  if (TSK == TSK_ImplicitInstantiation ||
      TSK == TSK_ExplicitInstantiationDefinition)
    return false;

  // Otherwise, if the class doesn't have a key function (possibly
  // anymore), the vtable must be defined here.
  const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD);
  if (!keyFunction)
    return false;

  // Otherwise, if we don't have a definition of the key function, the
  // vtable must be defined somewhere else.
  return !keyFunction->hasBody();
}
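
// Illustrative example (assumed, Itanium ABI): given
//   struct S { virtual void f(); };   // S::f is the key function
// a TU that merely uses S sees S::f with no body and treats the vtable as
// external; the TU that defines S::f() answers false here and must emit it.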

/// Given that we're currently at the end of the translation unit, and
/// we've emitted a reference to the vtable for this class, should
/// we define that vtable?
static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
                                                   const CXXRecordDecl *RD) {
  // If the vtable is internal, it has to be defined here.
  if (!CGM.getVTables().isVTableExternal(RD))
    return true;

  // If it's external, we may still want to emit it as available_externally.
  return shouldEmitAvailableExternallyVTable(CGM, RD);
}

/// Given that at some point we emitted a reference to one or more
/// vtables, and that we are now at the end of the translation unit,
/// decide whether we should emit them.
void CodeGenModule::EmitDeferredVTables() {
#ifndef NDEBUG
  // Remember the size of DeferredVTables, because we're going to assume
  // that this entire operation doesn't modify it.
  size_t savedSize = DeferredVTables.size();
#endif

  for (const CXXRecordDecl *RD : DeferredVTables)
    if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
      VTables.GenerateClassData(RD);

  assert(savedSize == DeferredVTables.size() &&
         "deferred extra vtables during vtable emission?");
  DeferredVTables.clear();
}

bool CodeGenModule::NeedVTableBitSets() {
  return getCodeGenOpts().WholeProgramVTables ||
         getLangOpts().Sanitize.has(SanitizerKind::CFIVCall) ||
         getLangOpts().Sanitize.has(SanitizerKind::CFINVCall) ||
         getLangOpts().Sanitize.has(SanitizerKind::CFIDerivedCast) ||
         getLangOpts().Sanitize.has(SanitizerKind::CFIUnrelatedCast);
}
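
// As a usage sketch (driver spellings assumed for this era), any of the
// following enable bit set emission:
//   clang++ -flto -fwhole-program-vtables ...
//   clang++ -flto -fsanitize=cfi-vcall ...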

bool CodeGenModule::IsBitSetBlacklistedRecord(const CXXRecordDecl *RD) {
  std::string TypeName = RD->getQualifiedNameAsString();
  auto isInBlacklist = [&](const SanitizerBlacklist &BL) {
    if (RD->hasAttr<UuidAttr>() && BL.isBlacklistedType("attr:uuid"))
      return true;

    return BL.isBlacklistedType(TypeName);
  };

  return isInBlacklist(WholeProgramVTablesBlacklist) ||
         ((LangOpts.Sanitize.has(SanitizerKind::CFIVCall) ||
           LangOpts.Sanitize.has(SanitizerKind::CFINVCall) ||
           LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) ||
           LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast)) &&
          isInBlacklist(getContext().getSanitizerBlacklist()));
}
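
// A blacklist file consulted here uses entries of the form (illustrative):
//   type:std::*
//   type:attr:uuid
// so, e.g., records under namespace std, or records carrying a uuid
// attribute, can be exempted from bit set checks.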

void CodeGenModule::EmitVTableBitSetEntries(llvm::GlobalVariable *VTable,
                                            const VTableLayout &VTLayout) {
  if (!NeedVTableBitSets())
    return;

  CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));

  typedef std::pair<const CXXRecordDecl *, unsigned> BSEntry;
  std::vector<BSEntry> BitsetEntries;
  // Create a bit set entry for each address point.
  for (auto &&AP : VTLayout.getAddressPoints()) {
    if (IsBitSetBlacklistedRecord(AP.first.getBase()))
      continue;

    BitsetEntries.push_back(std::make_pair(AP.first.getBase(), AP.second));
  }

  // Sort the bit set entries for determinism.
  std::sort(BitsetEntries.begin(), BitsetEntries.end(),
            [this](const BSEntry &E1, const BSEntry &E2) {
    if (&E1 == &E2)
      return false;

    std::string S1;
    llvm::raw_string_ostream O1(S1);
    getCXXABI().getMangleContext().mangleTypeName(
        QualType(E1.first->getTypeForDecl(), 0), O1);
    O1.flush();

    std::string S2;
    llvm::raw_string_ostream O2(S2);
    getCXXABI().getMangleContext().mangleTypeName(
        QualType(E2.first->getTypeForDecl(), 0), O2);
    O2.flush();

    if (S1 < S2)
      return true;
    if (S1 != S2)
      return false;

    return E1.second < E2.second;
  });

  llvm::NamedMDNode *BitsetsMD =
      getModule().getOrInsertNamedMetadata("llvm.bitsets");
  for (auto BitsetEntry : BitsetEntries)
    CreateVTableBitSetEntry(BitsetsMD, VTable,
                            PointerWidth * BitsetEntry.second,
                            BitsetEntry.first);
}
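
// The resulting named metadata looks roughly like this (an assumed sketch of
// the llvm.bitsets encoding: mangled type name, vtable, byte offset of the
// address point):
//   !llvm.bitsets = !{!0, !1}
//   !0 = !{!"_ZTS1A", [4 x i8*]* @_ZTV1A, i64 16}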