//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This abstract class defines the interface for Objective-C runtime-specific
// code generation. It provides some concrete helper methods for functionality
// shared between all (or most) of the Objective-C runtimes supported by clang.
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"
#include "CGCleanup.h"
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

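// Compute the byte offset of an ivar from the interface's compile-time
// layout. Runtimes with non-fragile ivars generally load the authoritative
// offset at run time; the value computed here reflects the static layout
// (for example, as the initial value of an ivar-offset variable).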
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCInterfaceDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return CGM.getContext().lookupFieldBitOffset(OID, nullptr, Ivar) /
         CGM.getContext().getCharWidth();
}

uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCImplementationDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return CGM.getContext().lookupFieldBitOffset(OID->getClassInterface(), OID,
                                               Ivar) /
         CGM.getContext().getCharWidth();
}

unsigned CGObjCRuntime::ComputeBitfieldBitOffset(CodeGen::CodeGenModule &CGM,
                                                 const ObjCInterfaceDecl *ID,
                                                 const ObjCIvarDecl *Ivar) {
  return CGM.getContext().lookupFieldBitOffset(ID, ID->getImplementation(),
                                               Ivar);
}

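// Form an l-value for the given ivar of interface OID, starting from the
// object pointer BaseValue and a byte offset Offset that has already been
// computed (typically a constant or a value loaded from the runtime's
// ivar-offset variable). Conceptually this produces
//   *(IvarTy *)((char *)BaseValue + Offset)
// and, for bit-field ivars, additionally builds a CGBitFieldInfo describing
// the sub-byte access.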
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*)((char *)BaseValue + Offset).
  QualType InterfaceTy{OID->getTypeForDecl(), 0};
  QualType ObjectPtrTy =
      CGF.CGM.getContext().getObjCObjectPointerType(InterfaceTy);
  QualType IvarTy =
      Ivar->getUsageType(ObjectPtrTy).withCVRQualifiers(CVRQualifiers);
  llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");

  if (!Ivar->isBitField()) {
    V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
    LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field; the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees
  // the runtime makes to us, and (b) we don't have a way to specify that the
  // struct is at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: this routine is only valid for
  // non-synthesized ivars, yet it may be called for synthesized ivars.
  // However, a synthesized ivar can never be a bit-field, so this is safe.
  uint64_t FieldBitOffset =
      CGF.CGM.getContext().lookupFieldBitOffset(OID, nullptr, Ivar);
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
  uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
  CharUnits StorageSize = CGF.CGM.getContext().toCharUnitsFromBits(
      llvm::alignTo(BitOffset + BitFieldSize, AlignmentBits));
  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful; these should be uniqued or part of
  // some layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of
  // these objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
      CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset,
                               BitFieldSize,
                               CGF.CGM.getContext().toBits(StorageSize),
                               CharUnits::fromQuantity(0)));

  Address Addr(V, Alignment);
  Addr = CGF.Builder.CreateElementBitCast(
      Addr, llvm::Type::getIntNTy(CGF.getLLVMContext(), Info->StorageSize));
  return LValue::MakeBitfield(Addr, *Info, IvarTy,
                              LValueBaseInfo(AlignmentSource::Decl),
                              TBAAAccessInfo());
}

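// Helpers for @try / @catch emission: one CatchHandler record per @catch
// clause, plus a cleanup that calls the runtime's end-catch entry point
// whenever a catch scope is left (normally or by exception).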
namespace {
  struct CatchHandler {
    const VarDecl *Variable;
    const Stmt *Body;
    llvm::BasicBlock *Block;
    llvm::Constant *TypeInfo;
    /// Flags used to differentiate cleanups and catchalls in Windows SEH
    unsigned Flags;
  };

  struct CallObjCEndCatch final : EHScopeStack::Cleanup {
    CallObjCEndCatch(bool MightThrow, llvm::FunctionCallee Fn)
        : MightThrow(MightThrow), Fn(Fn) {}
    bool MightThrow;
    llvm::FunctionCallee Fn;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (MightThrow)
        CGF.EmitRuntimeCallOrInvoke(Fn);
      else
        CGF.EmitNounwindRuntimeCall(Fn);
    }
  };
}

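// Emit an Objective-C @try/@catch/@finally statement in terms of the
// runtime-provided hooks:
//   - beginCatchFn (when provided) is called on the caught exception on
//     entry to a @catch,
//   - endCatchFn is run as a cleanup when the @catch is left on any path,
//   - exceptionRethrowFn is used by the @finally machinery to rethrow.
// Roughly, for
//   @try { tryBody } @catch (T *e) { catchBody } @finally { finallyBody }
// we enter the @finally cleanup (or, with funclet-based EH, outline an SEH
// finally helper), push one EH handler per @catch clause, emit tryBody, and
// then emit each handler: adjust the exception with beginCatchFn, bind the
// @catch parameter, emit catchBody, and branch to the continuation block.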
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
                                     const ObjCAtTryStmt &S,
                                     llvm::FunctionCallee beginCatchFn,
                                     llvm::FunctionCallee endCatchFn,
                                     llvm::FunctionCallee exceptionRethrowFn) {
  // Jump destination for falling out of catch bodies.
  CodeGenFunction::JumpDest Cont;
  if (S.getNumCatchStmts())
    Cont = CGF.getJumpDestInCurrentScope("eh.cont");

  bool useFunclets = EHPersonality::get(CGF).usesFuncletPads();

  CodeGenFunction::FinallyInfo FinallyInfo;
  if (!useFunclets)
    if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
      FinallyInfo.enter(CGF, Finally->getFinallyBody(),
                        beginCatchFn, endCatchFn, exceptionRethrowFn);

  SmallVector<CatchHandler, 8> Handlers;

  // Enter the catch, if there is one.
  if (S.getNumCatchStmts()) {
    for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
      const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
      const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();

      Handlers.push_back(CatchHandler());
      CatchHandler &Handler = Handlers.back();
      Handler.Variable = CatchDecl;
      Handler.Body = CatchStmt->getCatchBody();
      Handler.Block = CGF.createBasicBlock("catch");
      Handler.Flags = 0;

      // @catch(...) always matches.
      if (!CatchDecl) {
        auto catchAll = getCatchAllTypeInfo();
        Handler.TypeInfo = catchAll.RTTI;
        Handler.Flags = catchAll.Flags;
        // Don't consider any other catches.
        break;
      }

      Handler.TypeInfo = GetEHType(CatchDecl->getType());
    }

    EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
    for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
      Catch->setHandler(I, {Handlers[I].TypeInfo, Handlers[I].Flags},
                        Handlers[I].Block);
  }

  if (useFunclets)
    if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) {
      CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
      if (!CGF.CurSEHParent)
        CGF.CurSEHParent = cast<NamedDecl>(CGF.CurFuncDecl);

      // Outline the finally block into an SEH helper function.
      const Stmt *FinallyBlock = Finally->getFinallyBody();
      HelperCGF.startOutlinedSEHHelper(CGF, /*isFilter*/ false, FinallyBlock);

      // Emit the finally body in the outlined helper.
      HelperCGF.EmitStmt(FinallyBlock);

      HelperCGF.FinishFunction(FinallyBlock->getEndLoc());

      llvm::Function *FinallyFunc = HelperCGF.CurFn;

      // Push a cleanup for __finally blocks.
      CGF.pushSEHCleanup(NormalAndEHCleanup, FinallyFunc);
    }

  // Emit the try body.
  CGF.EmitStmt(S.getTryBody());

  // Leave the try.
  if (S.getNumCatchStmts())
    CGF.popCatchScope();

  // Remember where we were.
  CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();

  // Emit the handlers.
  for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
    CatchHandler &Handler = Handlers[I];

    CGF.EmitBlock(Handler.Block);
    llvm::CatchPadInst *CPI = nullptr;
    SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
        CGF.CurrentFuncletPad);
    if (useFunclets)
      if ((CPI = dyn_cast_or_null<llvm::CatchPadInst>(
               Handler.Block->getFirstNonPHI()))) {
        CGF.CurrentFuncletPad = CPI;
        CPI->setOperand(2, CGF.getExceptionSlot().getPointer());
      }
    llvm::Value *RawExn = CGF.getExceptionFromSlot();

    // Enter the catch.
    llvm::Value *Exn = RawExn;
    if (beginCatchFn)
      Exn = CGF.EmitNounwindRuntimeCall(beginCatchFn, RawExn, "exn.adjusted");

    CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());

    if (endCatchFn) {
      // Add a cleanup to leave the catch.
      bool EndCatchMightThrow = (Handler.Variable == nullptr);

      CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
                                                EndCatchMightThrow,
                                                endCatchFn);
    }

    // Bind the catch parameter if it exists.
    if (const VarDecl *CatchParam = Handler.Variable) {
      llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
      llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);

      CGF.EmitAutoVarDecl(*CatchParam);
      EmitInitOfCatchParam(CGF, CastExn, CatchParam);
    }
    if (CPI)
      CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);

    CGF.ObjCEHValueStack.push_back(Exn);
    CGF.EmitStmt(Handler.Body);
    CGF.ObjCEHValueStack.pop_back();

    // Leave any cleanups associated with the catch.
    cleanups.ForceCleanup();

    CGF.EmitBranchThroughCleanup(Cont);
  }

  // Go back to the try-statement fallthrough.
  CGF.Builder.restoreIP(SavedIP);

  // Pop out of the finally.
  if (!useFunclets && S.getFinallyStmt())
    FinallyInfo.exit(CGF);

  if (Cont.isValid())
    CGF.EmitBlock(Cont.getBlock());
}

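// Initialize the @catch parameter from the (already begun) exception object,
// honoring the parameter's ARC ownership: __strong retains the exception,
// __weak initializes a weak reference, and unqualified, __unsafe_unretained,
// or __autoreleasing parameters get a plain store.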
void CGObjCRuntime::EmitInitOfCatchParam(CodeGenFunction &CGF,
                                         llvm::Value *exn,
                                         const VarDecl *paramDecl) {
  Address paramAddr = CGF.GetAddrOfLocalVar(paramDecl);

  switch (paramDecl->getType().getQualifiers().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    exn = CGF.EmitARCRetainNonBlock(exn);
    LLVM_FALLTHROUGH;

  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Autoreleasing:
    CGF.Builder.CreateStore(exn, paramAddr);
    return;

  case Qualifiers::OCL_Weak:
    CGF.EmitARCInitWeak(paramAddr, exn);
    return;
  }
  llvm_unreachable("invalid ownership qualifier");
}

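// Cleanup that releases the @synchronized lock by calling the runtime's
// sync-exit entry point (e.g. objc_sync_exit) with the saved lock object.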
namespace {
  struct CallSyncExit final : EHScopeStack::Cleanup {
    llvm::FunctionCallee SyncExitFn;
    llvm::Value *SyncArg;
    CallSyncExit(llvm::FunctionCallee SyncExitFn, llvm::Value *SyncArg)
        : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(SyncExitFn, SyncArg);
    }
  };
}

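// Emit an @synchronized statement. Roughly, for
//   @synchronized (obj) { body }
// this evaluates obj (retaining it under ARC), calls syncEnterFn on it,
// registers a cleanup that calls syncExitFn on every exit path (normal,
// branch, or exception), and then emits the body.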
void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
                                           const ObjCAtSynchronizedStmt &S,
                                           llvm::FunctionCallee syncEnterFn,
                                           llvm::FunctionCallee syncExitFn) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);

  // Evaluate the lock operand. This is guaranteed to dominate the
  // ARC release and lock-release cleanups.
  const Expr *lockExpr = S.getSynchExpr();
  llvm::Value *lock;
  if (CGF.getLangOpts().ObjCAutoRefCount) {
    lock = CGF.EmitARCRetainScalarExpr(lockExpr);
    lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
  } else {
    lock = CGF.EmitScalarExpr(lockExpr);
  }
  lock = CGF.Builder.CreateBitCast(lock, CGF.VoidPtrTy);

  // Acquire the lock.
  CGF.Builder.CreateCall(syncEnterFn, lock)->setDoesNotThrow();

  // Register an all-paths cleanup to release the lock.
  CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn, lock);

  // Emit the body of the statement.
  CGF.EmitStmt(S.getSynchBody());
}

/// Compute the pointer-to-function type to which a message send
/// should be cast in order to correctly call the given method
/// with the given arguments.
///
/// \param method - may be null
/// \param resultType - the result type to use if there's no method
/// \param callArgs - the actual arguments, including implicit ones
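///
/// Illustrative use (details vary by runtime): a concrete runtime typically
/// casts its messenger entry point to MessengerType before emitting the call,
/// e.g.
///   MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
///   llvm::Value *Fn = Builder.CreateBitCast(Messenger, MSI.MessengerType);
///   // ...emit the call according to MSI.CallInfo...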
CGObjCRuntime::MessageSendInfo
CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
                                  QualType resultType,
                                  CallArgList &callArgs) {
  // If there's a method, use information from that.
  if (method) {
    const CGFunctionInfo &signature =
        CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);

    llvm::PointerType *signatureType =
        CGM.getTypes().GetFunctionType(signature)->getPointerTo();

    const CGFunctionInfo &signatureForCall =
        CGM.getTypes().arrangeCall(signature, callArgs);

    return MessageSendInfo(signatureForCall, signatureType);
  }

  // There's no method; just use a default CC.
  const CGFunctionInfo &argsInfo =
      CGM.getTypes().arrangeUnprototypedObjCMessageSend(resultType, callArgs);

  // Derive the signature to call from that.
  llvm::PointerType *signatureType =
      CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
  return MessageSendInfo(argsInfo, signatureType);
}