//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This abstract class defines the interface for Objective-C runtime-specific
// code generation. It provides some concrete helper methods for functionality
// shared between all (or most) of the Objective-C runtimes supported by clang.
//
//===----------------------------------------------------------------------===//
#include "CGObjCRuntime.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "CGCleanup.h"
|
2011-03-25 19:57:33 +08:00
|
|
|
#include "CGRecordLayout.h"
|
|
|
|
#include "CodeGenFunction.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "CodeGenModule.h"
|
2011-03-25 19:57:33 +08:00
|
|
|
#include "clang/AST/RecordLayout.h"
|
|
|
|
#include "clang/AST/StmtObjC.h"
|
2013-10-31 05:53:58 +08:00
|
|
|
#include "clang/CodeGen/CGFunctionInfo.h"
|
2014-03-04 19:02:08 +08:00
|
|
|
#include "llvm/IR/CallSite.h"
|
2011-03-25 19:57:33 +08:00
|
|
|
|
|
|
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
|
|
|
|
|
|
|
/// Find the bit offset of \p Ivar within the record layout of its containing
/// interface, preferring the layout of \p ID when the ivar belongs to that
/// implementation (synthesized ivars only appear there).
static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
                                     const ObjCInterfaceDecl *OID,
                                     const ObjCImplementationDecl *ID,
                                     const ObjCIvarDecl *Ivar) {
  const ObjCInterfaceDecl *Iface = Ivar->getContainingInterface();

  // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
  // in here; it should never be necessary because that should be the lexical
  // decl context for the ivar.

  // If we have an implementation (and the ivar is in it), use the
  // implementation layout; otherwise fall back to the interface layout.
  const ASTRecordLayout *Layout =
      (ID && declaresSameEntity(ID->getClassInterface(), Iface))
          ? &CGM.getContext().getASTObjCImplementationLayout(ID)
          : &CGM.getContext().getASTObjCInterfaceLayout(Iface);

  // Compute the field index by walking the declared-ivar chain until we
  // reach the ivar we were asked about.
  //
  // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
  // implemented. This should be fixed to get the information from the layout
  // directly.
  unsigned FieldNo = 0;
  const ObjCIvarDecl *Cursor = Iface->all_declared_ivar_begin();
  while (Cursor && Cursor != Ivar) {
    Cursor = Cursor->getNextIvar();
    ++FieldNo;
  }
  assert(FieldNo < Layout->getFieldCount() &&
         "Ivar is not inside record layout!");

  return Layout->getFieldOffset(FieldNo);
}
|
|
|
|
|
2012-11-07 06:15:52 +08:00
|
|
|
/// Compute the byte offset of \p Ivar within instances of \p OID, consulting
/// only the interface layout (no implementation is used).
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCInterfaceDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  // LookupFieldBitOffset reports bits; convert to the target's char units.
  const uint64_t BitOffset = LookupFieldBitOffset(CGM, OID, nullptr, Ivar);
  return BitOffset / CGM.getContext().getCharWidth();
}
|
|
|
|
|
2012-11-07 06:15:52 +08:00
|
|
|
/// Compute the byte offset of \p Ivar using the layout of the given
/// implementation \p OID (which can include ivars not visible on the
/// interface alone).
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCImplementationDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  const uint64_t BitOffset =
      LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar);
  // Convert the bit offset into a byte offset.
  return BitOffset / CGM.getContext().getCharWidth();
}
|
|
|
|
|
2012-11-07 07:40:48 +08:00
|
|
|
/// Compute the bit offset of the (bit-field) ivar \p Ivar within \p ID,
/// using the implementation's layout when one is available.
unsigned CGObjCRuntime::ComputeBitfieldBitOffset(
    CodeGen::CodeGenModule &CGM,
    const ObjCInterfaceDecl *ID,
    const ObjCIvarDecl *Ivar) {
  // getImplementation() may return null; LookupFieldBitOffset handles that
  // by falling back to the interface layout.
  const ObjCImplementationDecl *Impl = ID->getImplementation();
  return LookupFieldBitOffset(CGM, ID, Impl, Ivar);
}
|
|
|
|
|
2011-03-25 19:57:33 +08:00
|
|
|
/// Build an lvalue for the ivar \p Ivar of the object \p BaseValue, given a
/// (possibly runtime-computed) byte offset of the ivar from the start of the
/// object. Handles both ordinary ivars and bit-field ivars.
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  QualType IvarTy = Ivar->getType();
  llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");

  // The common (non-bit-field) case: cast to the ivar's memory type and form
  // a naturally-aligned lvalue carrying the requested CVR qualifiers.
  if (!Ivar->isBitField()) {
    V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
    LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
    LV.getQuals().addCVRQualifiers(CVRQualifiers);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field, the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees the
  // runtime makes us, and (b) we don't have a way to specify that the struct is
  // at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only call this routine on
  // non-synthesized ivars but we may be called for synthesized ivars. However,
  // a synthesized ivar can never be a bit-field, so this is safe.
  uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, nullptr, Ivar);
  // Sub-byte position of the bit-field within its first byte.
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
  uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
  // Storage spans from the start of the first byte to the end of the
  // bit-field, rounded up to the (conservative) char alignment.
  CharUnits StorageSize =
    CGF.CGM.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(BitOffset + BitFieldSize, AlignmentBits));
  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful, these should be uniqued or part of some
  // layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of these
  // objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
    CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
                             CGF.CGM.getContext().toBits(StorageSize),
                             CharUnits::fromQuantity(0)));

  // Access the storage as an iN* of exactly StorageSize bits.
  V = CGF.Builder.CreateBitCast(V,
                                llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
                                                         Info->StorageSize));
  return LValue::MakeBitfield(V, *Info,
                              IvarTy.withCVRQualifiers(CVRQualifiers),
                              Alignment);
}
|
|
|
|
|
|
|
|
namespace {
  // Bookkeeping for one @catch clause while lowering an @try statement.
  struct CatchHandler {
    const VarDecl *Variable;    // Catch parameter; null for @catch(...).
    const Stmt *Body;           // The catch clause's body.
    llvm::BasicBlock *Block;    // Block the EH dispatch branches to.
    llvm::Constant *TypeInfo;   // EH type to match; null means catch-all.
  };

  // Cleanup that calls the runtime's end-catch function when leaving a
  // @catch block, on both normal and exceptional exits.
  struct CallObjCEndCatch : EHScopeStack::Cleanup {
    CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
      MightThrow(MightThrow), Fn(Fn) {}
    bool MightThrow;      // Whether the end-catch call itself can throw.
    llvm::Value *Fn;      // The runtime's end-catch function.

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (!MightThrow) {
        // Known not to throw: emit a plain call marked nounwind.
        CGF.Builder.CreateCall(Fn, {})->setDoesNotThrow();
        return;
      }

      // Otherwise emit a call or invoke, depending on the current EH state.
      CGF.EmitRuntimeCallOrInvoke(Fn);
    }
  };
}
|
2011-03-25 19:57:33 +08:00
|
|
|
|
|
|
|
|
|
|
|
/// Shared lowering for @try/@catch/@finally. The runtime-specific begin-catch,
/// end-catch, and rethrow entry points are supplied by the caller; any of
/// them may be null when the runtime does not use that hook.
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
                                     const ObjCAtTryStmt &S,
                                     llvm::Constant *beginCatchFn,
                                     llvm::Constant *endCatchFn,
                                     llvm::Constant *exceptionRethrowFn) {
  // Jump destination for falling out of catch bodies.
  CodeGenFunction::JumpDest Cont;
  if (S.getNumCatchStmts())
    Cont = CGF.getJumpDestInCurrentScope("eh.cont");

  // Enter the @finally scope (if any) before the catch scope, so it is
  // left last.
  CodeGenFunction::FinallyInfo FinallyInfo;
  if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
    FinallyInfo.enter(CGF, Finally->getFinallyBody(),
                      beginCatchFn, endCatchFn, exceptionRethrowFn);

  SmallVector<CatchHandler, 8> Handlers;

  // Enter the catch, if there is one.
  if (S.getNumCatchStmts()) {
    for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
      const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
      const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();

      Handlers.push_back(CatchHandler());
      CatchHandler &Handler = Handlers.back();
      Handler.Variable = CatchDecl;
      Handler.Body = CatchStmt->getCatchBody();
      Handler.Block = CGF.createBasicBlock("catch");

      // @catch(...) always matches.
      if (!CatchDecl) {
        Handler.TypeInfo = nullptr; // catch-all
        // Don't consider any other catches.
        break;
      }

      Handler.TypeInfo = GetEHType(CatchDecl->getType());
    }

    // Push one EH catch scope covering all the handlers we collected.
    EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
    for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
      Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
  }

  // Emit the try body.
  CGF.EmitStmt(S.getTryBody());

  // Leave the try.
  if (S.getNumCatchStmts())
    CGF.popCatchScope();

  // Remember where we were.
  CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();

  // Emit the handlers.
  for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
    CatchHandler &Handler = Handlers[I];

    CGF.EmitBlock(Handler.Block);
    llvm::Value *RawExn = CGF.getExceptionFromSlot();

    // Enter the catch.
    llvm::Value *Exn = RawExn;
    if (beginCatchFn) {
      Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
      cast<llvm::CallInst>(Exn)->setDoesNotThrow();
    }

    // Open a lexical scope so any cleanups from the catch body (including
    // the end-catch cleanup below) are bounded to this handler.
    CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());

    if (endCatchFn) {
      // Add a cleanup to leave the catch.
      bool EndCatchMightThrow = (Handler.Variable == nullptr);

      CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
                                                EndCatchMightThrow,
                                                endCatchFn);
    }

    // Bind the catch parameter if it exists.
    if (const VarDecl *CatchParam = Handler.Variable) {
      llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
      llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);

      CGF.EmitAutoVarDecl(*CatchParam);

      llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);

      // Initialize the parameter according to its ARC ownership.
      switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        // fallthrough

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, CatchParamAddr);
        break;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(CatchParamAddr, CastExn);
        break;
      }
    }

    // Make the in-flight exception visible to nested @throw, then emit the
    // handler body.
    CGF.ObjCEHValueStack.push_back(Exn);
    CGF.EmitStmt(Handler.Body);
    CGF.ObjCEHValueStack.pop_back();

    // Leave any cleanups associated with the catch.
    cleanups.ForceCleanup();

    CGF.EmitBranchThroughCleanup(Cont);
  }

  // Go back to the try-statement fallthrough.
  CGF.Builder.restoreIP(SavedIP);

  // Pop out of the finally.
  if (S.getFinallyStmt())
    FinallyInfo.exit(CGF);

  if (Cont.isValid())
    CGF.EmitBlock(Cont.getBlock());
}
|
|
|
|
|
|
|
|
namespace {
  // Cleanup that releases an @synchronized lock by calling the runtime's
  // sync-exit function on every exit path (normal and exceptional).
  struct CallSyncExit : EHScopeStack::Cleanup {
    llvm::Value *SyncExitFn;   // The runtime's objc_sync_exit-style function.
    llvm::Value *SyncArg;      // The lock object, as a void*.
    CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
      : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Emit the release call and mark it nounwind.
      CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
    }
  };
}
|
2011-03-25 19:57:33 +08:00
|
|
|
|
|
|
|
/// Shared lowering for @synchronized: evaluate the lock object, acquire the
/// lock via \p syncEnterFn, and register a cleanup releasing it through
/// \p syncExitFn on all paths out of the body.
void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
                                           const ObjCAtSynchronizedStmt &S,
                                           llvm::Function *syncEnterFn,
                                           llvm::Function *syncExitFn) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);

  // Evaluate the lock operand. This is guaranteed to dominate the
  // ARC release and lock-release cleanups.
  const Expr *lockExpr = S.getSynchExpr();
  llvm::Value *lock;
  if (CGF.getLangOpts().ObjCAutoRefCount) {
    // Under ARC, retain the lock for the duration of the statement and
    // arrange for it to be released when the cleanups scope exits.
    lock = CGF.EmitARCRetainScalarExpr(lockExpr);
    lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
  } else {
    lock = CGF.EmitScalarExpr(lockExpr);
  }
  lock = CGF.Builder.CreateBitCast(lock, CGF.VoidPtrTy);

  // Acquire the lock.
  CGF.Builder.CreateCall(syncEnterFn, lock)->setDoesNotThrow();

  // Register an all-paths cleanup to release the lock.
  CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn, lock);

  // Emit the body of the statement.
  CGF.EmitStmt(S.getSynchBody());
}
|
2012-02-17 11:33:10 +08:00
|
|
|
|
|
|
|
/// Compute the pointer-to-function type to which a message send
|
|
|
|
/// should be casted in order to correctly call the given method
|
|
|
|
/// with the given arguments.
|
|
|
|
///
|
|
|
|
/// \param method - may be null
|
|
|
|
/// \param resultType - the result type to use if there's no method
|
2012-06-15 17:02:08 +08:00
|
|
|
/// \param callArgs - the actual arguments, including implicit ones
|
2012-02-17 11:33:10 +08:00
|
|
|
CGObjCRuntime::MessageSendInfo
CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
                                  QualType resultType,
                                  CallArgList &callArgs) {
  // If there's a method, use information from that.
  if (method) {
    // callArgs[0] is the receiver; its type participates in the signature.
    const CGFunctionInfo &signature =
      CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);

    llvm::PointerType *signatureType =
      CGM.getTypes().GetFunctionType(signature)->getPointerTo();

    // If that's not variadic, there's no need to recompute the ABI
    // arrangement.
    if (!signature.isVariadic())
      return MessageSendInfo(signature, signatureType);

    // Otherwise, there is. Re-arrange using the actual argument list but
    // preserving the method's ext-info and required-args.
    FunctionType::ExtInfo einfo = signature.getExtInfo();
    const CGFunctionInfo &argsInfo =
      CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs, einfo,
                                             signature.getRequiredArgs());

    return MessageSendInfo(argsInfo, signatureType);
  }

  // There's no method; just use a default CC.
  const CGFunctionInfo &argsInfo =
    CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs,
                                           FunctionType::ExtInfo(),
                                           RequiredArgs::All);

  // Derive the signature to call from that.
  llvm::PointerType *signatureType =
    CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
  return MessageSendInfo(argsInfo, signatureType);
}
|