//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : BlockFunction(cgm, *this, Builder), CGM(cgm),
    Target(CGM.getContext().Target),
    Builder(cgm.getModule().getContext()),
    NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
    ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
    SwitchInsn(0), CaseRangeBlock(0),
    DidCallStackSave(false), UnreachableBlock(0),
    CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
    ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
    TrapBB(0) {

  // Get some frequently used types.
  LLVMPointerWidth = Target.getPointerWidth(0);
  llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
  IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);

  Exceptions = getContext().getLangOptions().Exceptions;
  CatchUndefined = getContext().getLangOptions().CatchUndefined;
  CGM.getMangleContext().startNewFunction();
}

ASTContext &CodeGenFunction::getContext() const {
  return CGM.getContext();
}

llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
  llvm::Value *Res = LocalDeclMap[VD];
  assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
  return Res;
}

llvm::Constant *
CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
  return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
}

const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
         T->isMemberFunctionPointerType() || T->isObjCObjectType();
}
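
// For example: 'struct S { int x; };', 'int[4]', and '_Complex double' count
// as aggregate here and are manipulated through memory, while 'int' and
// 'int *' are scalars carried in SSA values.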

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point; there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}
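
// For example, in `int f() { return 0; }` the return block is reached by a
// single unconditional branch, so one of the cases above folds it into its
// predecessor and the emitted function is a single basic block ending in
// `ret i32 0`.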

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, RethrowBlock.getBlock());
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  if (!ShouldInstrumentFunction())
    return;

  const llvm::PointerType *PointerTy;
  const llvm::FunctionType *FunctionTy;
  std::vector<const llvm::Type*> ProfileFuncArgs;

  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  PointerTy = llvm::Type::getInt8PtrTy(VMContext);
  ProfileFuncArgs.push_back(PointerTy);
  ProfileFuncArgs.push_back(PointerTy);
  FunctionTy = llvm::FunctionType::get(
    llvm::Type::getVoidTy(VMContext),
    ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}
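
// Sketch of the emitted instrumentation (IR abridged; operand names
// illustrative):
//   %callsite = call i8* @llvm.returnaddress(i32 0)
//   call void @__cyg_profile_func_enter(i8* <this function>, i8* %callsite)
// The matching __cyg_profile_func_exit call is emitted from FinishFunction().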

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
                                                 false, false, 0, 0,
                                                 /*FIXME?*/
                                                 FunctionType::ExtInfo());

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  EmitFunctionInstrumentation("__cyg_profile_func_enter");

  // FIXME: Leaked.
  // CC info is ignored, hopefully?
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
                                              FunctionType::ExtInfo());

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");
  }

  EmitStartEHSpec(CurCodeDecl);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (CXXThisDecl)
    CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
  if (CXXVTTDecl)
    CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI))
        if (!Call->doesNotThrow())
          return;
  F->setDoesNotThrow(true);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getDebugInfo();

  FunctionArgList Args;

  CurGD = GD;
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
    if (MD->isInstance()) {
      // Create the implicit 'this' decl.
      // FIXME: I'm not entirely sure I like using a fake decl just for code
      // generation. Maybe we can come up with a better way?
      CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
                                              FD->getLocation(),
                                              &getContext().Idents.get("this"),
                                              MD->getThisType(getContext()));
      Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));

      // Check if we need a VTT parameter as well.
      if (CodeGenVTables::needsVTTParameter(GD)) {
        // FIXME: The comment about using a fake decl above applies here too.
        QualType T = getContext().getPointerType(getContext().VoidPtrTy);
        CXXVTTDecl =
          ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
                                    &getContext().Idents.get("vtt"), T);
        Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
      }
    }
  }

  if (FD->getNumParams()) {
    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally and contains no label, we can
/// just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ...  foo:  bar(); }  goto foo;
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return 0. If it
/// constant folds to 'true' and does not contain a label, return 1; if it
/// constant folds to 'false' and does not contain a label, return -1.
int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return 0;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return 0;  // Contains a label.

  return Result.Val.getInt().getBoolValue() ? 1 : -1;
}
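
// For example, for the conditions in `if (2 < 3)` and `if (0)` this returns
// 1 and -1 respectively; for a non-constant condition, or one containing a
// label, it returns 0 and the branch must actually be emitted.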

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks. Based on the condition, this might
/// try to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
      // If we have "0 || X", simplify the code. "1 || X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional. If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UnaryOperator::LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // Handle ?: operator.

    // Just ignore GNU ?: extension.
    if (CondOp->getLHS()) {
      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
      EmitBlock(LHSBlock);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
      EmitBlock(RHSBlock);
      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
      return;
    }
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}
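
// For illustration, `if (a && b)` lowers to a CFG of roughly this shape
// (IR abridged; block names as created above):
//   entry:          br i1 %a, label %land.lhs.true, label %if.end
//   land.lhs.true:  br i1 %b, label %if.then, label %if.end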

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  const llvm::Type *BP =
    llvm::Type::getInt8PtrTy(VMContext, DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
  uint64_t Size = TypeInfo.first;
  unsigned Align = TypeInfo.second;

  // Don't bother emitting a zero-byte memset.
  if (Size == 0)
    return;

  llvm::ConstantInt *SizeVal = llvm::ConstantInt::get(IntPtrTy, Size / 8);
  llvm::ConstantInt *AlignVal = Builder.getInt32(Align / 8);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  if (CGM.getLangOptions().CPlusPlus &&
      CGM.getCXXABI().RequiresNonZeroInitializer(Ty)) {
    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, llvm::Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    // FIXME: variable-size types?

    // Get and call the appropriate llvm.memcpy overload.
    llvm::Constant *Memcpy =
      CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(), IntPtrTy);
    Builder.CreateCall5(Memcpy, DestPtr, SrcPtr, SizeVal, AlignVal,
                        /*volatile*/ Builder.getFalse());
    return;
  }

  // Otherwise, just memset the whole thing to zero. This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.

  // FIXME: Handle variable sized types.
  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
                      Builder.getInt8(0),
                      SizeVal, AlignVal, /*volatile*/ Builder.getFalse());
}
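
// The memcpy path above matters because a "null" value need not be all-zero
// bits: e.g. in the Itanium C++ ABI, a null pointer to data member is
// represented as -1, so an all-zero memset would produce a non-null member
// pointer.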

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}
|
|
|
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
|
2009-10-29 07:59:40 +08:00
|
|
|
// If we already made the indirect branch for indirect goto, return its block.
|
|
|
|
if (IndirectBranch) return IndirectBranch->getParent();
|
2009-10-13 14:55:33 +08:00
|
|
|
|
2009-10-29 07:59:40 +08:00
|
|
|
CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
|
2009-10-13 14:55:33 +08:00
|
|
|
|
2009-10-29 07:59:40 +08:00
|
|
|
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
|
2009-10-29 04:36:47 +08:00
|
|
|
|
2009-10-13 14:55:33 +08:00
|
|
|
// Create the PHI node that indirect gotos will add entries to.
|
2009-10-29 07:59:40 +08:00
|
|
|
llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
|
2009-10-13 14:55:33 +08:00
|
|
|
|
2009-10-29 07:59:40 +08:00
|
|
|
// Create the indirect branch instruction.
|
|
|
|
IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
|
|
|
|
return IndirectBranch->getParent();
|
2008-08-05 00:51:22 +08:00
|
|
|
}
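
// Every `goto *expr;` in the function funnels through this one block, which
// ends up looking roughly like (IR abridged; operands illustrative):
//   indirectgoto:
//     %indirect.goto.dest = phi i8* [ %addr, %bb ], ...
//     indirectbr i8* %indirect.goto.dest, [label %l1, label %l2, ...]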

llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
  llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}

llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

    if (!SizeEntry) {
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // Get the element size.
      QualType ElemTy = VAT->getElementType();
      llvm::Value *ElemSize;
      if (ElemTy->isVariableArrayType())
        ElemSize = EmitVLASize(ElemTy);
      else
        ElemSize = llvm::ConstantInt::get(SizeTy,
            getContext().getTypeSizeInChars(ElemTy).getQuantity());

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    EmitVLASize(AT->getElementType());
    return 0;
  }

  const PointerType *PT = Ty->getAs<PointerType>();
  assert(PT && "unknown VM type!");
  EmitVLASize(PT->getPointeeType());
  return 0;
}
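
// For example, for `int A[n][m]` the recursion above first computes the
// inner size (sizeof(int) * m), then multiplies by n for the outer array;
// each result is cached in VLASizeMap, keyed by its size expression.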

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (CGM.getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
                                       EHCleanupScope &Scope) {
  assert(Scope.isEHCleanup());
  llvm::BasicBlock *Entry = Scope.getEHBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("eh.cleanup");
    Scope.setEHBlock(Entry);
  }
  return Entry;
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::TerminatorInst *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    llvm::LoadInst *Load =
      new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}
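
// Roughly, an exit block ending in `br label %dest` is rewritten as:
//   %cleanup.dest = load i32* <normal cleanup dest slot>
//   switch i32 %cleanup.dest, label %dest [ ... cases added later ... ]
// so that additional cleanup destinations can be added as switch cases.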

/// Attempts to reduce a cleanup's entry block to a fallthrough. This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        bool ForEH) {
  if (ForEH) CGF.EHStack.pushTerminate();
  Fn->Emit(CGF, ForEH);
  if (ForEH) CGF.EHStack.popTerminate();
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
}
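
// Note that running a cleanup on the EH path is bracketed by a terminate
// scope: if the cleanup itself throws during unwinding, C++ requires a call
// to std::terminate rather than further propagation.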

/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
  assert(Scope.isActive() && "cleanup was still inactive when popped!");

  // Check whether we need an EH cleanup. This is only true if we've
  // generated a lazy EH cleanup block.
  bool RequiresEHCleanup = Scope.hasEHBranches();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != 0);

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out. Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::Cleanup *Fn =
    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());

  // We want to emit the EH cleanup after the normal cleanup, but go
  // ahead and do the setup for the EH cleanup while the scope is still
  // alive.
  llvm::BasicBlock *EHEntry = 0;
  llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
  if (RequiresEHCleanup) {
    EHEntry = CreateEHEntry(*this, Scope);

    // Figure out the branch-through dest if necessary.
    llvm::BasicBlock *EHBranchThroughDest = 0;
    if (Scope.hasEHBranchThroughs()) {
      assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
      EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
      EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
    }

    // If we have exactly one branch-after and no branch-throughs, we
    // can dispatch it without a switch.
    if (!Scope.hasEHBranchThroughs() &&
        Scope.getNumEHBranchAfters() == 1) {
      assert(!EHBranchThroughDest);

      // TODO: remove the spurious eh.cleanup.dest stores if this edge
      // never went through any switches.
      llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
      EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));

    // Otherwise, if we have any branch-afters, we need a switch.
    } else if (Scope.getNumEHBranchAfters()) {
      // The default of the switch belongs to the branch-throughs if
      // they exist.
      llvm::BasicBlock *Default =
        (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());

      const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();

      llvm::LoadInst *Load =
        new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
      llvm::SwitchInst *Switch =
        llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

      EHInstsToAppend.push_back(Load);
      EHInstsToAppend.push_back(Switch);

      for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
        Switch->addCase(Scope.getEHBranchAfterIndex(I),
                        Scope.getEHBranchAfterBlock(I));

    // Otherwise, we have only branch-throughs; jump to the next EH
    // cleanup.
    } else {
      assert(EHBranchThroughDest);
      EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
    }
  }

  if (!RequiresNormalCleanup) {
    EHStack.popCleanup();
  } else {
    // As a kind of crazy internal case, branch-through fall-throughs
    // leave the insertion point set to the end of the last cleanup.
    bool HasPrebranchedFallthrough =
      (HasFallthrough && FallthroughSource->getTerminator());
    assert(!HasPrebranchedFallthrough ||
           FallthroughSource->getTerminator()->getSuccessor(0)
             == Scope.getNormalBlock());

    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      // Fixups can cause us to optimistically create a normal block,
      // only to later have no real uses for it. Just delete it in
      // this case.
      // TODO: we can potentially simplify all the uses after this.
      if (Scope.getNormalBlock()) {
        Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
        delete Scope.getNormalBlock();
      }

      EHStack.popCleanup();

      EmitCleanup(*this, Fn, /*ForEH*/ false);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // If there's a fallthrough, we need to store the cleanup
      // destination index. For fall-throughs this is always zero.
      if (HasFallthrough && !HasPrebranchedFallthrough)
        Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Emit the entry block. This implicitly branches to it if we
      // have fallthrough. All the fixups and existing branches should
      // already be branched to it.
      EmitBlock(NormalEntry);

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = 0;
      if (Scope.hasBranchThroughs() ||
          (HasFallthrough && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = 0;
      llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest);

        // TODO: clean up the possibly dead stores to the cleanup dest slot.
        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (HasFallthrough && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(Switch);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // We're finally ready to pop the cleanup.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
2010-07-24 05:56:41 +08:00
|
|
|
EmitCleanup(*this, Fn, /*ForEH*/ false);
|
|
|
|
|
|
|
|
// Append the prepared cleanup prologue from above.
|
|
|
|
llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
|
|
|
|
for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
|
|
|
|
NormalExit->getInstList().push_back(InstsToAppend[I]);
|
|
|
|
|
|
|
|
// Optimistically hope that any fixups will continue falling through.
|
|
|
|
for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
|
|
|
|
I < E; ++I) {
|
|
|
|
BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
|
|
|
|
if (!Fixup.Destination) continue;
|
|
|
|
if (!Fixup.OptimisticBranchBlock) {
|
|
|
|
new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
|
|
|
|
getNormalCleanupDestSlot(),
|
|
|
|
Fixup.InitialBranch);
|
|
|
|
Fixup.InitialBranch->setSuccessor(0, NormalEntry);
|
|
|
|
}
|
|
|
|
Fixup.OptimisticBranchBlock = NormalExit;
|
|
|
|
}
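
      // Each forwarded fixup now stores its destination index and falls
      // into this cleanup; if no enclosing cleanup claims it first,
      // ResolveAllBranchFixups or ResolveBranchFixups below eventually
      // turns it into a real switch case.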

      if (FallthroughDest)
        EmitBlock(FallthroughDest);
      else if (!HasFallthrough)
        Builder.ClearInsertionPoint();

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
             I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);
    EmitCleanup(*this, Fn, /*ForEH*/ true);

    // Append the cleanup's exit instructions, prepared above.
    llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
    for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
      EHExit->getInstList().push_back(EHInstsToAppend[I]);

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}

/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope. The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid depths
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = 0;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }
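
  // The branch has now been rewritten to roughly (sketch):
  //   store i32 <Dest.getDestIndex()>, i32* %cleanup.dest.slot
  //   br label %cleanup    ; entry of the innermost normal cleanup
  // The exit switch emitted in PopCleanupBlock forwards control onward.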

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it. If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
  // We should never get invalid scope depths for an UnwindDest; that
  // implies that the destination wasn't set up correctly.
  assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active cleanup.
  EHScopeStack::stable_iterator
    InnermostCleanup = EHStack.getInnermostActiveEHCleanup();

  // If the destination is in the same EH cleanup scope as us, we
  // don't need to thread through anything.
  if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
    Builder.ClearInsertionPoint();
    return;
  }
  assert(InnermostCleanup != EHStack.stable_end());

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
    BI->setSuccessor(0, CreateEHEntry(*this, Scope));
  }
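
  // This mirrors the normal-cleanup threading above, except that it uses
  // the separate eh.cleanup.dest.slot and the scopes' EH entry blocks.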

  // Add this destination to all the scopes involved.
  for (EHScopeStack::stable_iterator
         I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
    assert(E.strictlyEncloses(I));
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
    assert(Scope.isEHCleanup());
    I = Scope.getEnclosingEHCleanup();

    // If this is the last cleanup we're propagating through, add this
    // as a branch-after.
    if (I == E) {
      Scope.addEHBranchAfter(Index, Dest.getBlock());
      break;
    }

    // Otherwise, add it as a branch-through. If this isn't new
    // information, all the rest of the work has been done before.
    if (!Scope.addEHBranchThrough(Dest.getBlock()))
      break;
  }

  Builder.ClearInsertionPoint();
}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
void CodeGenFunction::ResolveAllBranchFixups(llvm::SwitchInst *Switch) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set or if we've
    // already treated it.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination == 0) continue;
    if (!CasesAdded.insert(Fixup.Destination)) continue;

    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  EHStack.clearFixups();
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = 0;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, the initial
    // branch is already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB))
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Activate a cleanup that was created in an inactive state.
void CodeGenFunction::ActivateCleanup(EHScopeStack::stable_iterator C) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  // Calculate whether the cleanup was used:
  bool Used = false;

  //  - as a normal cleanup
  if (Scope.isNormalCleanup()) {
    bool NormalUsed = false;
    if (Scope.getNormalBlock()) {
      NormalUsed = true;
    } else {
      // Check whether any enclosed cleanups were needed.
      for (EHScopeStack::stable_iterator
             I = EHStack.getInnermostNormalCleanup(); I != C; ) {
        assert(C.strictlyEncloses(I));
        EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
        if (S.getNormalBlock()) {
          NormalUsed = true;
          break;
        }
        I = S.getEnclosingNormalCleanup();
      }
    }

    if (NormalUsed)
      Used = true;
    else
      Scope.setActivatedBeforeNormalUse(true);
  }

  //  - as an EH cleanup
  if (Scope.isEHCleanup()) {
    bool EHUsed = false;
    if (Scope.getEHBlock()) {
      EHUsed = true;
    } else {
      // Check whether any enclosed cleanups were needed.
      for (EHScopeStack::stable_iterator
             I = EHStack.getInnermostEHCleanup(); I != C; ) {
        assert(C.strictlyEncloses(I));
        EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
        if (S.getEHBlock()) {
          EHUsed = true;
          break;
        }
        I = S.getEnclosingEHCleanup();
      }
    }

    if (EHUsed)
      Used = true;
    else
      Scope.setActivatedBeforeEHUse(true);
  }

  llvm::AllocaInst *Var = EHCleanupScope::activeSentinel();
  if (Used) {
    Var = CreateTempAlloca(Builder.getInt1Ty());
    InitTempAlloca(Var, Builder.getFalse());
  }
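
  // Note (intent, hedged): if the cleanup was never referenced while
  // inactive, the shared sentinel records that no runtime test is needed;
  // otherwise the fresh i1 flag created above guards it, and is presumably
  // consulted when the cleanup is finally emitted.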
  Scope.setActiveVar(Var);
}

llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest)
    NormalCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
  if (!EHCleanupDest)
    EHCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
  return EHCleanupDest;
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::ConstantInt *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    Dbg->EmitGlobalVariable(E->getDecl(), Init, Builder);
}