2007-06-02 02:02:12 +08:00
|
|
|
//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 03:59:25 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2007-06-02 02:02:12 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This contains code to emit Stmt nodes as LLVM code.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "CodeGenFunction.h"
|
2008-05-08 16:54:20 +08:00
|
|
|
#include "CGDebugInfo.h"
|
|
|
|
#include "CodeGenModule.h"
|
2011-02-20 07:03:58 +08:00
|
|
|
#include "TargetInfo.h"
|
2008-08-11 13:00:27 +08:00
|
|
|
#include "clang/AST/StmtVisitor.h"
|
2013-06-07 08:04:50 +08:00
|
|
|
#include "clang/Sema/SemaDiagnostic.h"
|
2009-03-05 16:04:57 +08:00
|
|
|
#include "clang/Basic/PrettyStackTrace.h"
|
2008-02-06 00:35:33 +08:00
|
|
|
#include "clang/Basic/TargetInfo.h"
|
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2013-01-02 19:45:17 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/InlineAsm.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
2013-05-10 03:17:11 +08:00
|
|
|
#include "llvm/Support/CallSite.h"
|
2007-06-02 02:02:12 +08:00
|
|
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Statement Emission
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
|
2009-02-13 16:11:52 +08:00
|
|
|
if (CGDebugInfo *DI = getDebugInfo()) {
|
2011-10-14 05:45:18 +08:00
|
|
|
SourceLocation Loc;
|
2013-06-18 08:27:36 +08:00
|
|
|
Loc = S->getLocStart();
|
2011-10-14 05:45:18 +08:00
|
|
|
DI->EmitLocation(Builder, Loc);
|
2013-05-03 01:30:20 +08:00
|
|
|
|
2013-05-08 06:26:03 +08:00
|
|
|
LastStopPoint = Loc;
|
2008-11-12 16:21:33 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-06-02 02:02:12 +08:00
|
|
|
/// EmitStmt - Emit LLVM IR for the statement \p S.  Handles the "simple"
/// statements first (which do their own debug-location bookkeeping), skips
/// unreachable label-free code, and otherwise dispatches on the statement
/// class.  Expression statements are emitted for their side effects only.
void CodeGenFunction::EmitStmt(const Stmt *S) {
  assert(S && "Null statement?");

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  switch (S->getStmtClass()) {
  // These statement classes only ever appear in contexts that are lowered by
  // a dedicated emitter (e.g. a catch handler inside a try), never standalone.
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
  case Stmt::OMPParallelDirectiveClass:
    llvm_unreachable("invalid statement class to emit generically");
  // These should all have been caught by EmitSimpleStmt above.
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

  // Expand to one case label per concrete expression class so every
  // expression-as-statement lands in the block below.
#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  // @catch and @finally only occur nested inside an @try and are consumed
  // there, never dispatched to directly.
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
                    "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
                  "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  }
}
|
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
|
|
|
|
switch (S->getStmtClass()) {
|
|
|
|
default: return false;
|
|
|
|
case Stmt::NullStmtClass: break;
|
|
|
|
case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
|
2009-07-19 14:58:07 +08:00
|
|
|
case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
|
2008-11-12 16:21:33 +08:00
|
|
|
case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
|
2012-04-14 08:33:13 +08:00
|
|
|
case Stmt::AttributedStmtClass:
|
|
|
|
EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
|
2008-11-12 16:21:33 +08:00
|
|
|
case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
|
|
|
|
case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
|
|
|
|
case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
|
|
|
|
case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
|
|
|
|
case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2007-09-01 06:09:40 +08:00
|
|
|
/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
|
|
|
|
/// this captures the expression result of the last sub-statement and returns it
|
|
|
|
/// (for use by the statement expression extension).
|
2013-06-11 06:04:49 +08:00
|
|
|
/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is
/// true, this captures the expression result of the last sub-statement and
/// returns it (for use by the statement expression extension); otherwise the
/// return value is null.  A LexicalScope is opened for the braces so that
/// cleanups and debug scopes are popped on exit.
llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                               AggValueSlot AggSlot) {
  // Installed first so a crash during emission reports this statement.
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  // Destroyed after the body is emitted, running any scoped cleanups.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}
|
|
|
|
|
2013-06-11 06:04:49 +08:00
|
|
|
/// EmitCompoundStmtWithoutScope - Emit the body of a compound statement
/// without opening a new lexical scope (the caller owns any scope).  When
/// \p GetLast is set, the final sub-statement must be an expression: its
/// value is either evaluated into \p AggSlot (aggregates) or spilled to a
/// fresh alloca, which is returned.  Returns null when \p GetLast is false.
llvm::Value*
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  // Emit every sub-statement; when GetLast is set, stop one short so the
  // final expression can be handled specially below.
  for (CompoundStmt::const_body_iterator I = S.body_begin(),
       E = S.body_end()-GetLast; I != E; ++I)
    EmitStmt(*I);

  llvm::Value *RetAlloca = 0;
  if (GetLast) {
    // We have to special case labels here. They are statements, but when put
    // at the end of a statement expression, they yield the value of their
    // subexpression. Handle this by walking through all labels we encounter,
    // emitting them before we evaluate the subexpr.
    const Stmt *LastStmt = S.body_back();
    while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
      EmitLabel(LS->getDecl());
      LastStmt = LS->getSubStmt();
    }

    // The label walk above may have left us without an insertion point.
    EnsureInsertPoint();

    QualType ExprTy = cast<Expr>(LastStmt)->getType();
    if (hasAggregateEvaluationKind(ExprTy)) {
      // Aggregate results go straight into the caller-provided slot.
      EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
    } else {
      // We can't return an RValue here because there might be cleanups at
      // the end of the StmtExpr. Because of that, we have to emit the result
      // here into a temporary alloca.
      RetAlloca = CreateMemTemp(ExprTy);
      EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
                       /*IsInit*/false);
    }

  }

  return RetAlloca;
}
|
|
|
|
|
2009-04-01 12:37:47 +08:00
|
|
|
/// SimplifyForwardingBlocks - If \p BB is an otherwise-empty block whose only
/// instruction is an unconditional branch, fold it away: redirect all users of
/// the block to its unique successor and delete the block.
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *Term = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, it isn't worth trying to simplify this
  // block: we would need to remove it from the scope map and cleanup entry.
  if (!EHStack.empty())
    return;

  // Only an unconditional direct branch can be folded.
  if (!Term || !Term->isUnconditional())
    return;

  // Only an empty block (branch is the first instruction) can be folded.
  if (Term != BB->begin())
    return;

  // Forward all users to the successor, then drop the branch and the block.
  BB->replaceAllUsesWith(Term->getSuccessor(0));
  Term->eraseFromParent();
  BB->eraseFromParent();
}
|
|
|
|
|
2008-11-13 09:24:05 +08:00
|
|
|
/// EmitBlock - Terminate the current block, add \p BB to the function, and
/// make it the new insertion point.  If \p IsFinished is true and the block
/// has no uses, it will never be reached and is deleted instead.
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *PredBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  // A finished, unused block can simply be discarded.
  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (PredBB && PredBB->getParent())
    CurFn->getBasicBlockList().insertAfter(PredBB, BB);
  else
    CurFn->getBasicBlockList().push_back(BB);

  Builder.SetInsertPoint(BB);
}
|
|
|
|
|
|
|
|
/// EmitBranch - Emit a branch from the current block to \p Target, then clear
/// the insertion point to mark the current block as closed.  If there is no
/// current block, or it already has a terminator (i.e. we were just a
/// fall-through position after a terminator), no branch is emitted.
void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  llvm::BasicBlock *SrcBB = Builder.GetInsertBlock();

  // Only a live, unterminated block needs an explicit fall-through branch.
  if (SrcBB && !SrcBB->getTerminator())
    Builder.CreateBr(Target);

  Builder.ClearInsertionPoint();
}
|
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
/// EmitBlockAfterUses - Insert \p block into the function after the first
/// instruction that uses it (or at the end if it has no instruction users
/// yet) and make it the insertion point.
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool placed = false;

  // Find the first user that is an instruction and drop the block right
  // after that instruction's parent block.
  for (llvm::BasicBlock::use_iterator
         u = block->use_begin(), ue = block->use_end(); u != ue; ++u) {
    llvm::Instruction *user = dyn_cast<llvm::Instruction>(*u);
    if (!user)
      continue;
    CurFn->getBasicBlockList().insertAfter(user->getParent(), block);
    placed = true;
    break;
  }

  // No instruction uses it yet; append it to the function.
  if (!placed)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
/// getJumpDestForLabel - Return the jump destination for label \p D, creating
/// it lazily on first reference.  The block is created but not inserted, and
/// the scope depth is left invalid until EmitLabel fills it in.
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Entry = LabelMap[D];
  if (Entry.isValid())
    return Entry;

  // First reference: create, but don't insert, the new block.
  Entry = JumpDest(createBasicBlock(D->getName()),
                   EHScopeStack::stable_iterator::invalid(),
                   NextCleanupDestIndex++);
  return Entry;
}
|
|
|
|
|
2011-02-17 15:39:24 +08:00
|
|
|
/// EmitLabel - Emit the block for label \p D and make it the insertion point.
/// Handles both labels seen for the first time here and labels that were
/// forward-referenced by an earlier goto (whose JumpDest already exists in
/// LabelMap with an invalid scope depth).
void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    // Pin the destination to the current top of the cleanup stack.
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());
}
|
|
|
|
|
2013-03-23 14:43:35 +08:00
|
|
|
/// Change the cleanup scope of the labels in this lexical scope to
|
|
|
|
/// match the scope of the enclosing context.
|
|
|
|
void CodeGenFunction::LexicalScope::rescopeLabels() {
|
|
|
|
assert(!Labels.empty());
|
|
|
|
EHScopeStack::stable_iterator innermostScope
|
|
|
|
= CGF.EHStack.getInnermostNormalCleanup();
|
|
|
|
|
|
|
|
// Change the scope depth of all the labels.
|
|
|
|
for (SmallVectorImpl<const LabelDecl*>::const_iterator
|
|
|
|
i = Labels.begin(), e = Labels.end(); i != e; ++i) {
|
|
|
|
assert(CGF.LabelMap.count(*i));
|
|
|
|
JumpDest &dest = CGF.LabelMap.find(*i)->second;
|
|
|
|
assert(dest.getScopeDepth().isValid());
|
|
|
|
assert(innermostScope.encloses(dest.getScopeDepth()));
|
|
|
|
dest.setScopeDepth(innermostScope);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reparent the labels if the new scope also has cleanups.
|
|
|
|
if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
|
|
|
|
ParentScope->Labels.append(Labels.begin(), Labels.end());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-27 04:23:23 +08:00
|
|
|
|
|
|
|
/// EmitLabelStmt - Emit a label statement: first the label itself (which
/// starts a new block), then its labeled sub-statement.
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}
|
|
|
|
|
2012-04-14 08:33:13 +08:00
|
|
|
/// EmitAttributedStmt - Emit an attributed statement.  The attributes carry
/// no codegen effect here; only the sub-statement is emitted.
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  EmitStmt(S.getSubStmt());
}
|
|
|
|
|
2007-06-02 02:02:12 +08:00
|
|
|
void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
|
2008-11-12 16:21:33 +08:00
|
|
|
// If this code is reachable then emit a stop point (if generating
|
|
|
|
// debug info). We have to do this ourselves because we are on the
|
|
|
|
// "simple" statement path.
|
|
|
|
if (HaveInsertPoint())
|
|
|
|
EmitStopPoint(&S);
|
2009-02-07 20:52:26 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
|
2007-06-02 02:02:12 +08:00
|
|
|
}
|
|
|
|
|
2009-10-13 14:55:33 +08:00
|
|
|
|
2008-08-05 00:51:22 +08:00
|
|
|
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
|
2011-02-17 15:39:24 +08:00
|
|
|
if (const LabelDecl *Target = S.getConstantTarget()) {
|
2010-10-28 16:53:48 +08:00
|
|
|
EmitBranchThroughCleanup(getJumpDestForLabel(Target));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2009-11-07 02:10:47 +08:00
|
|
|
// Ensure that we have an i8* for our PHI node.
|
2009-10-29 07:59:40 +08:00
|
|
|
llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
|
2011-02-08 16:22:06 +08:00
|
|
|
Int8PtrTy, "addr");
|
2009-10-13 14:55:33 +08:00
|
|
|
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
|
|
|
|
|
|
|
|
// Get the basic block for the indirect goto.
|
|
|
|
llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2009-10-13 14:55:33 +08:00
|
|
|
// The first instruction in the block has to be the PHI for the switch dest,
|
|
|
|
// add an entry for this branch.
|
|
|
|
cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2009-10-13 14:55:33 +08:00
|
|
|
EmitBranch(IndGotoBB);
|
2008-08-05 00:51:22 +08:00
|
|
|
}
|
|
|
|
|
2008-11-11 15:24:28 +08:00
|
|
|
/// EmitIfStmt - Emit an if/else statement.  Constant-foldable conditions may
/// elide the dead arm entirely (when it contains no labels); otherwise a
/// conditional branch is emitted to then/else/continuation blocks.
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (!ContainsLabel(Skipped)) {
      if (Executed) {
        // Scope so cleanups from the executed arm run before returning.
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  // With no else clause, a false condition falls straight to the
  // continuation block.
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    // There is no need to emit line number for unconditional branch.
    if (getDebugInfo())
      Builder.SetCurrentDebugLocation(llvm::DebugLoc());
    EmitBlock(ElseBlock);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    // There is no need to emit line number for unconditional branch.
    if (getDebugInfo())
      Builder.SetCurrentDebugLocation(llvm::DebugLoc());
    EmitBranch(ContBlock);
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}
|
|
|
|
|
2007-06-05 11:59:43 +08:00
|
|
|
/// EmitWhileStmt - Emit IR for a while loop: a header block that evaluates
/// the condition (and doubles as the 'continue' target), the body, and an
/// exit block that serves as the 'break' target.
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If the condition variable needs cleanups, stage the exit through an
    // intermediate block so the cleanups run before leaving the loop.
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");

    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}
|
|
|
|
|
2007-06-06 04:53:16 +08:00
|
|
|
/// EmitDoStmt - Emit IR for a do/while loop: the body first, then a separate
/// condition block (the 'continue' target), then the exit ('break') block.
void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
  EmitBlock(LoopBody);
  // The body gets its own cleanup scope because it might be a
  // singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch)
    Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock());

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}
|
|
|
|
|
|
|
|
/// EmitForStmt - Emit IR for a C/C++ for loop.  All three clauses (init,
/// condition, increment) are optional; the condition block doubles as the
/// 'continue' target unless there is an increment, which then gets its own
/// block.
void CodeGenFunction::EmitForStmt(const ForStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  // Scope for the whole statement: covers the init-clause and any
  // condition variable, so their cleanups run on every exit path.
  RunCleanupsScope ForScope(*this);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  // Create a cleanup scope for the condition variable cleanups.
  RunCleanupsScope ConditionScope(*this);

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitAutoVarDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    EmitBranchOnBoolExpr(S.getCond(), ForBody, ExitBlock);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
|
2007-06-05 11:59:43 +08:00
|
|
|
|
2011-04-15 06:09:26 +08:00
|
|
|
/// EmitCXXForRangeStmt - Emit IR for a C++11 range-based for loop, using the
/// range/begin-end/loop-variable statements already synthesized by Sema.
void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  // Scope covering the __range and __begin/__end variables, so their
  // cleanups run on every exit path.
  RunCleanupsScope ForScope(*this);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  // Evaluate the first pieces before the loop.
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  EmitBranchOnBoolExpr(S.getCond(), ForBody, ExitBlock);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
|
|
|
|
|
2008-09-24 12:00:38 +08:00
|
|
|
/// EmitReturnOfRValue - Store an already-computed RValue of type \p Ty into
/// the function's return slot and branch (through any active cleanups) to the
/// common return block.
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
  } else {
    // Complex value: store both components into the return slot.
    EmitStoreOfComplex(RV.getComplexVal(),
                       MakeNaturalAlignAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}
|
|
|
|
|
2007-06-02 11:19:07 +08:00
|
|
|
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Treat block literals in a return expression as if they appeared
  // in their own scope.  This permits a small, easily-implemented
  // exception to our over-conservative rules about not jumping to
  // statements following block literals with non-trivial cleanups.
  RunCleanupsScope cleanupScope(*this);
  if (const ExprWithCleanups *cleanups =
        dyn_cast_or_null<ExprWithCleanups>(RV)) {
    enterFullExpression(cleanups);
    RV = cleanups->getSubExpr();
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (RV == 0) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    // Dispatch on how values of the returned type are represented in IR.
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV,
                     MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
      EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
                                            Qualifiers(),
                                            AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
      break;
    }
    }
  }

  // Statistics: track how many returns are "simple" (no value, or a
  // compile-time-evaluatable value).
  ++NumReturnExprs;
  if (RV == 0 || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
|
|
|
|
|
2007-06-09 09:20:56 +08:00
|
|
|
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
|
2011-06-04 08:38:02 +08:00
|
|
|
// As long as debug info is modeled with instructions, we have to ensure we
|
|
|
|
// have a place to insert here and write the stop point here.
|
2012-04-10 13:04:07 +08:00
|
|
|
if (HaveInsertPoint())
|
2011-06-04 08:38:02 +08:00
|
|
|
EmitStopPoint(&S);
|
|
|
|
|
2008-10-07 02:42:27 +08:00
|
|
|
for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
|
|
|
|
I != E; ++I)
|
|
|
|
EmitDecl(**I);
|
2007-07-12 23:43:07 +08:00
|
|
|
}
|
2007-07-17 05:28:45 +08:00
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
|
2007-07-17 05:28:45 +08:00
|
|
|
assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
|
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
// If this code is reachable then emit a stop point (if generating
|
|
|
|
// debug info). We have to do this ourselves because we are on the
|
|
|
|
// "simple" statement path.
|
|
|
|
if (HaveInsertPoint())
|
|
|
|
EmitStopPoint(&S);
|
2009-02-08 17:22:19 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
JumpDest Block = BreakContinueStack.back().BreakBlock;
|
2009-02-10 04:31:03 +08:00
|
|
|
EmitBranchThroughCleanup(Block);
|
2007-07-17 05:28:45 +08:00
|
|
|
}
|
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
|
2007-07-17 05:28:45 +08:00
|
|
|
assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
|
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
// If this code is reachable then emit a stop point (if generating
|
|
|
|
// debug info). We have to do this ourselves because we are on the
|
|
|
|
// "simple" statement path.
|
|
|
|
if (HaveInsertPoint())
|
|
|
|
EmitStopPoint(&S);
|
2009-02-08 17:22:19 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
JumpDest Block = BreakContinueStack.back().ContinueBlock;
|
2009-02-10 04:31:03 +08:00
|
|
|
EmitBranchThroughCleanup(Block);
|
2007-07-17 05:28:45 +08:00
|
|
|
}
|
2007-10-05 07:45:31 +08:00
|
|
|
|
2007-10-09 04:57:48 +08:00
|
|
|
/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  EmitBlock(createBasicBlock("sw.bb"));
  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      LHS++;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check: (cond - LHS) <=u (RHS - LHS) tests membership in
  // [LHS, RHS] with a single unsigned comparison.
  llvm::Value *Diff =
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
  Builder.CreateCondBr(Cond, CaseDest, FalseDest);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
|
2007-10-06 04:54:07 +08:00
|
|
|
|
2007-10-09 04:57:48 +08:00
|
|
|
/// EmitCaseStmt - Emit a 'case' label (and its substatement) of the
/// innermost enclosing switch, registering the case value(s) on the
/// switch instruction.
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided.  This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement.  For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }

  llvm::ConstantInt *CaseVal =
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // If the body of the case is just a 'break', and if there was no fallthrough,
  // try to not emit an empty block.
  if ((CGM.getCodeGenOpts().OptimizationLevel > 0) &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  EmitBlock(createBasicBlock("sw.bb"));
  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive.  It also causes
  // deep recursion which can run into stack depth limitations.  Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == 0) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}
|
|
|
|
|
|
|
|
/// EmitDefaultStmt - Emit the 'default' label of the innermost enclosing
/// switch.  The switch was created with a fresh default destination, so
/// that block must still be empty here.
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");
  EmitBlock(DefaultBlock);
  EmitStmt(S.getSubStmt());
}
|
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit.  Basically,
/// on a switch (5) we want to find these statements:
///    case 5:
///      printf(...);    <--
///      ++i;            <--
///      break;
///
/// and add them to the ResultStmts vector.  If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels.  If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                            SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (S == 0)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for, then
  // we're in business.  Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      // Case found: recurse with a null Case so the walk now collects live
      // statements until it reaches a break.
      return CollectStatementsForCase(SC->getSubStmt(), 0, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (Case == 0 && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a switch statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    if (Case) {
      // Keep track of whether we see a skipped declaration.  The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= isa<DeclStmt>(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means that either 1) that the statement doesn't
          // have the case and is skippable, or 2) does contain the case value
          // and also contains the break to exit the switch.  In the later case,
          // we just verify the rest of the statements are elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            // Everything after the break must be elidable (label-free) for the
            // transformation to be valid.
            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case started to include statements.  Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = 0;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    for (; I != E; ++I) {
      switch (CollectStatementsForCase(*I, 0, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmt, keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion.  We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    // Ran off the end of the compound statement without seeing a break.
    return Case ? CSFC_Success : CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc.  If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement.  Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great.  Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}
|
|
|
|
|
|
|
|
/// FindCaseStatementsForValue - Find the case statement being jumped to and
|
|
|
|
/// then invoke CollectStatementsForCase to find the list of statements to emit
|
|
|
|
/// for a switch on constant. See the comment above CollectStatementsForCase
|
|
|
|
/// for more details.
|
|
|
|
static bool FindCaseStatementsForValue(const SwitchStmt &S,
|
2012-07-24 04:21:35 +08:00
|
|
|
const llvm::APSInt &ConstantCondValue,
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVectorImpl<const Stmt*> &ResultStmts,
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
ASTContext &C) {
|
|
|
|
// First step, find the switch case that is being branched to. We can do this
|
|
|
|
// efficiently by scanning the SwitchCase list.
|
|
|
|
const SwitchCase *Case = S.getSwitchCaseList();
|
|
|
|
const DefaultStmt *DefaultCase = 0;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
for (; Case; Case = Case->getNextSwitchCase()) {
|
|
|
|
// It's either a default or case. Just remember the default statement in
|
|
|
|
// case we're not jumping to any numbered cases.
|
|
|
|
if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
|
|
|
|
DefaultCase = DS;
|
|
|
|
continue;
|
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// Check to see if this case is the one we're looking for.
|
|
|
|
const CaseStmt *CS = cast<CaseStmt>(Case);
|
|
|
|
// Don't handle case ranges yet.
|
|
|
|
if (CS->getRHS()) return false;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// If we found our case, remember it as 'case'.
|
2011-10-11 02:28:20 +08:00
|
|
|
if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
break;
|
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// If we didn't find a matching case, we use a default if it exists, or we
|
|
|
|
// elide the whole switch body!
|
|
|
|
if (Case == 0) {
|
|
|
|
// It is safe to elide the body of the switch if it doesn't contain labels
|
|
|
|
// etc. If it is safe, return successfully with an empty ResultStmts list.
|
|
|
|
if (DefaultCase == 0)
|
|
|
|
return !CodeGenFunction::ContainsLabel(&S);
|
|
|
|
Case = DefaultCase;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ok, we know which case is being jumped to, try to collect all the
|
|
|
|
// statements that follow it. This can fail for a variety of reasons. Also,
|
|
|
|
// check to see that the recursive walk actually found our case statement.
|
|
|
|
// Insane cases like this can fail to find it in the recursive walk since we
|
|
|
|
// don't handle every stmt kind:
|
|
|
|
// switch (4) {
|
|
|
|
// while (1) {
|
|
|
|
// case 4: ...
|
|
|
|
bool FoundCase = false;
|
|
|
|
return CollectStatementsForCase(S.getBody(), Case, FoundCase,
|
|
|
|
ResultStmts) != CSFC_Failure &&
|
|
|
|
FoundCase;
|
|
|
|
}
|
|
|
|
|
2007-10-05 07:45:31 +08:00
|
|
|
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
|
2010-07-06 09:34:17 +08:00
|
|
|
JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
|
|
|
|
|
|
|
|
RunCleanupsScope ConditionScope(*this);
|
2009-11-25 01:07:59 +08:00
|
|
|
|
|
|
|
if (S.getConditionVariable())
|
2010-10-15 12:57:14 +08:00
|
|
|
EmitAutoVarDecl(*S.getConditionVariable());
|
2009-11-25 01:07:59 +08:00
|
|
|
|
2012-01-18 07:39:50 +08:00
|
|
|
// Handle nested switch statements.
|
|
|
|
llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
|
|
|
|
llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
|
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// See if we can constant fold the condition of the switch and therefore only
|
|
|
|
// emit the live case statement (if any) of the switch.
|
2012-07-24 04:21:35 +08:00
|
|
|
llvm::APSInt ConstantCondValue;
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<const Stmt*, 4> CaseStmts;
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
|
|
|
|
getContext())) {
|
|
|
|
RunCleanupsScope ExecutedScope(*this);
|
|
|
|
|
2012-01-18 07:39:50 +08:00
|
|
|
// At this point, we are no longer "within" a switch instance, so
|
|
|
|
// we can temporarily enforce this to ensure that any embedded case
|
|
|
|
// statements are not emitted.
|
|
|
|
SwitchInsn = 0;
|
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// Okay, we can dead code eliminate everything except this case. Emit the
|
|
|
|
// specified series of statements and we're good.
|
|
|
|
for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
|
|
|
|
EmitStmt(CaseStmts[i]);
|
2012-01-18 07:39:50 +08:00
|
|
|
|
2012-04-10 13:04:04 +08:00
|
|
|
// Now we want to restore the saved switch instance so that nested
|
|
|
|
// switches continue to function properly
|
2012-01-18 07:39:50 +08:00
|
|
|
SwitchInsn = SavedSwitchInsn;
|
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2007-10-05 07:45:31 +08:00
|
|
|
llvm::Value *CondV = EmitScalarExpr(S.getCond());
|
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
// Create basic block to hold stuff that comes after switch
|
|
|
|
// statement. We also need to create a default block now so that
|
|
|
|
// explicit case ranges tests can have a place to jump to on
|
|
|
|
// failure.
|
2008-11-11 10:29:29 +08:00
|
|
|
llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
|
2008-07-25 09:11:38 +08:00
|
|
|
SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
|
|
|
|
CaseRangeBlock = DefaultBlock;
|
2007-10-05 07:45:31 +08:00
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
// Clear the insertion point to indicate we are in unreachable code.
|
|
|
|
Builder.ClearInsertionPoint();
|
2008-05-13 00:08:04 +08:00
|
|
|
|
2007-10-31 04:59:40 +08:00
|
|
|
// All break statements jump to NextBlock. If BreakContinueStack is non empty
|
|
|
|
// then reuse last ContinueBlock.
|
2010-07-06 09:34:17 +08:00
|
|
|
JumpDest OuterContinue;
|
2009-02-10 13:52:02 +08:00
|
|
|
if (!BreakContinueStack.empty())
|
2010-07-06 09:34:17 +08:00
|
|
|
OuterContinue = BreakContinueStack.back().ContinueBlock;
|
2009-02-10 13:52:02 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
|
2007-10-05 07:45:31 +08:00
|
|
|
|
|
|
|
// Emit switch body.
|
|
|
|
EmitStmt(S.getBody());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-10 13:52:02 +08:00
|
|
|
BreakContinueStack.pop_back();
|
2007-10-05 07:45:31 +08:00
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
// Update the default block in case explicit case range tests have
|
|
|
|
// been chained on top.
|
Compatibility fix for SwitchInst refactoring.
The purpose of refactoring is to hide operand roles from SwitchInst user (programmer). If you want to play with operands directly, probably you will need lower level methods than SwitchInst ones (TerminatorInst or may be User). After this patch we can reorganize SwitchInst operands and successors as we want.
What was done:
1. Changed semantics of index inside the getCaseValue method:
getCaseValue(0) means "get first case", not a condition. Use getCondition() if you want to resolve the condition. I propose don't mix SwitchInst case indexing with low level indexing (TI successors indexing, User's operands indexing), since it may be dangerous.
2. By the same reason findCaseValue(ConstantInt*) returns actual number of case value. 0 means first case, not default. If there is no case with given value, ErrorIndex will returned.
3. Added getCaseSuccessor method. I propose to avoid usage of TerminatorInst::getSuccessor if you want to resolve case successor BB. Use getCaseSuccessor instead, since internal SwitchInst organization of operands/successors is hidden and may be changed in any moment.
4. Added resolveSuccessorIndex and resolveCaseIndex. The main purpose of these methods is to see how case successors are really mapped in TerminatorInst.
4.1 "resolveSuccessorIndex" was created if you need to level down from SwitchInst to TerminatorInst. It returns TerminatorInst's successor index for given case successor.
4.2 "resolveCaseIndex" converts a low-level successor index to the case index that corresponds to the given successor.
Note: There are also related compatability fix patches for dragonegg, klee, llvm-gcc-4.0, llvm-gcc-4.2, safecode, clang.
llvm-svn: 149482
2012-02-01 15:50:21 +08:00
|
|
|
SwitchInsn->setDefaultDest(CaseRangeBlock);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
// If a default was never emitted:
|
2008-07-25 09:11:38 +08:00
|
|
|
if (!DefaultBlock->getParent()) {
|
2010-07-06 09:34:17 +08:00
|
|
|
// If we have cleanups, emit the default block so that there's a
|
|
|
|
// place to jump through the cleanups from.
|
|
|
|
if (ConditionScope.requiresCleanups()) {
|
|
|
|
EmitBlock(DefaultBlock);
|
|
|
|
|
|
|
|
// Otherwise, just forward the default block to the switch end.
|
|
|
|
} else {
|
2010-07-24 05:56:41 +08:00
|
|
|
DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
|
2010-07-06 09:34:17 +08:00
|
|
|
delete DefaultBlock;
|
|
|
|
}
|
2008-07-25 09:11:38 +08:00
|
|
|
}
|
|
|
|
|
2010-07-24 05:56:41 +08:00
|
|
|
ConditionScope.ForceCleanup();
|
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
// Emit continuation.
|
2010-07-24 05:56:41 +08:00
|
|
|
EmitBlock(SwitchExit.getBlock(), true);
|
2007-10-05 07:45:31 +08:00
|
|
|
|
|
|
|
SwitchInsn = SavedSwitchInsn;
|
2007-10-09 04:57:48 +08:00
|
|
|
CaseRangeBlock = SavedCRBlock;
|
2007-10-05 07:45:31 +08:00
|
|
|
}
|
2008-02-06 00:35:33 +08:00
|
|
|
|
2009-04-27 01:57:12 +08:00
|
|
|
/// SimplifyConstraint - Translate a GCC inline-asm constraint string into the
/// form LLVM's inline-asm support expects: alternative separators become '|',
/// modifiers LLVM handles elsewhere are dropped, 'g' is expanded, symbolic
/// operand references are replaced by operand indices, and everything else is
/// delegated to the target for translation.
///
/// \param Constraint the constraint text to translate (NUL-terminated).
/// \param Target used to translate target-specific constraint characters and
///        to resolve symbolic operand names.
/// \param OutCons the already-parsed output constraints; must be non-null
///        whenever the constraint can contain a symbolic name ("[name]").
static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      // Target-specific (or ordinary) constraint character: let the target
      // supply the translation.  NOTE(review): convertConstraint appears to
      // be able to consume more than one character of multi-character
      // constraints — confirm against TargetInfo.
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in mult-alt constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case ',':
      // GCC separates constraint alternatives with ','; LLVM uses '|'.
      Result += "|";
      break;
    case 'g':
      // 'g' is the "general operand" shorthand: immediate, memory or register.
      Result += "imr";
      break;
    case '[': {
      // Symbolic operand name ("[foo]"): replace it with the index of the
      // named operand, resolved against the output constraint list.
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint,
                                               &(*OutCons)[0],
                                               OutCons->size(), Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    // Advance past the character (or the last character the case above
    // stopped on).
    Constraint++;
  }

  return Result;
}
|
|
|
|
|
2011-01-02 05:12:33 +08:00
|
|
|
/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
|
|
|
|
/// as using a particular register add that as a constraint that will be used
|
|
|
|
/// in this asm stmt.
|
2010-12-31 06:59:32 +08:00
|
|
|
static std::string
|
2011-01-02 05:12:33 +08:00
|
|
|
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
|
|
|
|
const TargetInfo &Target, CodeGenModule &CGM,
|
2012-08-29 02:54:39 +08:00
|
|
|
const AsmStmt &Stmt) {
|
2010-12-31 06:59:32 +08:00
|
|
|
const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
|
|
|
|
if (!AsmDeclRef)
|
|
|
|
return Constraint;
|
|
|
|
const ValueDecl &Value = *AsmDeclRef->getDecl();
|
|
|
|
const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
|
|
|
|
if (!Variable)
|
|
|
|
return Constraint;
|
2012-03-16 07:12:51 +08:00
|
|
|
if (Variable->getStorageClass() != SC_Register)
|
|
|
|
return Constraint;
|
2010-12-31 06:59:32 +08:00
|
|
|
AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
|
|
|
|
if (!Attr)
|
|
|
|
return Constraint;
|
2011-07-23 18:55:15 +08:00
|
|
|
StringRef Register = Attr->getLabel();
|
2011-01-02 05:47:03 +08:00
|
|
|
assert(Target.isValidGCCRegisterName(Register));
|
2011-06-17 09:53:34 +08:00
|
|
|
// We're using validateOutputConstraint here because we only care if
|
|
|
|
// this is a register constraint.
|
|
|
|
TargetInfo::ConstraintInfo Info(Constraint, "");
|
|
|
|
if (Target.validateOutputConstraint(Info) &&
|
|
|
|
!Info.allowsRegister()) {
|
2010-12-31 06:59:32 +08:00
|
|
|
CGM.ErrorUnsupported(&Stmt, "__asm__");
|
|
|
|
return Constraint;
|
|
|
|
}
|
2011-06-21 08:07:10 +08:00
|
|
|
// Canonicalize the register here before returning it.
|
|
|
|
Register = Target.getNormalizedGCCRegisterName(Register);
|
2010-12-31 06:59:32 +08:00
|
|
|
return "{" + Register.str() + "}";
|
|
|
|
}
|
|
|
|
|
2010-07-16 08:55:21 +08:00
|
|
|
/// EmitAsmInputLValue - Emit the value of an asm operand that is already an
/// lvalue, appending '*' to ConstraintStr when the operand is passed
/// indirectly (by address) rather than by value.
llvm::Value*
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
                                    LValue InputValue, QualType InputType,
                                    std::string &ConstraintStr,
                                    SourceLocation Loc) {
  // A memory-only operand is always handed over by address.
  if (!Info.allowsRegister() && Info.allowsMemory()) {
    ConstraintStr += '*';
    return InputValue.getAddress();
  }

  // Register (or register-preferring) operand: scalars are simply loaded.
  if (CodeGenFunction::hasScalarEvaluationKind(InputType))
    return EmitLoadOfLValue(InputValue, Loc).getScalarVal();

  // An aggregate small enough to fit in a power-of-two integer register is
  // reinterpreted as that integer and loaded by value; anything larger goes
  // by address.
  llvm::Type *ValTy = ConvertType(InputType);
  uint64_t BitSize = CGM.getDataLayout().getTypeSizeInBits(ValTy);
  if (BitSize > 64 || !llvm::isPowerOf2_64(BitSize)) {
    ConstraintStr += '*';
    return InputValue.getAddress();
  }

  llvm::Type *IntTy = llvm::IntegerType::get(getLLVMContext(), BitSize);
  llvm::Value *Addr =
      Builder.CreateBitCast(InputValue.getAddress(),
                            llvm::PointerType::getUnqual(IntTy));
  return Builder.CreateLoad(Addr);
}
|
|
|
|
|
2012-08-24 04:00:18 +08:00
|
|
|
/// EmitAsmInput - Emit one asm "input" operand, returning the llvm::Value to
/// pass and updating ConstraintStr for indirect operands.
llvm::Value* CodeGenFunction::EmitAsmInput(
                                         const TargetInfo::ConstraintInfo &Info,
                                           const Expr *InputExpr,
                                           std::string &ConstraintStr) {
  // A scalar operand that may live in a register is emitted as an rvalue.
  bool MayUseRegister = Info.allowsRegister() || !Info.allowsMemory();
  if (MayUseRegister &&
      CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
    return EmitScalarExpr(InputExpr);

  // Everything else is emitted as an lvalue and handed to the lvalue path,
  // which decides whether to pass by value or by address.
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue LV = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, LV, InputExpr->getType(), ConstraintStr,
                            InputExpr->getExprLoc());
}
|
|
|
|
|
2010-11-17 13:58:54 +08:00
|
|
|
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
|
2010-11-17 16:25:26 +08:00
|
|
|
/// asm call instruction. The !srcloc MDNode contains a list of constant
|
|
|
|
/// integers which are the source locations of the start of each line in the
|
|
|
|
/// asm.
|
2010-11-17 13:58:54 +08:00
|
|
|
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
|
|
|
|
CodeGenFunction &CGF) {
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<llvm::Value *, 8> Locs;
|
2010-11-17 16:25:26 +08:00
|
|
|
// Add the location of the first line to the MDNode.
|
|
|
|
Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
|
|
|
|
Str->getLocStart().getRawEncoding()));
|
2011-07-23 18:55:15 +08:00
|
|
|
StringRef StrVal = Str->getString();
|
2010-11-17 16:25:26 +08:00
|
|
|
if (!StrVal.empty()) {
|
|
|
|
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
|
2012-03-11 15:00:24 +08:00
|
|
|
const LangOptions &LangOpts = CGF.CGM.getLangOpts();
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2010-11-17 16:25:26 +08:00
|
|
|
// Add the location of the start of each subsequent line of the asm to the
|
|
|
|
// MDNode.
|
|
|
|
for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
|
|
|
|
if (StrVal[i] != '\n') continue;
|
|
|
|
SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
|
2013-04-17 06:48:15 +08:00
|
|
|
CGF.getTarget());
|
2010-11-17 16:25:26 +08:00
|
|
|
Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
|
|
|
|
LineLoc.getRawEncoding()));
|
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
}
|
|
|
|
|
2011-04-22 03:59:12 +08:00
|
|
|
return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
|
2010-11-17 13:58:54 +08:00
|
|
|
}
|
|
|
|
|
2012-08-29 02:54:39 +08:00
|
|
|
/// EmitAsmStmt - Emit LLVM IR for a (GCC- or MS-style) inline asm statement:
/// validate and lower the operand constraints, emit the operand values,
/// build the llvm::InlineAsm callee and the call to it, then store any
/// by-value register results back into their destination lvalues.
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
  // Assemble the final asm string.
  std::string AsmString = S.generateAsmString(getContext());

  // Get all the output and input constraints together.
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;

  // Parse/validate every output constraint.  Operand names only exist for
  // GCC-style asm.
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getOutputName(i);
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
    assert(IsValid && "Failed to parse output constraint");
    OutputConstraintInfos.push_back(Info);
  }

  // Parse/validate every input constraint (inputs may reference outputs,
  // hence the output list is passed in).
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getInputName(i);
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
    bool IsValid =
      getTarget().validateInputConstraint(OutputConstraintInfos.data(),
                                          S.getNumOutputs(), Info);
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
    InputConstraintInfos.push_back(Info);
  }

  // The LLVM constraint string being accumulated for the whole asm.
  std::string Constraints;

  // Book-keeping for outputs returned by-value from the asm: where to store
  // each result, its AST type, its IR type as the asm returns it, and the IR
  // type it must be truncated/converted to before storing.
  std::vector<LValue> ResultRegDests;
  std::vector<QualType> ResultRegQualTys;
  std::vector<llvm::Type *> ResultRegTypes;
  std::vector<llvm::Type *> ResultTruncRegTypes;
  std::vector<llvm::Type *> ArgTypes;
  std::vector<llvm::Value*> Args;

  // Keep track of inout constraints.
  std::string InOutConstraints;
  std::vector<llvm::Value*> InOutArgs;
  std::vector<llvm::Type*> InOutArgTypes;

  // Process the outputs.
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];

    // Simplify the output constraint.
    // (c_str() + 1 skips the leading '=' / '+' modifier.)
    std::string OutputConstraint(S.getOutputConstraint(i));
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
                                          getTarget());

    const Expr *OutExpr = S.getOutputExpr(i);
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());

    // A 'register' variable with an asm label pins this operand to that
    // register.
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
                                              getTarget(), CGM, S);

    LValue Dest = EmitLValue(OutExpr);
    if (!Constraints.empty())
      Constraints += ',';

    // If this is a register output, then make the inline asm return it
    // by-value.  If this is a memory result, return the value by-reference.
    if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
      Constraints += "=" + OutputConstraint;
      ResultRegQualTys.push_back(OutExpr->getType());
      ResultRegDests.push_back(Dest);
      ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
      ResultTruncRegTypes.push_back(ResultRegTypes.back());

      // If this output is tied to an input, and if the input is larger, then
      // we need to set the actual result type of the inline asm node to be the
      // same as the input type.
      if (Info.hasMatchingInput()) {
        // Find the input operand that is tied to this output.
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
      // Give the target a chance to adjust the result type; a null return
      // means the type is invalid for this constraint.
      if (llvm::Type* AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                 ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
            << OutExpr->getType() << OutputConstraint;
      }
    } else {
      // Memory output: pass the destination address as an argument ("=*").
      ArgTypes.push_back(Dest.getAddress()->getType());
      Args.push_back(Dest.getAddress());
      Constraints += "=*";
      Constraints += OutputConstraint;
    }

    // A "+" (read-write) output also contributes an input operand, recorded
    // separately and appended after the plain inputs.
    if (Info.isReadWrite()) {
      InOutConstraints += ',';

      const Expr *InputExpr = S.getOutputExpr(i);
      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
                                            InOutConstraints,
                                            InputExpr->getExprLoc());

      if (llvm::Type* AdjTy =
          getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                               Arg->getType()))
        Arg = Builder.CreateBitCast(Arg, AdjTy);

      // Register in-outs are tied back to the output by number; memory
      // in-outs repeat the constraint text.
      if (Info.allowsRegister())
        InOutConstraints += llvm::utostr(i);
      else
        InOutConstraints += OutputConstraint;

      InOutArgTypes.push_back(Arg->getType());
      InOutArgs.push_back(Arg);
    }
  }

  unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();

  // Process the inputs.
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    const Expr *InputExpr = S.getInputExpr(i);

    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];

    if (!Constraints.empty())
      Constraints += ',';

    // Simplify the input constraint.
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                         &OutputConstraintInfos);

    InputConstraint =
      AddVariableConstraints(InputConstraint,
                             *InputExpr->IgnoreParenNoopCasts(getContext()),
                             getTarget(), CGM, S);

    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);

    // If this input argument is tied to a larger output result, extend the
    // input to be the same size as the output.  The LLVM backend wants to see
    // the input and output of a matching constraint be the same size.  Note
    // that GCC does not define what the top bits are here.  We use zext because
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
    if (Info.hasTiedOperand()) {
      unsigned Output = Info.getTiedOperand();
      QualType OutputType = S.getOutputExpr(Output)->getType();
      QualType InputTy = InputExpr->getType();

      if (getContext().getTypeSize(OutputType) >
          getContext().getTypeSize(InputTy)) {
        // Use ptrtoint as appropriate so that we can do our extension.
        if (isa<llvm::PointerType>(Arg->getType()))
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
        llvm::Type *OutputTy = ConvertType(OutputType);
        if (isa<llvm::IntegerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, OutputTy);
        else if (isa<llvm::PointerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
        else {
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
          Arg = Builder.CreateFPExt(Arg, OutputTy);
        }
      }
    }
    if (llvm::Type* AdjTy =
          getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
                                               Arg->getType()))
      Arg = Builder.CreateBitCast(Arg, AdjTy);
    else
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
        << InputExpr->getType() << InputConstraint;

    ArgTypes.push_back(Arg->getType());
    Args.push_back(Arg);
    Constraints += InputConstraint;
  }

  // Append the "input" part of inout constraints last.
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
    ArgTypes.push_back(InOutArgTypes[i]);
    Args.push_back(InOutArgs[i]);
  }
  Constraints += InOutConstraints;

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef Clobber = S.getClobber(i);

    // "memory" and "cc" are special tokens; everything else is a register
    // name that must be canonicalized.
    if (Clobber != "memory" && Clobber != "cc")
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);

    if (i != 0 || NumConstraints != 0)
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }

  // Add machine specific clobbers
  std::string MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }

  // The asm returns void, a single value, or a struct of all by-value
  // outputs.
  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(ResultType, ArgTypes, false);

  // An asm with no outputs is conservatively treated as side-effecting.
  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
    llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
  llvm::InlineAsm *IA =
    llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
                         /* IsAlignStack */ false, AsmDialect);
  llvm::CallInst *Result = Builder.CreateCall(IA, Args);
  Result->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::NoUnwind);

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.  FIXME: Handle metadata for MS-style inline asms.
  if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
    Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
                                                   *this));

  // Extract all of the register value results from the asm.
  std::vector<llvm::Value*> RegResults;
  if (ResultRegTypes.size() == 1) {
    RegResults.push_back(Result);
  } else {
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }

  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
    llvm::Value *Tmp = RegResults[i];

    // If the result type of the LLVM IR asm doesn't match the result type of
    // the expression, do the conversion.
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
      llvm::Type *TruncTy = ResultTruncRegTypes[i];

      // Truncate the integer result to the right size, note that TruncTy can be
      // a pointer.
      if (TruncTy->isFloatingPointTy())
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
        Tmp = Builder.CreateTrunc(Tmp,
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
        Tmp = Builder.CreatePtrToInt(Tmp,
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isIntegerTy()) {
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isVectorTy()) {
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
      }
    }

    EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
  }
}
|
2013-04-17 02:53:08 +08:00
|
|
|
|
2013-05-10 03:17:11 +08:00
|
|
|
/// Allocate a temporary for the capture record of \p S and emit the
/// initializer for each captured entity into its corresponding field.
/// Returns an lvalue designating the fully initialized temporary.
static LValue InitCapturedStruct(CodeGenFunction &CGF, const CapturedStmt &S) {
  const RecordDecl *Record = S.getCapturedRecordDecl();
  QualType CaptureTy = CGF.getContext().getRecordType(Record);

  // Create the local aggregate that holds the captures.
  llvm::Value *Slot = CGF.CreateMemTemp(CaptureTy, "agg.captured");
  LValue SlotLV = CGF.MakeNaturalAlignAddrLValue(Slot, CaptureTy);

  // Walk the capture initializers and the record's fields in lockstep,
  // emitting each initializer into the matching field.
  RecordDecl::field_iterator Field = Record->field_begin();
  CapturedStmt::capture_init_iterator Init = S.capture_init_begin();
  CapturedStmt::capture_init_iterator InitEnd = S.capture_init_end();
  while (Init != InitEnd) {
    LValue FieldLV = CGF.EmitLValueForFieldInitialization(SlotLV, *Field);
    CGF.EmitInitializerForField(*Field, FieldLV, *Init,
                                ArrayRef<VarDecl *>());
    ++Init;
    ++Field;
  }

  return SlotLV;
}
|
|
|
|
|
|
|
|
/// Generate an outlined function for the body of a CapturedStmt, store any
|
|
|
|
/// captured variables into the captured struct, and call the outlined function.
|
|
|
|
llvm::Function *
|
|
|
|
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
|
|
|
|
const CapturedDecl *CD = S.getCapturedDecl();
|
|
|
|
const RecordDecl *RD = S.getCapturedRecordDecl();
|
|
|
|
assert(CD->hasBody() && "missing CapturedDecl body");
|
|
|
|
|
|
|
|
LValue CapStruct = InitCapturedStruct(*this, S);
|
|
|
|
|
|
|
|
// Emit the CapturedDecl
|
|
|
|
CodeGenFunction CGF(CGM, true);
|
|
|
|
CGF.CapturedStmtInfo = new CGCapturedStmtInfo(S, K);
|
2013-10-02 10:29:49 +08:00
|
|
|
llvm::Function *F = CGF.GenerateCapturedStmtFunction(CD, RD, S.getLocStart());
|
2013-05-10 03:17:11 +08:00
|
|
|
delete CGF.CapturedStmtInfo;
|
|
|
|
|
|
|
|
// Emit call to the helper function.
|
|
|
|
EmitCallOrInvoke(F, CapStruct.getAddress());
|
|
|
|
|
|
|
|
return F;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Creates the outlined function for a CapturedStmt.
///
/// \param CD  the CapturedDecl whose body becomes the helper's body; its
///            parameters become the helper's parameters.
/// \param RD  the record type describing the captured variables; used here
///            only to type the context pointer when 'this' is captured.
/// \param Loc location used when loading the captured 'this' value.
/// \returns the newly created, internal-linkage helper function.
///
/// Requires CapturedStmtInfo to already be set on this CodeGenFunction.
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedDecl *CD,
                                              const RecordDecl *RD,
                                              SourceLocation Loc) {
  assert(CapturedStmtInfo &&
    "CapturedStmtInfo should be set when generating the captured function");

  // Build the argument list from the CapturedDecl's parameters (the first
  // of which is the context parameter — see getContextParam() use below).
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration: void return, non-variadic, with
  // internal linkage so the helper is local to this module.
  FunctionType::ExtInfo ExtInfo;
  const CGFunctionInfo &FuncInfo =
    CGM.getTypes().arrangeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
                                              /*IsVariadic=*/false);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);

  // Generate the function. StartFunction must run before the LocalDeclMap
  // lookup below, since it is what emits the parameter allocas.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getBody()->getLocStart());

  // Set the context parameter in CapturedStmtInfo: load the pointer to the
  // capture record so field accesses in the body can go through it.
  llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()];
  assert(DeclPtr && "missing context parameter for CapturedStmt");
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // If 'this' is captured, load it into CXXThisValue from its field in the
  // capture record so the body's member accesses resolve correctly.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue LV = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
                                           Ctx.getTagDeclType(RD));
    LValue ThisLValue = EmitLValueForField(LV, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  // Emit the body via the CapturedStmtInfo hook (allows region kinds to
  // customize emission), then close out the function.
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}
|