//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
|
2009-02-13 16:11:52 +08:00
|
|
|
if (CGDebugInfo *DI = getDebugInfo()) {
|
2011-10-14 05:45:18 +08:00
|
|
|
SourceLocation Loc;
|
2018-08-10 05:08:08 +08:00
|
|
|
Loc = S->getBeginLoc();
|
2011-10-14 05:45:18 +08:00
|
|
|
DI->EmitLocation(Builder, Loc);
|
2013-05-03 01:30:20 +08:00
|
|
|
|
2014-01-08 06:05:52 +08:00
|
|
|
LastStopPoint = Loc;
|
2008-11-12 16:21:33 +08:00
|
|
|
}
|
|
|
|
}
/// EmitStmt - Emit the LLVM IR for the statement \p S, applying any loop
/// attributes in \p Attrs where relevant. It is legal to call this even when
/// there is no current insertion point; unreachable, label-free statements
/// are skipped entirely.
void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  // Record the statement so PGO counter lookups attribute to it.
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

  // Expand one case label per concrete expression class so that every
  // expression statement funnels into the generic emission below.
#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  // @catch and @finally never reach here on their own; EmitObjCAtTryStmt
  // consumes them as part of the enclosing @try.
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  // OpenMP directives: one emission helper per directive kind.
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  }
}
bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
|
|
|
|
switch (S->getStmtClass()) {
|
|
|
|
default: return false;
|
|
|
|
case Stmt::NullStmtClass: break;
|
|
|
|
case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
|
2009-07-19 14:58:07 +08:00
|
|
|
case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
|
2008-11-12 16:21:33 +08:00
|
|
|
case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
|
2012-04-14 08:33:13 +08:00
|
|
|
case Stmt::AttributedStmtClass:
|
|
|
|
EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
|
2008-11-12 16:21:33 +08:00
|
|
|
case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
|
|
|
|
case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
|
|
|
|
case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
|
|
|
|
case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
|
|
|
|
case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
|
2015-02-13 07:16:11 +08:00
|
|
|
case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
|
2008-11-12 16:21:33 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
|
|
|
|
/// this captures the expression result of the last sub-statement and returns it
|
|
|
|
/// (for use by the statement expression extension).
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
|
|
|
|
AggValueSlot AggSlot) {
|
2009-03-05 16:04:57 +08:00
|
|
|
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
|
|
|
|
"LLVM IR generation of compound statement ('{}')");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2012-02-23 08:43:07 +08:00
|
|
|
// Keep track of the current cleanup stack depth, including debug scopes.
|
|
|
|
LexicalScope Scope(*this, S.getSourceRange());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2013-01-27 06:16:26 +08:00
|
|
|
return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
|
|
|
|
}
/// EmitCompoundStmtWithoutScope - Emit the body of a compound statement
/// without opening a lexical (cleanup/debug) scope for the braces. When
/// \p GetLast is true the compound statement is the body of a statement
/// expression, and the value of its result sub-statement is captured and
/// returned in a temporary (invalid Address for aggregate results, which go
/// into \p AggSlot instead).
Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  // Temporary holding the statement expression's scalar/complex result;
  // stays invalid unless the non-aggregate result path below runs.
  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      // The result expression must be emitted from a live insertion point.
      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        // Aggregate results go directly into the caller-provided slot.
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      // Ordinary sub-statement: emit it for its side effects.
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
|
|
|
|
|
2009-04-01 12:37:47 +08:00
|
|
|
/// If \p BB is a block that does nothing but branch unconditionally to a
/// single successor, fold it away by retargeting its users directly.
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  // Bail out when any cleanups are active: the block may be referenced by
  // the scope map or a cleanup entry, and erasing it would require updating
  // that bookkeeping as well, which isn't worth the trouble here.
  if (!EHStack.empty())
    return;

  // The block is only a trivial forwarder when its sole content is a single
  // unconditional branch.
  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(BB->getTerminator());
  if (!Br || !Br->isUnconditional())
    return;

  // Anything before the branch means the block does real work; keep it.
  if (Br->getIterator() != BB->begin())
    return;

  // Point every use of the block at the branch target, then drop both the
  // branch and the now-dead block.
  llvm::BasicBlock *Target = Br->getSuccessor(0);
  BB->replaceAllUsesWith(Target);
  Br->eraseFromParent();
  BB->eraseFromParent();
}
|
|
|
|
|
2008-11-13 09:24:05 +08:00
|
|
|
/// Terminate the current block (falling through into \p BB if needed),
/// insert \p BB into the function, and make it the new insertion point.
/// If \p IsFinished is set and the block ends up unreferenced, it is
/// discarded instead of being inserted.
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  // Remember where we were emitting before EmitBranch clears the
  // insertion point.
  llvm::BasicBlock *Pred = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  // A finished block with no uses was never reachable; discard it.
  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block right after the predecessor when we can, keeping the
  // block list roughly in source order; otherwise append it at the end of
  // the function.
  if (Pred && Pred->getParent())
    CurFn->getBasicBlockList().insertAfter(Pred->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);

  Builder.SetInsertPoint(BB);
}
|
|
|
|
|
|
|
|
/// Emit an unconditional branch from the current block to \p Target, but
/// only if the current block is "real": if there is no insertion point, or
/// the block is already terminated, nothing is emitted. Either way the
/// insertion point is cleared afterwards.
void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  llvm::BasicBlock *Src = Builder.GetInsertBlock();

  // Only a live, unterminated block needs a fall-through branch; a missing
  // insert point or an existing terminator means there is nothing to
  // connect.
  if (Src && !Src->getTerminator())
    Builder.CreateBr(Target);

  Builder.ClearInsertionPoint();
}
|
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
/// Insert \p block into the function immediately after the first
/// instruction that references it (or at the end of the function if it has
/// no instruction users yet), then start emitting into it.
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool placed = false;

  // Look for the first instruction using the block and place the block
  // right after that instruction's parent.
  for (llvm::User *user : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(user)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      placed = true;
      break;
    }
  }

  // No instruction refers to the block yet; just append it.
  if (!placed)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
/// Return the JumpDest for the label \p D, creating a forward-referenced
/// destination on first use. The created block is not yet inserted into
/// the function, and its scope depth stays invalid until EmitLabel pins
/// the label to a real cleanup scope.
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  // Already referenced (or emitted): reuse the cached destination.
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}
|
|
|
|
|
2011-02-17 15:39:24 +08:00
|
|
|
/// Emit the (possibly forward-referenced) label \p D at the current point
/// and switch emission into its block.
void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    // getJumpDestForLabel leaves the scope depth invalid; a valid depth
    // here would mean the label was emitted twice.
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().getDebugInfo() >=
        codegenoptions::LimitedDebugInfo) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  // Count executions of the labeled statement for PGO.
  incrementProfileCounter(D->getStmt());
}
|
|
|
|
|
2013-03-23 14:43:35 +08:00
|
|
|
/// Change the cleanup scope of the labels in this lexical scope to
|
|
|
|
/// match the scope of the enclosing context.
|
|
|
|
void CodeGenFunction::LexicalScope::rescopeLabels() {
|
|
|
|
assert(!Labels.empty());
|
|
|
|
EHScopeStack::stable_iterator innermostScope
|
|
|
|
= CGF.EHStack.getInnermostNormalCleanup();
|
|
|
|
|
|
|
|
// Change the scope depth of all the labels.
|
|
|
|
for (SmallVectorImpl<const LabelDecl*>::const_iterator
|
|
|
|
i = Labels.begin(), e = Labels.end(); i != e; ++i) {
|
|
|
|
assert(CGF.LabelMap.count(*i));
|
|
|
|
JumpDest &dest = CGF.LabelMap.find(*i)->second;
|
|
|
|
assert(dest.getScopeDepth().isValid());
|
|
|
|
assert(innermostScope.encloses(dest.getScopeDepth()));
|
|
|
|
dest.setScopeDepth(innermostScope);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reparent the labels if the new scope also has cleanups.
|
|
|
|
if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
|
|
|
|
ParentScope->Labels.append(Labels.begin(), Labels.end());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-27 04:23:23 +08:00
|
|
|
|
|
|
|
/// Emit a labeled statement: bind the label at the current point, then
/// emit the statement it labels.
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}
|
|
|
|
|
2012-04-14 08:33:13 +08:00
|
|
|
/// Emit an attributed statement by forwarding its attributes to EmitStmt,
/// so attribute-aware emission (e.g. loop hints) can honor them.
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
|
|
|
|
|
2007-06-02 02:02:12 +08:00
|
|
|
/// Emit a direct goto as a branch through any intervening cleanups to the
/// label's jump destination.
void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}
|
|
|
|
|
2009-10-13 14:55:33 +08:00
|
|
|
|
2008-08-05 00:51:22 +08:00
|
|
|
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
|
2011-02-17 15:39:24 +08:00
|
|
|
if (const LabelDecl *Target = S.getConstantTarget()) {
|
2010-10-28 16:53:48 +08:00
|
|
|
EmitBranchThroughCleanup(getJumpDestForLabel(Target));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2009-11-07 02:10:47 +08:00
|
|
|
// Ensure that we have an i8* for our PHI node.
|
2009-10-29 07:59:40 +08:00
|
|
|
llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
|
2011-02-08 16:22:06 +08:00
|
|
|
Int8PtrTy, "addr");
|
2009-10-13 14:55:33 +08:00
|
|
|
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
|
|
|
|
|
|
|
|
// Get the basic block for the indirect goto.
|
|
|
|
llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2009-10-13 14:55:33 +08:00
|
|
|
// The first instruction in the block has to be the PHI for the switch dest,
|
|
|
|
// add an entry for this branch.
|
|
|
|
cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2009-10-13 14:55:33 +08:00
|
|
|
EmitBranch(IndGotoBB);
|
2008-08-05 00:51:22 +08:00
|
|
|
}
|
|
|
|
|
2008-11-11 15:24:28 +08:00
|
|
|
/// Emit an if statement, constant-folding the condition when possible to
/// avoid emitting the dead arm entirely.
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  // C++17 init-statement, if any, runs before the condition.
  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    // (For `if constexpr` the discarded arm may legally be skipped even if
    // it contains labels.)
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
                       getProfileCount(S.getThen()));

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}
|
|
|
|
|
2014-06-06 20:40:24 +08:00
|
|
|
/// Emit a while loop: a header block that re-evaluates the condition on
/// every iteration, a body block, and an exit block.  The header doubles
/// as the continue target and the exit as the break target.
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Record the loop (with its attributes and source range) so loop
  // metadata can be attached.
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If the condition variable needs cleanups, branch to a dedicated exit
    // block that runs them before reaching the real loop exit.
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}
|
|
|
|
|
2014-06-06 20:40:24 +08:00
|
|
|
/// Emit a do-while loop: the body runs first, then the condition block
/// (the continue target) decides whether to take the backedge.  Loop
/// metadata is attached to the backedge only, not the entry branch.
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  // Needed below to derive the backedge count from the body count.
  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // Push the loop only now, so its metadata lands on the backedge branch
  // rather than on the pre-header branch into the body.
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}
|
|
|
|
|
2014-06-06 20:40:24 +08:00
|
|
|
/// EmitForStmt - Emit a C/C++ 'for' statement, lowering it to the block
/// structure: for.cond -> for.body -> [for.inc ->] for.cond ... for.end.
/// A dedicated "for.inc" continue target is created only when the statement
/// has an increment expression; otherwise 'continue' branches straight back
/// to the condition block.
void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  // Scope covering the entire statement, including variables declared in the
  // init-statement; cleaned up explicitly before emitting the exit block.
  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  // Attach loop attributes and debug locations to this loop; paired with the
  // LoopStack.pop() near the end of this function.
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    // Branch weights are derived from PGO profile counts when available.
    Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  // Run the condition variable's cleanups on every trip around the loop,
  // before branching back to the condition block.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
|
2007-06-05 11:59:43 +08:00
|
|
|
|
2014-06-06 20:40:24 +08:00
|
|
|
/// EmitCXXForRangeStmt - Emit a C++11 range-based for statement.  The
/// desugared range/begin/end declarations are emitted once before the loop;
/// the loop variable is emitted inside the body scope so it is initialized
/// (and cleaned up) on every iteration.
void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  // Scope covering the whole statement, including the range/begin/end
  // variables; cleaned up explicitly before emitting the exit block.
  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  // Attach loop attributes and debug locations to this loop; paired with the
  // LoopStack.pop() near the end of this function.
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  // Branch weights are derived from PGO profile counts when available.
  Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
|
|
|
|
|
2008-09-24 12:00:38 +08:00
|
|
|
/// EmitReturnOfRValue - Store the given RValue into the function's return
/// slot and branch to the return block through any pending cleanups.
/// Dispatches on the RValue's kind: scalar, aggregate, or complex.
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    // Scalars are stored directly into the return slot.
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    // Aggregates are copied from their temporary into the return slot.
    LValue DestLV = MakeAddrLValue(ReturnValue, Ty);
    LValue SrcLV = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(DestLV, SrcLV, Ty, getOverlapForReturnValue());
  } else {
    // Complex values are stored as an initialization of the return slot.
    LValue DestLV = MakeAddrLValue(ReturnValue, Ty);
    EmitStoreOfComplex(RV.getComplexVal(), DestLV, /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}
|
|
|
|
|
2007-06-02 11:19:07 +08:00
|
|
|
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  // For -fsanitize=return-value-check style instrumentation: record the
  // source location of this return in a private global and store its address
  // into the return-location slot.
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Treat block literals in a return expression as if they appeared
  // in their own scope.  This permits a small, easily-implemented
  // exception to our over-conservative rules about not jumping to
  // statements following block literals with non-trivial cleanups.
  RunCleanupsScope cleanupScope(*this);
  if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
    enterFullExpression(fe);
    RV = fe->getSubExpr();
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  if (getLangOpts().ElideConstructors &&
      S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    // Store the return value according to its evaluation kind.
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      // Aggregates are evaluated directly into the return slot, marked as
      // destructed so cleanups do not run on the stored result.
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  // Statistics: count all return expressions, and separately those whose
  // operand is absent or constant-evaluatable.
  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  // Run the block-literal cleanup scope before branching to the return block.
  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
|
|
|
|
|
2007-06-09 09:20:56 +08:00
|
|
|
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
|
2011-06-04 08:38:02 +08:00
|
|
|
// As long as debug info is modeled with instructions, we have to ensure we
|
|
|
|
// have a place to insert here and write the stop point here.
|
2012-04-10 13:04:07 +08:00
|
|
|
if (HaveInsertPoint())
|
2011-06-04 08:38:02 +08:00
|
|
|
EmitStopPoint(&S);
|
|
|
|
|
2014-03-15 01:01:24 +08:00
|
|
|
for (const auto *I : S.decls())
|
|
|
|
EmitDecl(*I);
|
2007-07-12 23:43:07 +08:00
|
|
|
}
|
2007-07-17 05:28:45 +08:00
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
|
2007-07-17 05:28:45 +08:00
|
|
|
assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
|
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
// If this code is reachable then emit a stop point (if generating
|
|
|
|
// debug info). We have to do this ourselves because we are on the
|
|
|
|
// "simple" statement path.
|
|
|
|
if (HaveInsertPoint())
|
|
|
|
EmitStopPoint(&S);
|
2009-02-08 17:22:19 +08:00
|
|
|
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
|
2007-07-17 05:28:45 +08:00
|
|
|
}
|
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
|
2007-07-17 05:28:45 +08:00
|
|
|
assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
|
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
// If this code is reachable then emit a stop point (if generating
|
|
|
|
// debug info). We have to do this ourselves because we are on the
|
|
|
|
// "simple" statement path.
|
|
|
|
if (HaveInsertPoint())
|
|
|
|
EmitStopPoint(&S);
|
2009-02-08 17:22:19 +08:00
|
|
|
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
|
2007-07-17 05:28:45 +08:00
|
|
|
}
|
2007-10-05 07:45:31 +08:00
|
|
|
|
2007-10-09 04:57:48 +08:00
|
|
|
/// EmitCaseStmtRange - If case statement range is not too big then
|
|
|
|
/// add multiple cases to switch instruction, one for each value within
|
|
|
|
/// the range. If range is too big then emit "if" condition check.
|
|
|
|
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
|
2008-07-24 09:18:41 +08:00
|
|
|
assert(S.getRHS() && "Expected RHS value in CaseStmt");
|
2007-10-09 04:57:48 +08:00
|
|
|
|
2011-10-11 02:28:20 +08:00
|
|
|
llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
|
|
|
|
llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
|
2008-07-24 09:18:41 +08:00
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
// Emit the code for this case. We do this first to make sure it is
|
|
|
|
// properly chained from our predecessor before generating the
|
|
|
|
// switch machinery to enter this block.
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
|
2015-04-24 07:06:47 +08:00
|
|
|
EmitBlockWithFallThrough(CaseDest, &S);
|
2008-07-25 09:11:38 +08:00
|
|
|
EmitStmt(S.getSubStmt());
|
|
|
|
|
2008-07-24 09:18:41 +08:00
|
|
|
// If range is empty, do nothing.
|
|
|
|
if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
|
|
|
|
return;
|
2007-10-09 04:57:48 +08:00
|
|
|
|
|
|
|
llvm::APInt Range = RHS - LHS;
|
2008-07-25 09:11:38 +08:00
|
|
|
// FIXME: parameters such as this should not be hardcoded.
|
2007-10-09 04:57:48 +08:00
|
|
|
if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
|
|
|
|
// Range is small enough to add multiple switch instruction cases.
|
2015-04-24 07:06:47 +08:00
|
|
|
uint64_t Total = getProfileCount(&S);
|
2014-01-07 06:27:43 +08:00
|
|
|
unsigned NCases = Range.getZExtValue() + 1;
|
2014-01-14 05:24:25 +08:00
|
|
|
// We only have one region counter for the entire set of cases here, so we
|
|
|
|
// need to divide the weights evenly between the generated cases, ensuring
|
2014-02-18 03:21:03 +08:00
|
|
|
// that the total weight is preserved. E.g., a weight of 5 over three cases
|
2014-01-14 05:24:25 +08:00
|
|
|
// will be distributed as weights of 2, 2, and 1.
|
2014-01-07 06:27:43 +08:00
|
|
|
uint64_t Weight = Total / NCases, Rem = Total % NCases;
|
|
|
|
for (unsigned I = 0; I != NCases; ++I) {
|
|
|
|
if (SwitchWeights)
|
|
|
|
SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
|
|
|
|
if (Rem)
|
|
|
|
Rem--;
|
2011-04-20 04:53:45 +08:00
|
|
|
SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
|
2017-04-20 05:02:45 +08:00
|
|
|
++LHS;
|
2007-10-06 04:54:07 +08:00
|
|
|
}
|
2007-10-09 04:57:48 +08:00
|
|
|
return;
|
2009-09-09 23:08:12 +08:00
|
|
|
}
|
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
// The range is too big. Emit "if" condition into a new block,
|
|
|
|
// making sure to save and restore the current insertion point.
|
|
|
|
llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
|
|
|
|
|
|
|
|
// Push this test onto the chain of range checks (which terminates
|
|
|
|
// in the default basic block). The switch's default will be changed
|
|
|
|
// to the top of this chain after switch emission is complete.
|
|
|
|
llvm::BasicBlock *FalseDest = CaseRangeBlock;
|
2008-11-11 10:29:29 +08:00
|
|
|
CaseRangeBlock = createBasicBlock("sw.caserange");
|
2008-07-25 09:11:38 +08:00
|
|
|
|
|
|
|
CurFn->getBasicBlockList().push_back(CaseRangeBlock);
|
|
|
|
Builder.SetInsertPoint(CaseRangeBlock);
|
2007-10-09 04:57:48 +08:00
|
|
|
|
|
|
|
// Emit range check.
|
2009-09-09 23:08:12 +08:00
|
|
|
llvm::Value *Diff =
|
2011-09-28 05:06:10 +08:00
|
|
|
Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
|
2009-09-09 23:08:12 +08:00
|
|
|
llvm::Value *Cond =
|
2011-04-20 04:53:45 +08:00
|
|
|
Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
|
2014-01-07 06:27:43 +08:00
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::MDNode *Weights = nullptr;
|
2014-01-07 06:27:43 +08:00
|
|
|
if (SwitchWeights) {
|
2015-04-24 07:06:47 +08:00
|
|
|
uint64_t ThisCount = getProfileCount(&S);
|
2014-01-07 06:27:43 +08:00
|
|
|
uint64_t DefaultCount = (*SwitchWeights)[0];
|
2015-05-02 13:00:55 +08:00
|
|
|
Weights = createProfileWeights(ThisCount, DefaultCount);
|
2014-01-07 06:27:43 +08:00
|
|
|
|
|
|
|
// Since we're chaining the switch default through each large case range, we
|
|
|
|
// need to update the weight for the default, ie, the first case, to include
|
|
|
|
// this case.
|
|
|
|
(*SwitchWeights)[0] += ThisCount;
|
|
|
|
}
|
|
|
|
Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
|
2007-10-09 04:57:48 +08:00
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
// Restore the appropriate insertion point.
|
2008-11-12 07:11:34 +08:00
|
|
|
if (RestoreBB)
|
|
|
|
Builder.SetInsertPoint(RestoreBB);
|
|
|
|
else
|
|
|
|
Builder.ClearInsertionPoint();
|
2007-10-09 04:57:48 +08:00
|
|
|
}
|
2007-10-06 04:54:07 +08:00
|
|
|
|
2007-10-09 04:57:48 +08:00
|
|
|
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
|
2012-01-17 01:35:57 +08:00
|
|
|
// If there is no enclosing switch instance that we're aware of, then this
|
|
|
|
// case statement and its block can be elided. This situation only happens
|
|
|
|
// when we've constant-folded the switch, are emitting the constant case,
|
2013-11-22 18:20:40 +08:00
|
|
|
// and part of the constant case includes another case statement. For
|
2012-01-17 01:35:57 +08:00
|
|
|
// instance: switch (4) { case 4: do { case 5: } while (1); }
|
2012-01-18 07:55:19 +08:00
|
|
|
if (!SwitchInsn) {
|
|
|
|
EmitStmt(S.getSubStmt());
|
2012-01-17 01:35:57 +08:00
|
|
|
return;
|
2012-01-18 07:55:19 +08:00
|
|
|
}
|
2012-01-17 01:35:57 +08:00
|
|
|
|
2011-04-17 08:54:30 +08:00
|
|
|
// Handle case ranges.
|
2007-10-09 04:57:48 +08:00
|
|
|
if (S.getRHS()) {
|
|
|
|
EmitCaseStmtRange(S);
|
|
|
|
return;
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-04-20 04:53:45 +08:00
|
|
|
llvm::ConstantInt *CaseVal =
|
2011-10-11 02:28:20 +08:00
|
|
|
Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
|
2011-04-20 04:53:45 +08:00
|
|
|
|
2014-01-21 08:35:11 +08:00
|
|
|
// If the body of the case is just a 'break', try to not emit an empty block.
|
|
|
|
// If we're profiling or we're not optimizing, leave the block in for better
|
|
|
|
// debug and coverage analysis.
|
2016-02-05 02:39:09 +08:00
|
|
|
if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
|
2014-01-21 08:35:11 +08:00
|
|
|
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
|
|
|
|
isa<BreakStmt>(S.getSubStmt())) {
|
|
|
|
JumpDest Block = BreakContinueStack.back().BreakBlock;
|
|
|
|
|
|
|
|
// Only do this optimization if there are no cleanups that need emitting.
|
|
|
|
if (isObviouslyBranchWithoutCleanups(Block)) {
|
|
|
|
if (SwitchWeights)
|
2015-04-24 07:06:47 +08:00
|
|
|
SwitchWeights->push_back(getProfileCount(&S));
|
2014-01-21 08:35:11 +08:00
|
|
|
SwitchInsn->addCase(CaseVal, Block.getBlock());
|
|
|
|
|
|
|
|
// If there was a fallthrough into this case, make sure to redirect it to
|
|
|
|
// the end of the switch as well.
|
|
|
|
if (Builder.GetInsertBlock()) {
|
|
|
|
Builder.CreateBr(Block.getBlock());
|
|
|
|
Builder.ClearInsertionPoint();
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
|
2015-04-24 07:06:47 +08:00
|
|
|
EmitBlockWithFallThrough(CaseDest, &S);
|
2014-01-07 06:27:43 +08:00
|
|
|
if (SwitchWeights)
|
2015-04-24 07:06:47 +08:00
|
|
|
SwitchWeights->push_back(getProfileCount(&S));
|
2011-04-20 04:53:45 +08:00
|
|
|
SwitchInsn->addCase(CaseVal, CaseDest);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
add a special case for codegen that improves the case where we have
multiple sequential cases to a) not create tons of fall-through basic blocks
and b) not recurse deeply. This fixes codegen on 100K deep cases, and improves
codegen on moderate cases from this:
switch i32 %tmp, label %sw.epilog [
i32 1000, label %sw.bb
i32 1001, label %sw.bb1
i32 1002, label %sw.bb2
i32 1003, label %sw.bb3
i32 1004, label %sw.bb4
...
sw.bb: ; preds = %entry
br label %sw.bb1
sw.bb1: ; preds = %entry, %sw.bb
br label %sw.bb2
sw.bb2: ; preds = %entry, %sw.bb1
br label %sw.bb3
sw.bb3: ; preds = %entry, %sw.bb2
br label %sw.bb4
to:
switch i32 %tmp, label %sw.epilog [
i32 1000, label %sw.bb
i32 1001, label %sw.bb
i32 1002, label %sw.bb
i32 1003, label %sw.bb
i32 1004, label %sw.bb
sw.bb: ;; many preds
llvm-svn: 66015
2009-03-04 12:46:18 +08:00
|
|
|
// Recursively emitting the statement is acceptable, but is not wonderful for
|
|
|
|
// code where we have many case statements nested together, i.e.:
|
|
|
|
// case 1:
|
|
|
|
// case 2:
|
|
|
|
// case 3: etc.
|
|
|
|
// Handling this recursively will create a new block for each case statement
|
|
|
|
// that falls through to the next case which is IR intensive. It also causes
|
|
|
|
// deep recursion which can run into stack depth limitations. Handle
|
|
|
|
// sequential non-range case statements specially.
|
|
|
|
const CaseStmt *CurCase = &S;
|
|
|
|
const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
|
|
|
|
|
2011-04-20 04:53:45 +08:00
|
|
|
// Otherwise, iteratively add consecutive cases to this switch stmt.
|
2014-05-21 13:09:00 +08:00
|
|
|
while (NextCase && NextCase->getRHS() == nullptr) {
|
add a special case for codegen that improves the case where we have
multiple sequential cases to a) not create tons of fall-through basic blocks
and b) not recurse deeply. This fixes codegen on 100K deep cases, and improves
codegen on moderate cases from this:
switch i32 %tmp, label %sw.epilog [
i32 1000, label %sw.bb
i32 1001, label %sw.bb1
i32 1002, label %sw.bb2
i32 1003, label %sw.bb3
i32 1004, label %sw.bb4
...
sw.bb: ; preds = %entry
br label %sw.bb1
sw.bb1: ; preds = %entry, %sw.bb
br label %sw.bb2
sw.bb2: ; preds = %entry, %sw.bb1
br label %sw.bb3
sw.bb3: ; preds = %entry, %sw.bb2
br label %sw.bb4
to:
switch i32 %tmp, label %sw.epilog [
i32 1000, label %sw.bb
i32 1001, label %sw.bb
i32 1002, label %sw.bb
i32 1003, label %sw.bb
i32 1004, label %sw.bb
sw.bb: ;; many preds
llvm-svn: 66015
2009-03-04 12:46:18 +08:00
|
|
|
CurCase = NextCase;
|
2014-01-07 06:27:43 +08:00
|
|
|
llvm::ConstantInt *CaseVal =
|
2011-10-11 02:28:20 +08:00
|
|
|
Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
|
2014-01-07 06:27:43 +08:00
|
|
|
|
|
|
|
if (SwitchWeights)
|
2015-04-24 07:06:47 +08:00
|
|
|
SwitchWeights->push_back(getProfileCount(NextCase));
|
2016-02-05 02:39:09 +08:00
|
|
|
if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
CaseDest = createBasicBlock("sw.bb");
|
2015-04-24 07:06:47 +08:00
|
|
|
EmitBlockWithFallThrough(CaseDest, &S);
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
}
|
2014-01-07 06:27:43 +08:00
|
|
|
|
2011-04-20 04:53:45 +08:00
|
|
|
SwitchInsn->addCase(CaseVal, CaseDest);
|
add a special case for codegen that improves the case where we have
multiple sequential cases to a) not create tons of fall-through basic blocks
and b) not recurse deeply. This fixes codegen on 100K deep cases, and improves
codegen on moderate cases from this:
switch i32 %tmp, label %sw.epilog [
i32 1000, label %sw.bb
i32 1001, label %sw.bb1
i32 1002, label %sw.bb2
i32 1003, label %sw.bb3
i32 1004, label %sw.bb4
...
sw.bb: ; preds = %entry
br label %sw.bb1
sw.bb1: ; preds = %entry, %sw.bb
br label %sw.bb2
sw.bb2: ; preds = %entry, %sw.bb1
br label %sw.bb3
sw.bb3: ; preds = %entry, %sw.bb2
br label %sw.bb4
to:
switch i32 %tmp, label %sw.epilog [
i32 1000, label %sw.bb
i32 1001, label %sw.bb
i32 1002, label %sw.bb
i32 1003, label %sw.bb
i32 1004, label %sw.bb
sw.bb: ;; many preds
llvm-svn: 66015
2009-03-04 12:46:18 +08:00
|
|
|
NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
add a special case for codegen that improves the case where we have
multiple sequential cases to a) not create tons of fall-through basic blocks
and b) not recurse deeply. This fixes codegen on 100K deep cases, and improves
codegen on moderate cases from this:
switch i32 %tmp, label %sw.epilog [
i32 1000, label %sw.bb
i32 1001, label %sw.bb1
i32 1002, label %sw.bb2
i32 1003, label %sw.bb3
i32 1004, label %sw.bb4
...
sw.bb: ; preds = %entry
br label %sw.bb1
sw.bb1: ; preds = %entry, %sw.bb
br label %sw.bb2
sw.bb2: ; preds = %entry, %sw.bb1
br label %sw.bb3
sw.bb3: ; preds = %entry, %sw.bb2
br label %sw.bb4
to:
switch i32 %tmp, label %sw.epilog [
i32 1000, label %sw.bb
i32 1001, label %sw.bb
i32 1002, label %sw.bb
i32 1003, label %sw.bb
i32 1004, label %sw.bb
sw.bb: ;; many preds
llvm-svn: 66015
2009-03-04 12:46:18 +08:00
|
|
|
// Normal default recursion for non-cases.
|
|
|
|
EmitStmt(CurCase->getSubStmt());
|
2007-10-05 07:45:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
|
2016-07-22 06:31:40 +08:00
|
|
|
// If there is no enclosing switch instance that we're aware of, then this
|
|
|
|
// default statement can be elided. This situation only happens when we've
|
|
|
|
// constant-folded the switch.
|
|
|
|
if (!SwitchInsn) {
|
|
|
|
EmitStmt(S.getSubStmt());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
|
2009-09-09 23:08:12 +08:00
|
|
|
assert(DefaultBlock->empty() &&
|
2008-11-11 10:29:29 +08:00
|
|
|
"EmitDefaultStmt: Default block already defined?");
|
2014-01-07 06:27:43 +08:00
|
|
|
|
2015-04-24 07:06:47 +08:00
|
|
|
EmitBlockWithFallThrough(DefaultBlock, &S);
|
2014-01-07 06:27:43 +08:00
|
|
|
|
2007-10-05 07:45:31 +08:00
|
|
|
EmitStmt(S.getSubStmt());
|
|
|
|
}
|
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
|
|
|
|
/// constant value that is being switched on, see if we can dead code eliminate
|
|
|
|
/// the body of the switch to a simple series of statements to emit. Basically,
|
|
|
|
/// on a switch (5) we want to find these statements:
|
|
|
|
/// case 5:
|
|
|
|
/// printf(...); <--
|
|
|
|
/// ++i; <--
|
|
|
|
/// break;
|
|
|
|
///
|
|
|
|
/// and add them to the ResultStmts vector. If it is unsafe to do this
|
|
|
|
/// transformation (for example, one of the elided statements contains a label
|
|
|
|
/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
|
|
|
|
/// should include statements after it (e.g. the printf() line is a substmt of
|
|
|
|
/// the case) then return CSFC_FallThrough. If we handled it and found a break
|
|
|
|
/// statement, then return CSFC_Success.
|
|
|
|
///
|
|
|
|
/// If Case is non-null, then we are looking for the specified case, checking
|
|
|
|
/// that nothing we jump over contains labels. If Case is null, then we found
|
|
|
|
/// the case and are looking for the break.
|
|
|
|
///
|
|
|
|
/// If the recursive walk actually finds our Case, then we set FoundCase to
|
|
|
|
/// true.
|
|
|
|
///
|
|
|
|
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
|
|
|
|
static CSFC_Result CollectStatementsForCase(const Stmt *S,
|
|
|
|
const SwitchCase *Case,
|
|
|
|
bool &FoundCase,
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVectorImpl<const Stmt*> &ResultStmts) {
|
2011-02-28 09:02:29 +08:00
|
|
|
// If this is a null statement, just succeed.
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!S)
|
2011-02-28 09:02:29 +08:00
|
|
|
return Case ? CSFC_Success : CSFC_FallThrough;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// If this is the switchcase (case 4: or default) that we're looking for, then
|
|
|
|
// we're in business. Just add the substatement.
|
|
|
|
if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
|
|
|
|
if (S == Case) {
|
|
|
|
FoundCase = true;
|
2014-05-21 13:09:00 +08:00
|
|
|
return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
ResultStmts);
|
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// Otherwise, this is some other case or default statement, just ignore it.
|
|
|
|
return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
|
|
|
|
ResultStmts);
|
|
|
|
}
|
2011-02-28 09:02:29 +08:00
|
|
|
|
|
|
|
// If we are in the live part of the code and we found our break statement,
|
|
|
|
// return a success!
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!Case && isa<BreakStmt>(S))
|
2011-02-28 09:02:29 +08:00
|
|
|
return CSFC_Success;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2011-02-28 09:02:29 +08:00
|
|
|
// If this is a switch statement, then it might contain the SwitchCase, the
|
|
|
|
// break, or neither.
|
|
|
|
if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
|
|
|
|
// Handle this as two cases: we might be looking for the SwitchCase (if so
|
|
|
|
// the skipped statements must be skippable) or we might already have it.
|
|
|
|
CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
|
2016-09-17 07:30:39 +08:00
|
|
|
bool StartedInLiveCode = FoundCase;
|
|
|
|
unsigned StartSize = ResultStmts.size();
|
|
|
|
|
|
|
|
// If we've not found the case yet, scan through looking for it.
|
2011-02-28 09:02:29 +08:00
|
|
|
if (Case) {
|
2011-02-28 15:22:44 +08:00
|
|
|
// Keep track of whether we see a skipped declaration. The code could be
|
|
|
|
// using the declaration even if it is skipped, so we can't optimize out
|
|
|
|
// the decl if the kept statements might refer to it.
|
|
|
|
bool HadSkippedDecl = false;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2011-02-28 09:02:29 +08:00
|
|
|
// If we're looking for the case, just see if we can skip each of the
|
|
|
|
// substatements.
|
|
|
|
for (; Case && I != E; ++I) {
|
2016-09-17 07:30:39 +08:00
|
|
|
HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2011-02-28 09:02:29 +08:00
|
|
|
switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
|
|
|
|
case CSFC_Failure: return CSFC_Failure;
|
|
|
|
case CSFC_Success:
|
|
|
|
// A successful result means that either 1) that the statement doesn't
|
|
|
|
// have the case and is skippable, or 2) does contain the case value
|
2011-02-28 15:16:14 +08:00
|
|
|
// and also contains the break to exit the switch. In the later case,
|
|
|
|
// we just verify the rest of the statements are elidable.
|
|
|
|
if (FoundCase) {
|
2011-02-28 15:22:44 +08:00
|
|
|
// If we found the case and skipped declarations, we can't do the
|
|
|
|
// optimization.
|
|
|
|
if (HadSkippedDecl)
|
|
|
|
return CSFC_Failure;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2011-02-28 15:16:14 +08:00
|
|
|
for (++I; I != E; ++I)
|
|
|
|
if (CodeGenFunction::ContainsLabel(*I, true))
|
|
|
|
return CSFC_Failure;
|
|
|
|
return CSFC_Success;
|
|
|
|
}
|
2011-02-28 09:02:29 +08:00
|
|
|
break;
|
|
|
|
case CSFC_FallThrough:
|
|
|
|
// If we have a fallthrough condition, then we must have found the
|
|
|
|
// case started to include statements. Consider the rest of the
|
|
|
|
// statements in the compound statement as candidates for inclusion.
|
|
|
|
assert(FoundCase && "Didn't find case but returned fallthrough?");
|
|
|
|
// We recursively found Case, so we're not looking for it anymore.
|
2014-05-21 13:09:00 +08:00
|
|
|
Case = nullptr;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2011-02-28 15:22:44 +08:00
|
|
|
// If we found the case and skipped declarations, we can't do the
|
|
|
|
// optimization.
|
|
|
|
if (HadSkippedDecl)
|
|
|
|
return CSFC_Failure;
|
2011-02-28 09:02:29 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2016-09-17 07:30:39 +08:00
|
|
|
|
|
|
|
if (!FoundCase)
|
|
|
|
return CSFC_Success;
|
|
|
|
|
|
|
|
assert(!HadSkippedDecl && "fallthrough after skipping decl");
|
2011-02-28 09:02:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// If we have statements in our range, then we know that the statements are
|
|
|
|
// live and need to be added to the set of statements we're tracking.
|
2016-09-17 07:30:39 +08:00
|
|
|
bool AnyDecls = false;
|
2011-02-28 09:02:29 +08:00
|
|
|
for (; I != E; ++I) {
|
2016-09-17 07:30:39 +08:00
|
|
|
AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
|
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
|
2011-02-28 09:02:29 +08:00
|
|
|
case CSFC_Failure: return CSFC_Failure;
|
|
|
|
case CSFC_FallThrough:
|
|
|
|
// A fallthrough result means that the statement was simple and just
|
|
|
|
// included in ResultStmt, keep adding them afterwards.
|
|
|
|
break;
|
|
|
|
case CSFC_Success:
|
|
|
|
// A successful result means that we found the break statement and
|
|
|
|
// stopped statement inclusion. We just ensure that any leftover stmts
|
|
|
|
// are skippable and return success ourselves.
|
|
|
|
for (++I; I != E; ++I)
|
|
|
|
if (CodeGenFunction::ContainsLabel(*I, true))
|
|
|
|
return CSFC_Failure;
|
|
|
|
return CSFC_Success;
|
2012-06-21 01:43:05 +08:00
|
|
|
}
|
2011-02-28 09:02:29 +08:00
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2016-09-17 07:30:39 +08:00
|
|
|
// If we're about to fall out of a scope without hitting a 'break;', we
|
|
|
|
// can't perform the optimization if there were any decls in that scope
|
|
|
|
// (we'd lose their end-of-lifetime).
|
|
|
|
if (AnyDecls) {
|
|
|
|
// If the entire compound statement was live, there's one more thing we
|
|
|
|
// can try before giving up: emit the whole thing as a single statement.
|
|
|
|
// We can do that unless the statement contains a 'break;'.
|
|
|
|
// FIXME: Such a break must be at the end of a construct within this one.
|
|
|
|
// We could emit this by just ignoring the BreakStmts entirely.
|
|
|
|
if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
|
|
|
|
ResultStmts.resize(StartSize);
|
|
|
|
ResultStmts.push_back(S);
|
|
|
|
} else {
|
|
|
|
return CSFC_Failure;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return CSFC_FallThrough;
|
2011-02-28 09:02:29 +08:00
|
|
|
}
|
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// Okay, this is some other statement that we don't handle explicitly, like a
|
|
|
|
// for statement or increment etc. If we are skipping over this statement,
|
|
|
|
// just verify it doesn't have labels, which would make it invalid to elide.
|
|
|
|
if (Case) {
|
2011-02-28 15:22:44 +08:00
|
|
|
if (CodeGenFunction::ContainsLabel(S, true))
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
return CSFC_Failure;
|
|
|
|
return CSFC_Success;
|
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// Otherwise, we want to include this statement. Everything is cool with that
|
|
|
|
// so long as it doesn't contain a break out of the switch we're in.
|
|
|
|
if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// Otherwise, everything is great. Include the statement and tell the caller
|
|
|
|
// that we fall through and include the next statement as well.
|
|
|
|
ResultStmts.push_back(S);
|
|
|
|
return CSFC_FallThrough;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// FindCaseStatementsForValue - Find the case statement being jumped to and
|
|
|
|
/// then invoke CollectStatementsForCase to find the list of statements to emit
|
|
|
|
/// for a switch on constant. See the comment above CollectStatementsForCase
|
|
|
|
/// for more details.
|
|
|
|
static bool FindCaseStatementsForValue(const SwitchStmt &S,
|
2012-07-24 04:21:35 +08:00
|
|
|
const llvm::APSInt &ConstantCondValue,
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVectorImpl<const Stmt*> &ResultStmts,
|
2014-01-07 06:27:43 +08:00
|
|
|
ASTContext &C,
|
|
|
|
const SwitchCase *&ResultCase) {
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// First step, find the switch case that is being branched to. We can do this
|
|
|
|
// efficiently by scanning the SwitchCase list.
|
|
|
|
const SwitchCase *Case = S.getSwitchCaseList();
|
2014-05-21 13:09:00 +08:00
|
|
|
const DefaultStmt *DefaultCase = nullptr;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
for (; Case; Case = Case->getNextSwitchCase()) {
|
|
|
|
// It's either a default or case. Just remember the default statement in
|
|
|
|
// case we're not jumping to any numbered cases.
|
|
|
|
if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
|
|
|
|
DefaultCase = DS;
|
|
|
|
continue;
|
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// Check to see if this case is the one we're looking for.
|
|
|
|
const CaseStmt *CS = cast<CaseStmt>(Case);
|
|
|
|
// Don't handle case ranges yet.
|
|
|
|
if (CS->getRHS()) return false;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// If we found our case, remember it as 'case'.
|
2011-10-11 02:28:20 +08:00
|
|
|
if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
break;
|
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// If we didn't find a matching case, we use a default if it exists, or we
|
|
|
|
// elide the whole switch body!
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!Case) {
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// It is safe to elide the body of the switch if it doesn't contain labels
|
|
|
|
// etc. If it is safe, return successfully with an empty ResultStmts list.
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!DefaultCase)
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
return !CodeGenFunction::ContainsLabel(&S);
|
|
|
|
Case = DefaultCase;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ok, we know which case is being jumped to, try to collect all the
|
|
|
|
// statements that follow it. This can fail for a variety of reasons. Also,
|
|
|
|
// check to see that the recursive walk actually found our case statement.
|
|
|
|
// Insane cases like this can fail to find it in the recursive walk since we
|
|
|
|
// don't handle every stmt kind:
|
|
|
|
// switch (4) {
|
|
|
|
// while (1) {
|
|
|
|
// case 4: ...
|
|
|
|
bool FoundCase = false;
|
2014-01-07 06:27:43 +08:00
|
|
|
ResultCase = Case;
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
return CollectStatementsForCase(S.getBody(), Case, FoundCase,
|
|
|
|
ResultStmts) != CSFC_Failure &&
|
|
|
|
FoundCase;
|
|
|
|
}
|
|
|
|
|
2007-10-05 07:45:31 +08:00
|
|
|
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
|
2012-01-18 07:39:50 +08:00
|
|
|
// Handle nested switch statements.
|
|
|
|
llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
|
2014-01-07 06:27:43 +08:00
|
|
|
SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
|
2012-01-18 07:39:50 +08:00
|
|
|
llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
|
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// See if we can constant fold the condition of the switch and therefore only
|
|
|
|
// emit the live case statement (if any) of the switch.
|
2012-07-24 04:21:35 +08:00
|
|
|
llvm::APSInt ConstantCondValue;
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<const Stmt*, 4> CaseStmts;
|
2014-05-21 13:09:00 +08:00
|
|
|
const SwitchCase *Case = nullptr;
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
|
2014-01-07 06:27:43 +08:00
|
|
|
getContext(), Case)) {
|
2015-04-24 07:06:47 +08:00
|
|
|
if (Case)
|
|
|
|
incrementProfileCounter(Case);
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
RunCleanupsScope ExecutedScope(*this);
|
|
|
|
|
2016-07-14 08:11:03 +08:00
|
|
|
if (S.getInit())
|
|
|
|
EmitStmt(S.getInit());
|
|
|
|
|
2014-05-03 08:14:49 +08:00
|
|
|
// Emit the condition variable if needed inside the entire cleanup scope
|
|
|
|
// used by this special case for constant folded switches.
|
|
|
|
if (S.getConditionVariable())
|
2018-03-18 05:01:27 +08:00
|
|
|
EmitDecl(*S.getConditionVariable());
|
2014-05-03 08:14:49 +08:00
|
|
|
|
2012-01-18 07:39:50 +08:00
|
|
|
// At this point, we are no longer "within" a switch instance, so
|
|
|
|
// we can temporarily enforce this to ensure that any embedded case
|
|
|
|
// statements are not emitted.
|
2014-05-21 13:09:00 +08:00
|
|
|
SwitchInsn = nullptr;
|
2012-01-18 07:39:50 +08:00
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
// Okay, we can dead code eliminate everything except this case. Emit the
|
|
|
|
// specified series of statements and we're good.
|
|
|
|
for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
|
|
|
|
EmitStmt(CaseStmts[i]);
|
2015-04-24 07:06:47 +08:00
|
|
|
incrementProfileCounter(&S);
|
2012-01-18 07:39:50 +08:00
|
|
|
|
2012-04-10 13:04:04 +08:00
|
|
|
// Now we want to restore the saved switch instance so that nested
|
|
|
|
// switches continue to function properly
|
2012-01-18 07:39:50 +08:00
|
|
|
SwitchInsn = SavedSwitchInsn;
|
|
|
|
|
First tiny step to implementing PR9322: build infrastructure for only emitting the
live case of a switch statement when switching on a constant. This is terribly
limited, but enough to handle the trivial example included. Before we would
emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
switch i32 1, label %sw.epilog [
i32 1, label %sw.bb
]
sw.bb: ; preds = %entry
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb, %entry
switch i32 0, label %sw.epilog3 [
i32 1, label %sw.bb1
]
sw.bb1: ; preds = %sw.epilog
%tmp2 = load i32* %i.addr, align 4
%add = add nsw i32 %tmp2, 2
store i32 %add, i32* %i.addr, align 4
br label %sw.epilog3
sw.epilog3: ; preds = %sw.bb1, %sw.epilog
ret void
}
now we emit:
define void @test1(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%tmp = load i32* %i.addr, align 4
%inc = add nsw i32 %tmp, 1
store i32 %inc, i32* %i.addr, align 4
ret void
}
This improves -O0 compile time (less IR to generate and shove through the code
generator) and the clever linux kernel people found a way to fail to build if we
don't do this optimization. This step isn't enough to handle the kernel case
though.
llvm-svn: 126597
2011-02-28 08:22:07 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2014-05-03 08:14:49 +08:00
|
|
|
JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
|
|
|
|
|
|
|
|
RunCleanupsScope ConditionScope(*this);
|
2016-07-14 08:11:03 +08:00
|
|
|
|
|
|
|
if (S.getInit())
|
|
|
|
EmitStmt(S.getInit());
|
|
|
|
|
2014-05-03 08:14:49 +08:00
|
|
|
if (S.getConditionVariable())
|
2018-03-18 05:01:27 +08:00
|
|
|
EmitDecl(*S.getConditionVariable());
|
2007-10-05 07:45:31 +08:00
|
|
|
llvm::Value *CondV = EmitScalarExpr(S.getCond());
|
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
// Create basic block to hold stuff that comes after switch
|
|
|
|
// statement. We also need to create a default block now so that
|
|
|
|
// explicit case ranges tests can have a place to jump to on
|
|
|
|
// failure.
|
2008-11-11 10:29:29 +08:00
|
|
|
llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
|
2008-07-25 09:11:38 +08:00
|
|
|
SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
|
2014-01-07 06:27:43 +08:00
|
|
|
if (PGO.haveRegionCounts()) {
|
|
|
|
// Walk the SwitchCase list to find how many there are.
|
|
|
|
uint64_t DefaultCount = 0;
|
|
|
|
unsigned NumCases = 0;
|
|
|
|
for (const SwitchCase *Case = S.getSwitchCaseList();
|
|
|
|
Case;
|
|
|
|
Case = Case->getNextSwitchCase()) {
|
|
|
|
if (isa<DefaultStmt>(Case))
|
2015-04-24 07:06:47 +08:00
|
|
|
DefaultCount = getProfileCount(Case);
|
2014-01-07 06:27:43 +08:00
|
|
|
NumCases += 1;
|
|
|
|
}
|
|
|
|
SwitchWeights = new SmallVector<uint64_t, 16>();
|
|
|
|
SwitchWeights->reserve(NumCases);
|
|
|
|
// The default needs to be first. We store the edge count, so we already
|
|
|
|
// know the right weight.
|
|
|
|
SwitchWeights->push_back(DefaultCount);
|
|
|
|
}
|
2008-07-25 09:11:38 +08:00
|
|
|
CaseRangeBlock = DefaultBlock;
|
2007-10-05 07:45:31 +08:00
|
|
|
|
2008-11-12 16:21:33 +08:00
|
|
|
// Clear the insertion point to indicate we are in unreachable code.
|
|
|
|
Builder.ClearInsertionPoint();
|
2008-05-13 00:08:04 +08:00
|
|
|
|
2013-12-05 12:47:09 +08:00
|
|
|
// All break statements jump to NextBlock. If BreakContinueStack is non-empty
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
// then reuse last ContinueBlock.
|
2010-07-06 09:34:17 +08:00
|
|
|
JumpDest OuterContinue;
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
if (!BreakContinueStack.empty())
|
|
|
|
OuterContinue = BreakContinueStack.back().ContinueBlock;
|
2009-02-10 13:52:02 +08:00
|
|
|
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
|
2007-10-05 07:45:31 +08:00
|
|
|
|
|
|
|
// Emit switch body.
|
|
|
|
EmitStmt(S.getBody());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-10 13:52:02 +08:00
|
|
|
BreakContinueStack.pop_back();
|
2007-10-05 07:45:31 +08:00
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
// Update the default block in case explicit case range tests have
|
|
|
|
// been chained on top.
|
Compatability fix for SwitchInst refactoring.
The purpose of refactoring is to hide operand roles from SwitchInst user (programmer). If you want to play with operands directly, probably you will need lower level methods than SwitchInst ones (TerminatorInst or may be User). After this patch we can reorganize SwitchInst operands and successors as we want.
What was done:
1. Changed semantics of index inside the getCaseValue method:
getCaseValue(0) means "get first case", not a condition. Use getCondition() if you want to resolve the condition. I propose don't mix SwitchInst case indexing with low level indexing (TI successors indexing, User's operands indexing), since it may be dangerous.
2. By the same reason findCaseValue(ConstantInt*) returns actual number of case value. 0 means first case, not default. If there is no case with given value, ErrorIndex will returned.
3. Added getCaseSuccessor method. I propose to avoid usage of TerminatorInst::getSuccessor if you want to resolve case successor BB. Use getCaseSuccessor instead, since internal SwitchInst organization of operands/successors is hidden and may be changed in any moment.
4. Added resolveSuccessorIndex and resolveCaseIndex. The main purpose of these methods is to see how case successors are really mapped in TerminatorInst.
4.1 "resolveSuccessorIndex" was created if you need to level down from SwitchInst to TerminatorInst. It returns TerminatorInst's successor index for given case successor.
4.2 "resolveCaseIndex" converts low level successors index to case index that curresponds to the given successor.
Note: There are also related compatability fix patches for dragonegg, klee, llvm-gcc-4.0, llvm-gcc-4.2, safecode, clang.
llvm-svn: 149482
2012-02-01 15:50:21 +08:00
|
|
|
SwitchInsn->setDefaultDest(CaseRangeBlock);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
// If a default was never emitted:
|
2008-07-25 09:11:38 +08:00
|
|
|
if (!DefaultBlock->getParent()) {
|
2010-07-06 09:34:17 +08:00
|
|
|
// If we have cleanups, emit the default block so that there's a
|
|
|
|
// place to jump through the cleanups from.
|
|
|
|
if (ConditionScope.requiresCleanups()) {
|
|
|
|
EmitBlock(DefaultBlock);
|
|
|
|
|
|
|
|
// Otherwise, just forward the default block to the switch end.
|
|
|
|
} else {
|
2010-07-24 05:56:41 +08:00
|
|
|
DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
|
2010-07-06 09:34:17 +08:00
|
|
|
delete DefaultBlock;
|
|
|
|
}
|
2008-07-25 09:11:38 +08:00
|
|
|
}
|
|
|
|
|
2010-07-24 05:56:41 +08:00
|
|
|
ConditionScope.ForceCleanup();
|
|
|
|
|
2008-07-25 09:11:38 +08:00
|
|
|
// Emit continuation.
|
2010-07-24 05:56:41 +08:00
|
|
|
EmitBlock(SwitchExit.getBlock(), true);
|
2015-04-24 07:06:47 +08:00
|
|
|
incrementProfileCounter(&S);
|
2014-01-07 06:27:43 +08:00
|
|
|
|
2015-09-10 06:39:06 +08:00
|
|
|
// If the switch has a condition wrapped by __builtin_unpredictable,
|
|
|
|
// create metadata that specifies that the switch is unpredictable.
|
|
|
|
// Don't bother if not optimizing because that metadata would not be used.
|
2016-04-20 01:13:14 +08:00
|
|
|
auto *Call = dyn_cast<CallExpr>(S.getCond());
|
|
|
|
if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
|
|
|
|
auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
|
|
|
|
if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
|
|
|
|
llvm::MDBuilder MDHelper(getLLVMContext());
|
|
|
|
SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
|
|
|
|
MDHelper.createUnpredictable());
|
2015-09-10 06:39:06 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-07 06:27:43 +08:00
|
|
|
if (SwitchWeights) {
|
|
|
|
assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
|
|
|
|
"switch weights do not match switch cases");
|
|
|
|
// If there's only one jump destination there's no sense weighting it.
|
|
|
|
if (SwitchWeights->size() > 1)
|
|
|
|
SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
|
2015-05-02 13:00:55 +08:00
|
|
|
createProfileWeights(*SwitchWeights));
|
2014-01-07 06:27:43 +08:00
|
|
|
delete SwitchWeights;
|
|
|
|
}
|
2007-10-05 07:45:31 +08:00
|
|
|
SwitchInsn = SavedSwitchInsn;
|
2014-01-07 06:27:43 +08:00
|
|
|
SwitchWeights = SavedSwitchWeights;
|
2007-10-09 04:57:48 +08:00
|
|
|
CaseRangeBlock = SavedCRBlock;
|
2007-10-05 07:45:31 +08:00
|
|
|
}
|
2008-02-06 00:35:33 +08:00
|
|
|
|
2009-04-27 01:57:12 +08:00
|
|
|
static std::string
|
2009-11-13 13:51:54 +08:00
|
|
|
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
|
2014-05-21 13:09:00 +08:00
|
|
|
SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
|
2008-02-06 00:35:33 +08:00
|
|
|
std::string Result;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
while (*Constraint) {
|
|
|
|
switch (*Constraint) {
|
|
|
|
default:
|
2011-06-08 07:45:05 +08:00
|
|
|
Result += Target.convertConstraint(Constraint);
|
2008-02-06 00:35:33 +08:00
|
|
|
break;
|
|
|
|
// Ignore these
|
|
|
|
case '*':
|
|
|
|
case '?':
|
|
|
|
case '!':
|
2010-08-11 03:20:14 +08:00
|
|
|
case '=': // Will see this and the following in mult-alt constraints.
|
|
|
|
case '+':
|
|
|
|
break;
|
2012-10-29 20:20:54 +08:00
|
|
|
case '#': // Ignore the rest of the constraint alternative.
|
|
|
|
while (Constraint[1] && Constraint[1] != ',')
|
2013-07-11 04:14:36 +08:00
|
|
|
Constraint++;
|
2012-10-29 20:20:54 +08:00
|
|
|
break;
|
2015-01-11 17:09:01 +08:00
|
|
|
case '&':
|
2015-01-11 17:13:56 +08:00
|
|
|
case '%':
|
|
|
|
Result += *Constraint;
|
|
|
|
while (Constraint[1] && Constraint[1] == *Constraint)
|
2015-01-11 17:09:01 +08:00
|
|
|
Constraint++;
|
|
|
|
break;
|
2010-09-18 09:15:13 +08:00
|
|
|
case ',':
|
|
|
|
Result += "|";
|
2008-02-06 00:35:33 +08:00
|
|
|
break;
|
|
|
|
case 'g':
|
|
|
|
Result += "imr";
|
|
|
|
break;
|
2009-01-18 10:06:20 +08:00
|
|
|
case '[': {
|
2009-04-27 01:57:12 +08:00
|
|
|
assert(OutCons &&
|
2009-01-18 10:06:20 +08:00
|
|
|
"Must pass output names to constraints with a symbolic name");
|
|
|
|
unsigned Index;
|
2015-10-21 10:34:10 +08:00
|
|
|
bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
|
2011-01-06 02:41:53 +08:00
|
|
|
assert(result && "Could not resolve symbolic name"); (void)result;
|
2009-01-18 10:06:20 +08:00
|
|
|
Result += llvm::utostr(Index);
|
|
|
|
break;
|
|
|
|
}
|
2008-02-06 00:35:33 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
Constraint++;
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
2011-01-02 05:12:33 +08:00
|
|
|
/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
|
|
|
|
/// as using a particular register add that as a constraint that will be used
|
|
|
|
/// in this asm stmt.
|
2010-12-31 06:59:32 +08:00
|
|
|
static std::string
|
2011-01-02 05:12:33 +08:00
|
|
|
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
|
|
|
|
const TargetInfo &Target, CodeGenModule &CGM,
|
2015-02-04 22:25:47 +08:00
|
|
|
const AsmStmt &Stmt, const bool EarlyClobber) {
|
2010-12-31 06:59:32 +08:00
|
|
|
const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
|
|
|
|
if (!AsmDeclRef)
|
|
|
|
return Constraint;
|
|
|
|
const ValueDecl &Value = *AsmDeclRef->getDecl();
|
|
|
|
const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
|
|
|
|
if (!Variable)
|
|
|
|
return Constraint;
|
2012-03-16 07:12:51 +08:00
|
|
|
if (Variable->getStorageClass() != SC_Register)
|
|
|
|
return Constraint;
|
2010-12-31 06:59:32 +08:00
|
|
|
AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
|
|
|
|
if (!Attr)
|
|
|
|
return Constraint;
|
2011-07-23 18:55:15 +08:00
|
|
|
StringRef Register = Attr->getLabel();
|
2011-01-02 05:47:03 +08:00
|
|
|
assert(Target.isValidGCCRegisterName(Register));
|
2011-06-17 09:53:34 +08:00
|
|
|
// We're using validateOutputConstraint here because we only care if
|
|
|
|
// this is a register constraint.
|
|
|
|
TargetInfo::ConstraintInfo Info(Constraint, "");
|
|
|
|
if (Target.validateOutputConstraint(Info) &&
|
|
|
|
!Info.allowsRegister()) {
|
2010-12-31 06:59:32 +08:00
|
|
|
CGM.ErrorUnsupported(&Stmt, "__asm__");
|
|
|
|
return Constraint;
|
|
|
|
}
|
2011-06-21 08:07:10 +08:00
|
|
|
// Canonicalize the register here before returning it.
|
|
|
|
Register = Target.getNormalizedGCCRegisterName(Register);
|
2015-02-04 22:25:47 +08:00
|
|
|
return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
|
2010-12-31 06:59:32 +08:00
|
|
|
}
|
|
|
|
|
2010-07-16 08:55:21 +08:00
|
|
|
llvm::Value*
|
2012-08-24 04:00:18 +08:00
|
|
|
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
|
2010-07-16 08:55:21 +08:00
|
|
|
LValue InputValue, QualType InputType,
|
2013-10-02 10:29:49 +08:00
|
|
|
std::string &ConstraintStr,
|
|
|
|
SourceLocation Loc) {
|
2009-01-12 03:32:54 +08:00
|
|
|
llvm::Value *Arg;
|
2009-09-09 23:08:12 +08:00
|
|
|
if (Info.allowsRegister() || !Info.allowsMemory()) {
|
2013-03-08 05:37:08 +08:00
|
|
|
if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
|
2013-10-02 10:29:49 +08:00
|
|
|
Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
|
2009-01-12 03:32:54 +08:00
|
|
|
} else {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *Ty = ConvertType(InputType);
|
2012-10-09 00:25:52 +08:00
|
|
|
uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
|
2009-01-12 10:22:13 +08:00
|
|
|
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
|
2011-02-08 16:22:06 +08:00
|
|
|
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
|
2009-01-12 10:22:13 +08:00
|
|
|
Ty = llvm::PointerType::getUnqual(Ty);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-07-16 08:55:21 +08:00
|
|
|
Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
|
|
|
|
Ty));
|
2009-01-12 10:22:13 +08:00
|
|
|
} else {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Arg = InputValue.getPointer();
|
2009-01-12 10:22:13 +08:00
|
|
|
ConstraintStr += '*';
|
|
|
|
}
|
2009-01-12 03:32:54 +08:00
|
|
|
}
|
|
|
|
} else {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Arg = InputValue.getPointer();
|
2009-01-12 03:32:54 +08:00
|
|
|
ConstraintStr += '*';
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-01-12 03:32:54 +08:00
|
|
|
return Arg;
|
|
|
|
}
|
|
|
|
|
2012-08-24 04:00:18 +08:00
|
|
|
/// Emit the LLVM value for a single inline-asm input operand described by
/// \p Info, appending any indirect marker ('*') to \p ConstraintStr when the
/// operand has to be passed by address.
llvm::Value* CodeGenFunction::EmitAsmInput(
                                         const TargetInfo::ConstraintInfo &Info,
                                           const Expr *InputExpr,
                                           std::string &ConstraintStr) {
  // If this can't be a register or memory, i.e., has to be a constant
  // (immediate or symbolic), try to emit it as such.
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
    if (Info.requiresImmediateConstant()) {
      Expr::EvalResult EVResult;
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);

      llvm::APSInt IntResult;
      // NOTE: if the expression isn't an integral constant here we fall
      // through rather than diagnosing: it may still become an immediate
      // after inlining, so the diagnostic is deferred to the backend.
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
                                          getContext()))
        return llvm::ConstantInt::get(getLLVMContext(), IntResult);
    }

    Expr::EvalResult Result;
    if (InputExpr->EvaluateAsInt(Result, getContext()))
      return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
  }

  // Register (or register-preferred) scalar operands are emitted by value.
  if (Info.allowsRegister() || !Info.allowsMemory())
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
      return EmitScalarExpr(InputExpr);
  // 'this' is not an lvalue, so it must be emitted as a scalar as well.
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
    return EmitScalarExpr(InputExpr);
  // Otherwise emit the operand as an lvalue and let EmitAsmInputLValue
  // decide between a register load and an indirect ('*') memory operand.
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue Dest = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
                            InputExpr->getExprLoc());
}
|
|
|
|
|
2010-11-17 13:58:54 +08:00
|
|
|
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
|
2010-11-17 16:25:26 +08:00
|
|
|
/// asm call instruction. The !srcloc MDNode contains a list of constant
|
|
|
|
/// integers which are the source locations of the start of each line in the
|
|
|
|
/// asm.
|
2010-11-17 13:58:54 +08:00
|
|
|
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
|
|
|
|
CodeGenFunction &CGF) {
|
2014-12-10 02:39:32 +08:00
|
|
|
SmallVector<llvm::Metadata *, 8> Locs;
|
2010-11-17 16:25:26 +08:00
|
|
|
// Add the location of the first line to the MDNode.
|
2014-12-10 02:39:32 +08:00
|
|
|
Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
|
2018-08-10 05:08:08 +08:00
|
|
|
CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
|
2011-07-23 18:55:15 +08:00
|
|
|
StringRef StrVal = Str->getString();
|
2010-11-17 16:25:26 +08:00
|
|
|
if (!StrVal.empty()) {
|
|
|
|
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
|
2012-03-11 15:00:24 +08:00
|
|
|
const LangOptions &LangOpts = CGF.CGM.getLangOpts();
|
2015-12-10 09:11:47 +08:00
|
|
|
unsigned StartToken = 0;
|
|
|
|
unsigned ByteOffset = 0;
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2010-11-17 16:25:26 +08:00
|
|
|
// Add the location of the start of each subsequent line of the asm to the
|
|
|
|
// MDNode.
|
2015-12-10 09:11:47 +08:00
|
|
|
for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
|
2010-11-17 16:25:26 +08:00
|
|
|
if (StrVal[i] != '\n') continue;
|
2015-12-10 09:11:47 +08:00
|
|
|
SourceLocation LineLoc = Str->getLocationOfByte(
|
|
|
|
i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
|
2014-12-10 02:39:32 +08:00
|
|
|
Locs.push_back(llvm::ConstantAsMetadata::get(
|
|
|
|
llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
|
2010-11-17 16:25:26 +08:00
|
|
|
}
|
2012-06-21 01:43:05 +08:00
|
|
|
}
|
|
|
|
|
2011-04-22 03:59:12 +08:00
|
|
|
return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
|
2010-11-17 13:58:54 +08:00
|
|
|
}
|
|
|
|
|
2019-06-03 23:57:25 +08:00
|
|
|
/// Decorate the just-emitted inline-asm call \p Result with the standard
/// attributes and !srcloc metadata, then collect its register results into
/// \p RegResults (one value per entry of \p ResultRegTypes).
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
                              bool ReadOnly, bool ReadNone, const AsmStmt &S,
                              const std::vector<llvm::Type *> &ResultRegTypes,
                              CodeGenFunction &CGF,
                              std::vector<llvm::Value *> &RegResults) {
  // Inline asm is never allowed to unwind.
  Result.addAttribute(llvm::AttributeList::FunctionIndex,
                      llvm::Attribute::NoUnwind);
  // Attach readnone and readonly attributes.
  if (!HasSideEffect) {
    if (ReadNone)
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::ReadNone);
    else if (ReadOnly)
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::ReadOnly);
  }

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
    Result.setMetadata("srcloc",
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
  else {
    // At least put the line number on MS inline asm blobs.
    llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
                                                 S.getAsmLoc().getRawEncoding());
    Result.setMetadata("srcloc",
                       llvm::MDNode::get(CGF.getLLVMContext(),
                                         llvm::ConstantAsMetadata::get(Loc)));
  }

  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as bar.sync, and so can't have certain optimizations applied around
    // them).
    Result.addAttribute(llvm::AttributeList::FunctionIndex,
                        llvm::Attribute::Convergent);
  // Extract all of the register value results from the asm.
  if (ResultRegTypes.size() == 1) {
    // A single register result is the call's return value itself.
    RegResults.push_back(&Result);
  } else {
    // Multiple results come back as a struct; pull each element out.
    // (When there are zero register results this loop simply does nothing.)
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }
}
|
|
|
|
|
2012-08-29 02:54:39 +08:00
|
|
|
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
|
2012-08-25 01:05:45 +08:00
|
|
|
// Assemble the final asm string.
|
2012-08-28 04:23:31 +08:00
|
|
|
std::string AsmString = S.generateAsmString(getContext());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-05-03 15:05:00 +08:00
|
|
|
// Get all the output and input constraints together.
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
|
|
|
|
SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
|
2009-05-03 15:05:00 +08:00
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
|
2013-05-03 08:10:13 +08:00
|
|
|
StringRef Name;
|
|
|
|
if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
|
|
|
|
Name = GAS->getOutputName(i);
|
|
|
|
TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
|
2013-04-17 06:48:15 +08:00
|
|
|
bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
|
2013-11-22 18:20:40 +08:00
|
|
|
assert(IsValid && "Failed to parse output constraint");
|
2009-05-03 15:05:00 +08:00
|
|
|
OutputConstraintInfos.push_back(Info);
|
2009-09-09 23:08:12 +08:00
|
|
|
}
|
|
|
|
|
2009-05-03 15:05:00 +08:00
|
|
|
for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
|
2013-05-03 08:10:13 +08:00
|
|
|
StringRef Name;
|
|
|
|
if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
|
|
|
|
Name = GAS->getInputName(i);
|
|
|
|
TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
|
2013-04-17 06:48:15 +08:00
|
|
|
bool IsValid =
|
2015-10-21 10:34:10 +08:00
|
|
|
getTarget().validateInputConstraint(OutputConstraintInfos, Info);
|
2010-03-04 05:52:23 +08:00
|
|
|
assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
|
2009-05-03 15:05:00 +08:00
|
|
|
InputConstraintInfos.push_back(Info);
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
std::string Constraints;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-05-03 15:53:25 +08:00
|
|
|
std::vector<LValue> ResultRegDests;
|
|
|
|
std::vector<QualType> ResultRegQualTys;
|
2011-07-11 17:56:20 +08:00
|
|
|
std::vector<llvm::Type *> ResultRegTypes;
|
|
|
|
std::vector<llvm::Type *> ResultTruncRegTypes;
|
2012-05-02 03:53:37 +08:00
|
|
|
std::vector<llvm::Type *> ArgTypes;
|
2008-02-06 00:35:33 +08:00
|
|
|
std::vector<llvm::Value*> Args;
|
2019-08-29 19:21:41 +08:00
|
|
|
llvm::BitVector ResultTypeRequiresCast;
|
2008-02-06 04:01:53 +08:00
|
|
|
|
|
|
|
// Keep track of inout constraints.
|
|
|
|
std::string InOutConstraints;
|
|
|
|
std::vector<llvm::Value*> InOutArgs;
|
2011-07-10 01:41:47 +08:00
|
|
|
std::vector<llvm::Type*> InOutArgTypes;
|
2009-01-28 04:38:24 +08:00
|
|
|
|
2019-03-15 03:46:51 +08:00
|
|
|
// Keep track of out constraints for tied input operand.
|
|
|
|
std::vector<std::string> OutputConstraints;
|
|
|
|
|
2015-07-11 02:44:40 +08:00
|
|
|
// An inline asm can be marked readonly if it meets the following conditions:
|
|
|
|
// - it doesn't have any sideeffects
|
|
|
|
// - it doesn't clobber memory
|
|
|
|
// - it doesn't return a value by-reference
|
|
|
|
// It can be marked readnone if it doesn't have any input memory constraints
|
|
|
|
// in addition to meeting the conditions listed above.
|
|
|
|
bool ReadOnly = true, ReadNone = true;
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
|
2009-05-03 15:05:00 +08:00
|
|
|
TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
|
2009-01-28 04:38:24 +08:00
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
// Simplify the output constraint.
|
2009-05-03 15:05:00 +08:00
|
|
|
std::string OutputConstraint(S.getOutputConstraint(i));
|
2013-04-17 06:48:15 +08:00
|
|
|
OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
|
2018-03-24 03:43:42 +08:00
|
|
|
getTarget(), &OutputConstraintInfos);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-03-14 01:38:01 +08:00
|
|
|
const Expr *OutExpr = S.getOutputExpr(i);
|
|
|
|
OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-06-03 22:52:25 +08:00
|
|
|
OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
|
2015-02-04 22:25:47 +08:00
|
|
|
getTarget(), CGM, S,
|
|
|
|
Info.earlyClobber());
|
2019-03-15 03:46:51 +08:00
|
|
|
OutputConstraints.push_back(OutputConstraint);
|
2009-03-14 01:38:01 +08:00
|
|
|
LValue Dest = EmitLValue(OutExpr);
|
2009-05-03 15:53:25 +08:00
|
|
|
if (!Constraints.empty())
|
2009-05-01 08:16:04 +08:00
|
|
|
Constraints += ',';
|
|
|
|
|
2009-05-03 16:21:20 +08:00
|
|
|
// If this is a register output, then make the inline asm return it
|
|
|
|
// by-value. If this is a memory result, return the value by-reference.
|
2019-08-29 19:21:41 +08:00
|
|
|
bool isScalarizableAggregate =
|
|
|
|
hasAggregateEvaluationKind(OutExpr->getType());
|
|
|
|
if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
|
|
|
|
isScalarizableAggregate)) {
|
2009-05-03 16:21:20 +08:00
|
|
|
Constraints += "=" + OutputConstraint;
|
2009-05-03 15:53:25 +08:00
|
|
|
ResultRegQualTys.push_back(OutExpr->getType());
|
|
|
|
ResultRegDests.push_back(Dest);
|
2019-08-29 19:21:41 +08:00
|
|
|
ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
|
|
|
|
if (Info.allowsRegister() && isScalarizableAggregate) {
|
|
|
|
ResultTypeRequiresCast.push_back(true);
|
|
|
|
unsigned Size = getContext().getTypeSize(OutExpr->getType());
|
|
|
|
llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
|
|
|
|
ResultRegTypes.push_back(ConvTy);
|
|
|
|
} else {
|
|
|
|
ResultTypeRequiresCast.push_back(false);
|
|
|
|
ResultRegTypes.push_back(ResultTruncRegTypes.back());
|
|
|
|
}
|
2009-05-03 16:21:20 +08:00
|
|
|
// If this output is tied to an input, and if the input is larger, then
|
|
|
|
// we need to set the actual result type of the inline asm node to be the
|
|
|
|
// same as the input type.
|
|
|
|
if (Info.hasMatchingInput()) {
|
2009-05-03 16:38:58 +08:00
|
|
|
unsigned InputNo;
|
|
|
|
for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
|
|
|
|
TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
|
2010-04-24 01:27:29 +08:00
|
|
|
if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
|
2009-05-03 16:21:20 +08:00
|
|
|
break;
|
2009-05-03 16:38:58 +08:00
|
|
|
}
|
|
|
|
assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-05-03 16:21:20 +08:00
|
|
|
QualType InputTy = S.getInputExpr(InputNo)->getType();
|
2010-04-24 01:27:29 +08:00
|
|
|
QualType OutputType = OutExpr->getType();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-05-03 16:21:20 +08:00
|
|
|
uint64_t InputSize = getContext().getTypeSize(InputTy);
|
2010-04-24 01:27:29 +08:00
|
|
|
if (getContext().getTypeSize(OutputType) < InputSize) {
|
|
|
|
// Form the asm to return the value as a larger integer or fp type.
|
|
|
|
ResultRegTypes.back() = ConvertType(InputTy);
|
2009-05-03 16:21:20 +08:00
|
|
|
}
|
|
|
|
}
|
2013-06-07 08:04:50 +08:00
|
|
|
if (llvm::Type* AdjTy =
|
2011-02-20 07:03:58 +08:00
|
|
|
getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
|
|
|
|
ResultRegTypes.back()))
|
2010-10-30 07:12:32 +08:00
|
|
|
ResultRegTypes.back() = AdjTy;
|
2013-06-07 08:04:50 +08:00
|
|
|
else {
|
|
|
|
CGM.getDiags().Report(S.getAsmLoc(),
|
|
|
|
diag::err_asm_invalid_type_in_input)
|
|
|
|
<< OutExpr->getType() << OutputConstraint;
|
|
|
|
}
|
2018-08-15 04:21:05 +08:00
|
|
|
|
|
|
|
// Update largest vector width for any vector types.
|
|
|
|
if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
|
2019-10-08 20:53:54 +08:00
|
|
|
LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
|
|
|
|
VT->getPrimitiveSizeInBits().getFixedSize());
|
2008-02-06 00:35:33 +08:00
|
|
|
} else {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
ArgTypes.push_back(Dest.getAddress().getType());
|
|
|
|
Args.push_back(Dest.getPointer());
|
2008-02-06 04:01:53 +08:00
|
|
|
Constraints += "=*";
|
2008-02-06 00:35:33 +08:00
|
|
|
Constraints += OutputConstraint;
|
2015-07-11 02:44:40 +08:00
|
|
|
ReadOnly = ReadNone = false;
|
2008-02-06 04:01:53 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-04-26 15:16:29 +08:00
|
|
|
if (Info.isReadWrite()) {
|
2008-02-06 04:01:53 +08:00
|
|
|
InOutConstraints += ',';
|
2009-01-12 03:32:54 +08:00
|
|
|
|
2009-08-05 02:18:36 +08:00
|
|
|
const Expr *InputExpr = S.getOutputExpr(i);
|
2012-08-24 04:00:18 +08:00
|
|
|
llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
|
2013-10-02 10:29:49 +08:00
|
|
|
InOutConstraints,
|
|
|
|
InputExpr->getExprLoc());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2012-03-23 07:25:07 +08:00
|
|
|
if (llvm::Type* AdjTy =
|
2013-06-07 08:04:50 +08:00
|
|
|
getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
|
|
|
|
Arg->getType()))
|
2012-03-23 07:25:07 +08:00
|
|
|
Arg = Builder.CreateBitCast(Arg, AdjTy);
|
|
|
|
|
2018-08-15 04:21:05 +08:00
|
|
|
// Update largest vector width for any vector types.
|
|
|
|
if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
|
2019-10-08 20:53:54 +08:00
|
|
|
LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
|
|
|
|
VT->getPrimitiveSizeInBits().getFixedSize());
|
2009-04-26 15:16:29 +08:00
|
|
|
if (Info.allowsRegister())
|
2009-01-12 05:23:27 +08:00
|
|
|
InOutConstraints += llvm::utostr(i);
|
|
|
|
else
|
|
|
|
InOutConstraints += OutputConstraint;
|
2009-01-12 03:46:50 +08:00
|
|
|
|
2009-08-05 02:18:36 +08:00
|
|
|
InOutArgTypes.push_back(Arg->getType());
|
|
|
|
InOutArgs.push_back(Arg);
|
2008-02-06 04:01:53 +08:00
|
|
|
}
|
2008-02-06 00:35:33 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2014-09-05 04:04:38 +08:00
|
|
|
// If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
|
|
|
|
// to the return value slot. Only do this when returning in registers.
|
|
|
|
if (isa<MSAsmStmt>(&S)) {
|
|
|
|
const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
|
|
|
|
if (RetAI.isDirect() || RetAI.isExtend()) {
|
|
|
|
// Make a fake lvalue for the return value slot.
|
|
|
|
LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
|
|
|
|
CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
|
|
|
|
*this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
|
|
|
|
ResultRegDests, AsmString, S.getNumOutputs());
|
|
|
|
SawAsmBlock = true;
|
|
|
|
}
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
|
|
|
|
const Expr *InputExpr = S.getInputExpr(i);
|
|
|
|
|
2009-05-03 15:05:00 +08:00
|
|
|
TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
|
|
|
|
|
2015-07-11 02:44:40 +08:00
|
|
|
if (Info.allowsMemory())
|
|
|
|
ReadNone = false;
|
|
|
|
|
2009-05-03 15:53:25 +08:00
|
|
|
if (!Constraints.empty())
|
2008-02-06 00:35:33 +08:00
|
|
|
Constraints += ',';
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
// Simplify the input constraint.
|
2009-05-03 15:05:00 +08:00
|
|
|
std::string InputConstraint(S.getInputConstraint(i));
|
2013-04-17 06:48:15 +08:00
|
|
|
InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
|
2009-04-27 01:57:12 +08:00
|
|
|
&OutputConstraintInfos);
|
2008-02-06 00:35:33 +08:00
|
|
|
|
2015-02-07 02:44:18 +08:00
|
|
|
InputConstraint = AddVariableConstraints(
|
|
|
|
InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
|
|
|
|
getTarget(), CGM, S, false /* No EarlyClobber */);
|
2010-12-31 06:59:32 +08:00
|
|
|
|
2019-03-15 03:46:51 +08:00
|
|
|
std::string ReplaceConstraint (InputConstraint);
|
2012-08-24 04:00:18 +08:00
|
|
|
llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-05-03 15:27:51 +08:00
|
|
|
// If this input argument is tied to a larger output result, extend the
|
|
|
|
// input to be the same size as the output. The LLVM backend wants to see
|
|
|
|
// the input and output of a matching constraint be the same size. Note
|
|
|
|
// that GCC does not define what the top bits are here. We use zext because
|
|
|
|
// that is usually cheaper, but LLVM IR should really get an anyext someday.
|
|
|
|
if (Info.hasTiedOperand()) {
|
|
|
|
unsigned Output = Info.getTiedOperand();
|
2010-04-24 01:27:29 +08:00
|
|
|
QualType OutputType = S.getOutputExpr(Output)->getType();
|
2009-05-03 15:27:51 +08:00
|
|
|
QualType InputTy = InputExpr->getType();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-04-24 01:27:29 +08:00
|
|
|
if (getContext().getTypeSize(OutputType) >
|
2009-05-03 15:27:51 +08:00
|
|
|
getContext().getTypeSize(InputTy)) {
|
|
|
|
// Use ptrtoint as appropriate so that we can do our extension.
|
|
|
|
if (isa<llvm::PointerType>(Arg->getType()))
|
2010-06-27 15:15:29 +08:00
|
|
|
Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *OutputTy = ConvertType(OutputType);
|
2010-04-24 01:27:29 +08:00
|
|
|
if (isa<llvm::IntegerType>(OutputTy))
|
|
|
|
Arg = Builder.CreateZExt(Arg, OutputTy);
|
2011-07-29 08:24:50 +08:00
|
|
|
else if (isa<llvm::PointerType>(OutputTy))
|
|
|
|
Arg = Builder.CreateZExt(Arg, IntPtrTy);
|
|
|
|
else {
|
|
|
|
assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
|
2010-04-24 01:27:29 +08:00
|
|
|
Arg = Builder.CreateFPExt(Arg, OutputTy);
|
2011-07-29 08:24:50 +08:00
|
|
|
}
|
2009-05-03 15:27:51 +08:00
|
|
|
}
|
2019-03-15 03:46:51 +08:00
|
|
|
// Deal with the tied operands' constraint code in adjustInlineAsmType.
|
|
|
|
ReplaceConstraint = OutputConstraints[Output];
|
2009-05-03 15:27:51 +08:00
|
|
|
}
|
2012-03-23 07:25:07 +08:00
|
|
|
if (llvm::Type* AdjTy =
|
2019-03-15 03:46:51 +08:00
|
|
|
getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
|
2011-02-20 07:03:58 +08:00
|
|
|
Arg->getType()))
|
2010-10-30 07:12:32 +08:00
|
|
|
Arg = Builder.CreateBitCast(Arg, AdjTy);
|
2013-06-07 08:04:50 +08:00
|
|
|
else
|
|
|
|
CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
|
|
|
|
<< InputExpr->getType() << InputConstraint;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2018-08-15 04:21:05 +08:00
|
|
|
// Update largest vector width for any vector types.
|
|
|
|
if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
|
2019-10-08 20:53:54 +08:00
|
|
|
LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
|
|
|
|
VT->getPrimitiveSizeInBits().getFixedSize());
|
2018-08-15 04:21:05 +08:00
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
ArgTypes.push_back(Arg->getType());
|
|
|
|
Args.push_back(Arg);
|
|
|
|
Constraints += InputConstraint;
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-06 04:01:53 +08:00
|
|
|
// Append the "input" part of inout constraints last.
|
|
|
|
for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
|
|
|
|
ArgTypes.push_back(InOutArgTypes[i]);
|
|
|
|
Args.push_back(InOutArgs[i]);
|
|
|
|
}
|
|
|
|
Constraints += InOutConstraints;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2019-06-03 23:57:25 +08:00
|
|
|
// Labels
|
|
|
|
SmallVector<llvm::BasicBlock *, 16> Transfer;
|
|
|
|
llvm::BasicBlock *Fallthrough = nullptr;
|
|
|
|
bool IsGCCAsmGoto = false;
|
|
|
|
if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
|
|
|
|
IsGCCAsmGoto = GS->isAsmGoto();
|
|
|
|
if (IsGCCAsmGoto) {
|
|
|
|
for (auto *E : GS->labels()) {
|
|
|
|
JumpDest Dest = getJumpDestForLabel(E->getLabel());
|
|
|
|
Transfer.push_back(Dest.getBlock());
|
|
|
|
llvm::BlockAddress *BA =
|
|
|
|
llvm::BlockAddress::get(CurFn, Dest.getBlock());
|
|
|
|
Args.push_back(BA);
|
|
|
|
ArgTypes.push_back(BA->getType());
|
|
|
|
if (!Constraints.empty())
|
|
|
|
Constraints += ',';
|
|
|
|
Constraints += 'X';
|
|
|
|
}
|
|
|
|
StringRef Name = "asm.fallthrough";
|
|
|
|
Fallthrough = createBasicBlock(Name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
// Clobbers
|
|
|
|
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
|
2012-08-28 07:47:56 +08:00
|
|
|
StringRef Clobber = S.getClobber(i);
|
2008-02-06 00:35:33 +08:00
|
|
|
|
2015-07-11 02:44:40 +08:00
|
|
|
if (Clobber == "memory")
|
|
|
|
ReadOnly = ReadNone = false;
|
|
|
|
else if (Clobber != "cc")
|
2014-09-05 04:04:38 +08:00
|
|
|
Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2014-09-05 04:04:38 +08:00
|
|
|
if (!Constraints.empty())
|
2008-02-06 00:35:33 +08:00
|
|
|
Constraints += ',';
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-06 08:11:32 +08:00
|
|
|
Constraints += "~{";
|
2008-02-06 00:35:33 +08:00
|
|
|
Constraints += Clobber;
|
2008-02-06 08:11:32 +08:00
|
|
|
Constraints += '}';
|
2008-02-06 00:35:33 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-02-06 00:35:33 +08:00
|
|
|
// Add machine specific clobbers
|
2013-04-17 06:48:15 +08:00
|
|
|
std::string MachineClobbers = getTarget().getClobbers();
|
2008-12-21 09:15:32 +08:00
|
|
|
if (!MachineClobbers.empty()) {
|
2008-02-06 00:35:33 +08:00
|
|
|
if (!Constraints.empty())
|
|
|
|
Constraints += ',';
|
2008-12-21 09:15:32 +08:00
|
|
|
Constraints += MachineClobbers;
|
2008-02-06 00:35:33 +08:00
|
|
|
}
|
2009-05-01 08:16:04 +08:00
|
|
|
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *ResultType;
|
2009-05-03 16:21:20 +08:00
|
|
|
if (ResultRegTypes.empty())
|
2012-02-07 08:39:47 +08:00
|
|
|
ResultType = VoidTy;
|
2009-05-03 16:21:20 +08:00
|
|
|
else if (ResultRegTypes.size() == 1)
|
|
|
|
ResultType = ResultRegTypes[0];
|
2009-05-01 08:16:04 +08:00
|
|
|
else
|
2011-02-08 16:22:06 +08:00
|
|
|
ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *FTy =
|
2008-02-06 00:35:33 +08:00
|
|
|
llvm::FunctionType::get(ResultType, ArgTypes, false);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2012-09-05 03:50:17 +08:00
|
|
|
bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
|
2012-09-06 03:01:07 +08:00
|
|
|
llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
|
|
|
|
llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
|
2009-09-09 23:08:12 +08:00
|
|
|
llvm::InlineAsm *IA =
|
2012-09-05 07:08:24 +08:00
|
|
|
llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
|
2012-09-06 03:01:07 +08:00
|
|
|
/* IsAlignStack */ false, AsmDialect);
|
2009-05-03 16:21:20 +08:00
|
|
|
std::vector<llvm::Value*> RegResults;
|
2019-06-03 23:57:25 +08:00
|
|
|
if (IsGCCAsmGoto) {
|
|
|
|
llvm::CallBrInst *Result =
|
|
|
|
Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
|
|
|
|
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
|
|
|
|
ReadNone, S, ResultRegTypes, *this, RegResults);
|
|
|
|
EmitBlock(Fallthrough);
|
2009-05-01 08:16:04 +08:00
|
|
|
} else {
|
2019-06-03 23:57:25 +08:00
|
|
|
llvm::CallInst *Result =
|
|
|
|
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
|
|
|
|
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
|
|
|
|
ReadNone, S, ResultRegTypes, *this, RegResults);
|
2009-05-03 16:21:20 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2014-09-05 04:04:38 +08:00
|
|
|
assert(RegResults.size() == ResultRegTypes.size());
|
|
|
|
assert(RegResults.size() == ResultTruncRegTypes.size());
|
|
|
|
assert(RegResults.size() == ResultRegDests.size());
|
2019-08-29 19:21:41 +08:00
|
|
|
// ResultRegDests can be also populated by addReturnRegisterOutputs() above,
|
|
|
|
// in which case its size may grow.
|
|
|
|
assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
|
2009-05-03 16:21:20 +08:00
|
|
|
for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
|
|
|
|
llvm::Value *Tmp = RegResults[i];
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-05-03 16:21:20 +08:00
|
|
|
// If the result type of the LLVM IR asm doesn't match the result type of
|
|
|
|
// the expression, do the conversion.
|
|
|
|
if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *TruncTy = ResultTruncRegTypes[i];
|
2012-06-21 01:43:05 +08:00
|
|
|
|
2010-04-24 01:27:29 +08:00
|
|
|
// Truncate the integer result to the right size, note that TruncTy can be
|
|
|
|
// a pointer.
|
|
|
|
if (TruncTy->isFloatingPointTy())
|
|
|
|
Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
|
2010-04-24 12:55:02 +08:00
|
|
|
else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
|
2012-10-09 00:25:52 +08:00
|
|
|
uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
|
2011-02-08 16:22:06 +08:00
|
|
|
Tmp = Builder.CreateTrunc(Tmp,
|
|
|
|
llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
|
2009-05-03 16:21:20 +08:00
|
|
|
Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
|
2010-04-24 12:55:02 +08:00
|
|
|
} else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
|
2012-10-09 00:25:52 +08:00
|
|
|
uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
|
2011-02-08 16:22:06 +08:00
|
|
|
Tmp = Builder.CreatePtrToInt(Tmp,
|
|
|
|
llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
|
2010-04-24 12:55:02 +08:00
|
|
|
Tmp = Builder.CreateTrunc(Tmp, TruncTy);
|
|
|
|
} else if (TruncTy->isIntegerTy()) {
|
2017-09-12 19:05:42 +08:00
|
|
|
Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
|
2010-10-30 07:12:32 +08:00
|
|
|
} else if (TruncTy->isVectorTy()) {
|
|
|
|
Tmp = Builder.CreateBitCast(Tmp, TruncTy);
|
2009-05-03 16:21:20 +08:00
|
|
|
}
|
2009-05-01 08:16:04 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2019-08-29 19:21:41 +08:00
|
|
|
LValue Dest = ResultRegDests[i];
|
|
|
|
// ResultTypeRequiresCast elements correspond to the first
|
|
|
|
// ResultTypeRequiresCast.size() elements of RegResults.
|
|
|
|
if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
|
|
|
|
unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
|
|
|
|
Address A = Builder.CreateBitCast(Dest.getAddress(),
|
|
|
|
ResultRegTypes[i]->getPointerTo());
|
|
|
|
QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
|
|
|
|
if (Ty.isNull()) {
|
|
|
|
const Expr *OutExpr = S.getOutputExpr(i);
|
|
|
|
CGM.Error(
|
|
|
|
OutExpr->getExprLoc(),
|
2019-08-30 16:58:46 +08:00
|
|
|
"impossible constraint in asm: can't store value into a register");
|
2019-08-29 19:21:41 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
Dest = MakeAddrLValue(A, Ty);
|
|
|
|
}
|
|
|
|
EmitStoreThroughLValue(RValue::get(Tmp), Dest);
|
2009-05-01 08:16:04 +08:00
|
|
|
}
|
2008-02-06 00:35:33 +08:00
|
|
|
}
|
2013-04-17 02:53:08 +08:00
|
|
|
|
2014-10-29 20:21:55 +08:00
|
|
|
/// Build and fill in the local record that holds the captures of \p S.
///
/// Allocates a stack temporary of the captured record type and stores each
/// capture initializer (or, for captured VLA bounds, the previously computed
/// size value) into the corresponding field. Returns an lvalue for the
/// initialized struct.
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
  const RecordDecl *CapRecord = S.getCapturedRecordDecl();
  QualType CapRecordTy = getContext().getRecordType(CapRecord);

  // Reserve stack space for the captured struct.
  LValue CapSlot =
      MakeAddrLValue(CreateMemTemp(CapRecordTy, "agg.captured"), CapRecordTy);

  // Walk the capture initializers in lockstep with the record's fields and
  // emit a store for each one.
  auto Field = CapRecord->field_begin();
  for (auto Init = S.capture_init_begin(), InitEnd = S.capture_init_end();
       Init != InitEnd; ++Init, ++Field) {
    LValue FieldLV = EmitLValueForFieldInitialization(CapSlot, *Field);
    if (Field->hasCapturedVLAType()) {
      // A captured VLA bound is represented by its size expression; the size
      // value was already evaluated, so store it directly.
      auto VAT = Field->getCapturedVLAType();
      EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]),
                             FieldLV);
    } else {
      EmitInitializerForField(*Field, FieldLV, *Init);
    }
  }

  return CapSlot;
}
|
|
|
|
|
|
|
|
/// Generate an outlined function for the body of a CapturedStmt, store any
|
|
|
|
/// captured variables into the captured struct, and call the outlined function.
|
|
|
|
llvm::Function *
|
|
|
|
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
|
2014-10-29 20:21:55 +08:00
|
|
|
LValue CapStruct = InitCapturedStruct(S);
|
2013-05-10 03:17:11 +08:00
|
|
|
|
|
|
|
// Emit the CapturedDecl
|
|
|
|
CodeGenFunction CGF(CGM, true);
|
2015-06-24 11:35:38 +08:00
|
|
|
CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
|
2014-06-30 10:55:54 +08:00
|
|
|
llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
|
2013-05-10 03:17:11 +08:00
|
|
|
delete CGF.CapturedStmtInfo;
|
|
|
|
|
|
|
|
// Emit call to the helper function.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
EmitCallOrInvoke(F, CapStruct.getPointer());
|
2013-05-10 03:17:11 +08:00
|
|
|
|
|
|
|
return F;
|
|
|
|
}
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
/// Emit the initialized captured struct for \p S and return its address.
///
/// Unlike EmitCapturedStmt, this does not emit a call to the outlined
/// function; the caller is responsible for invoking the captured region.
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
  return InitCapturedStruct(S).getAddress();
}
|
|
|
|
|
2013-05-10 03:17:11 +08:00
|
|
|
/// Creates the outlined function for a CapturedStmt.
///
/// Builds an internal-linkage LLVM function whose parameters are the
/// CapturedDecl's parameters (including the context pointer to the captured
/// struct), emits its body, and returns the finished function. Must be
/// called with CapturedStmtInfo already set on this CodeGenFunction.
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
  assert(CapturedStmtInfo &&
    "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  SourceLocation Loc = S.getBeginLoc();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration. The helper returns void; results, if
  // any, are communicated through the captured struct.
  const CGFunctionInfo &FuncInfo =
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  // Propagate the nothrow guarantee from the CapturedDecl onto the helper.
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getBeginLoc());
  // Set the context parameter in CapturedStmtInfo: load the pointer to the
  // captured struct so field accesses below can go through it.
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // Initialize variable-length arrays. Captured VLA size expressions are
  // loaded out of the context record and registered in VLASizeMap so that
  // sizeof/indexing on the VLA inside the body finds them.
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
                                           Ctx.getTagDeclType(RD));
  for (auto *FD : RD->fields()) {
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
              .getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    }
  }

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue ThisLValue = EmitLValueForField(Base, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  // Attach PGO region counters to the helper, then emit its body and close
  // the function at the closing brace of the CapturedDecl body.
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}
|