forked from OSchip/llvm-project
[OPENMP] Fixed codegen for 'omp cancel' construct.
If an 'omp cancel' construct is used inside a worksharing construct, the program may hang when a reduction clause is present. This patch fixes the problem by skipping the extra reduction processing for branches that were canceled. llvm-svn: 286944
This commit is contained in:
parent
53315a7b98
commit
473a3e7fed
|
@ -1781,9 +1781,17 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
|
|||
EmitBlock(LoopExit.getBlock());
|
||||
|
||||
// Tell the runtime we are done.
|
||||
if (!DynamicOrOrdered)
|
||||
RT.emitForStaticFinish(*this, S.getLocEnd());
|
||||
SourceLocation ELoc = S.getLocEnd();
|
||||
auto &&CodeGen = [DynamicOrOrdered, ELoc](CodeGenFunction &CGF) {
|
||||
if (!DynamicOrOrdered)
|
||||
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, ELoc);
|
||||
};
|
||||
CodeGen(*this);
|
||||
|
||||
OpenMPDirectiveKind DKind = S.getDirectiveKind();
|
||||
if (DKind == OMPD_for || DKind == OMPD_parallel_for ||
|
||||
DKind == OMPD_distribute_parallel_for)
|
||||
OMPCancelStack.back().CodeGen = CodeGen;
|
||||
}
|
||||
|
||||
void CodeGenFunction::EmitOMPForOuterLoop(
|
||||
|
@ -1895,6 +1903,7 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop(
|
|||
void CodeGenFunction::EmitOMPDistributeParallelForDirective(
|
||||
const OMPDistributeParallelForDirective &S) {
|
||||
OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
|
||||
OMPCancelStackRAII CancelRegion(*this);
|
||||
CGM.getOpenMPRuntime().emitInlinedDirective(
|
||||
*this, OMPD_distribute_parallel_for,
|
||||
[&S](CodeGenFunction &CGF, PrePostActionTy &) {
|
||||
|
@ -2123,7 +2132,15 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
|
|||
[](CodeGenFunction &) {});
|
||||
EmitBlock(LoopExit.getBlock());
|
||||
// Tell the runtime we are done.
|
||||
RT.emitForStaticFinish(*this, S.getLocStart());
|
||||
SourceLocation ELoc = S.getLocEnd();
|
||||
auto &&CodeGen = [ELoc](CodeGenFunction &CGF) {
|
||||
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, ELoc);
|
||||
};
|
||||
CodeGen(*this);
|
||||
OpenMPDirectiveKind DKind = S.getDirectiveKind();
|
||||
if (DKind == OMPD_for || DKind == OMPD_parallel_for ||
|
||||
DKind == OMPD_distribute_parallel_for)
|
||||
OMPCancelStack.back().CodeGen = CodeGen;
|
||||
} else {
|
||||
const bool IsMonotonic =
|
||||
Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
|
||||
|
@ -2177,6 +2194,7 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
|
|||
};
|
||||
{
|
||||
OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
|
||||
OMPCancelStackRAII CancelRegion(*this);
|
||||
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
|
||||
S.hasCancel());
|
||||
}
|
||||
|
@ -2313,7 +2331,12 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
|
|||
CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
|
||||
[](CodeGenFunction &) {});
|
||||
// Tell the runtime we are done.
|
||||
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
|
||||
SourceLocation ELoc = S.getLocEnd();
|
||||
auto &&CodeGen = [ELoc](CodeGenFunction &CGF) {
|
||||
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, ELoc);
|
||||
};
|
||||
CodeGen(CGF);
|
||||
CGF.OMPCancelStack.back().CodeGen = CodeGen;
|
||||
CGF.EmitOMPReductionClauseFinal(S);
|
||||
// Emit post-update of the reduction variables if IsLastIter != 0.
|
||||
emitPostUpdateForReductionClause(
|
||||
|
@ -2351,6 +2374,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
|
|||
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
|
||||
{
|
||||
OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
|
||||
OMPCancelStackRAII CancelRegion(*this);
|
||||
EmitSections(S);
|
||||
}
|
||||
// Emit an implicit barrier at the end.
|
||||
|
@ -2438,6 +2462,7 @@ void CodeGenFunction::EmitOMPParallelForDirective(
|
|||
// Emit directive as a combined directive that consists of two implicit
|
||||
// directives: 'parallel' with 'for' directive.
|
||||
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
|
||||
OMPCancelStackRAII CancelRegion(CGF);
|
||||
CGF.EmitOMPWorksharingLoop(S);
|
||||
};
|
||||
emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
|
||||
|
@ -2458,6 +2483,7 @@ void CodeGenFunction::EmitOMPParallelSectionsDirective(
|
|||
// Emit directive as a combined directive that consists of two implicit
|
||||
// directives: 'parallel' with 'sections' directive.
|
||||
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
|
||||
OMPCancelStackRAII CancelRegion(CGF);
|
||||
CGF.EmitSections(S);
|
||||
};
|
||||
emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
|
||||
|
@ -3438,8 +3464,11 @@ CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
|
|||
if (Kind == OMPD_parallel || Kind == OMPD_task)
|
||||
return ReturnBlock;
|
||||
assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
|
||||
Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for);
|
||||
return BreakContinueStack.back().BreakBlock;
|
||||
Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
|
||||
Kind == OMPD_distribute_parallel_for);
|
||||
if (!OMPCancelStack.back().ExitBlock.isValid())
|
||||
OMPCancelStack.back().ExitBlock = getJumpDestInCurrentScope("cancel.exit");
|
||||
return OMPCancelStack.back().ExitBlock;
|
||||
}
|
||||
|
||||
void CodeGenFunction::EmitOMPUseDevicePtrClause(
|
||||
|
|
|
@ -982,6 +982,35 @@ private:
|
|||
};
|
||||
SmallVector<BreakContinue, 8> BreakContinueStack;
|
||||
|
||||
/// Data for exit block for proper support of OpenMP cancellation constructs.
|
||||
struct OMPCancel {
|
||||
JumpDest ExitBlock;
|
||||
llvm::function_ref<void(CodeGenFunction &CGF)> CodeGen;
|
||||
OMPCancel() : CodeGen([](CodeGenFunction &CGF) {}) {}
|
||||
};
|
||||
SmallVector<OMPCancel, 8> OMPCancelStack;
|
||||
|
||||
/// Controls insertion of cancellation exit blocks in worksharing constructs.
|
||||
class OMPCancelStackRAII {
|
||||
CodeGenFunction &CGF;
|
||||
|
||||
public:
|
||||
OMPCancelStackRAII(CodeGenFunction &CGF) : CGF(CGF) {
|
||||
CGF.OMPCancelStack.push_back({});
|
||||
}
|
||||
~OMPCancelStackRAII() {
|
||||
if (CGF.HaveInsertPoint() &&
|
||||
CGF.OMPCancelStack.back().ExitBlock.isValid()) {
|
||||
auto CJD = CGF.getJumpDestInCurrentScope("cancel.cont");
|
||||
CGF.EmitBranchThroughCleanup(CJD);
|
||||
CGF.EmitBlock(CGF.OMPCancelStack.back().ExitBlock.getBlock());
|
||||
CGF.OMPCancelStack.back().CodeGen(CGF);
|
||||
CGF.EmitBranchThroughCleanup(CJD);
|
||||
CGF.EmitBlock(CJD.getBlock());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
CodeGenPGO PGO;
|
||||
|
||||
/// Calculate branch weights appropriate for PGO data
|
||||
|
|
|
@ -90,9 +90,11 @@ for (int i = 0; i < argc; ++i) {
|
|||
}
|
||||
}
|
||||
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
|
||||
#pragma omp parallel for
|
||||
int r = 0;
|
||||
#pragma omp parallel for reduction(+:r)
|
||||
for (int i = 0; i < argc; ++i) {
|
||||
#pragma omp cancel for
|
||||
r += i;
|
||||
}
|
||||
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
|
||||
return argc;
|
||||
|
@ -163,6 +165,9 @@ for (int i = 0; i < argc; ++i) {
|
|||
// CHECK: [[CONTINUE]]
|
||||
// CHECK: br label
|
||||
// CHECK: call void @__kmpc_for_static_fini(
|
||||
// CHECK: call i32 @__kmpc_reduce_nowait(
|
||||
// CHECK: call void @__kmpc_end_reduce_nowait(
|
||||
// CHECK: call void @__kmpc_for_static_fini(
|
||||
// CHECK: ret void
|
||||
|
||||
#endif
|
||||
|
|
Loading…
Reference in New Issue