[OpenMP] Initial parsing/sema for the 'omp parallel loop' construct

Adds basic parsing/sema/serialization support for the '#pragma omp parallel loop' directive.
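
For reference, a minimal sketch of what this makes parsable (the example is
illustrative, not taken from the patch; it assumes -fopenmp
-fopenmp-version=51 as in the tests below, and CodeGen still reports the
directive as unsupported, so only the parse/sema/AST paths are exercised):

    // Illustrative only: a combined construct forming a parallel region
    // with a loop region that binds to it; num_threads is one of the
    // clauses permitted on this directive.
    void saxpy(int n, float a, const float *x, float *y) {
    #pragma omp parallel loop num_threads(4)
      for (int i = 0; i < n; ++i)
        y[i] = a * x[i] + y[i];
    }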

 Differential Revision: https://reviews.llvm.org/D122247
Mike Rice 2022-03-22 10:55:21 -07:00
parent 72c1a0d9c2
commit 2cedaee6f7
24 changed files with 567 additions and 4 deletions

View File

@@ -2608,7 +2608,11 @@ enum CXCursorKind {
*/
CXCursor_OMPTargetTeamsGenericLoopDirective = 297,
CXCursor_LastStmt = CXCursor_OMPTargetTeamsGenericLoopDirective,
/** OpenMP parallel loop directive.
*/
CXCursor_OMPParallelGenericLoopDirective = 298,
CXCursor_LastStmt = CXCursor_OMPParallelGenericLoopDirective,
/**
* Cursor that represents the translation unit itself.

View File

@@ -3080,6 +3080,8 @@ DEF_TRAVERSE_STMT(OMPTeamsGenericLoopDirective,
DEF_TRAVERSE_STMT(OMPTargetTeamsGenericLoopDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
DEF_TRAVERSE_STMT(OMPParallelGenericLoopDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
// OpenMP clauses.
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseOMPClause(OMPClause *C) {

View File

@@ -1528,6 +1528,7 @@ public:
T->getStmtClass() == OMPGenericLoopDirectiveClass ||
T->getStmtClass() == OMPTeamsGenericLoopDirectiveClass ||
T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass ||
T->getStmtClass() == OMPParallelGenericLoopDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeDirectiveClass ||
@@ -5701,6 +5702,71 @@ public:
return T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass;
}
};
/// This represents '#pragma omp parallel loop' directive.
///
/// \code
/// #pragma omp parallel loop private(a,b) order(concurrent)
/// \endcode
/// In this example directive '#pragma omp parallel loop' has
/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
///
class OMPParallelGenericLoopDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPParallelGenericLoopDirective(SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelGenericLoopDirectiveClass,
llvm::omp::OMPD_parallel_loop, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPParallelGenericLoopDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelGenericLoopDirectiveClass,
llvm::omp::OMPD_parallel_loop, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \p Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPParallelGenericLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelGenericLoopDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelGenericLoopDirectiveClass;
}
};
} // end namespace clang
#endif

View File

@@ -285,3 +285,4 @@ def OMPMaskedDirective : StmtNode<OMPExecutableDirective>;
def OMPGenericLoopDirective : StmtNode<OMPLoopDirective>;
def OMPTeamsGenericLoopDirective : StmtNode<OMPLoopDirective>;
def OMPTargetTeamsGenericLoopDirective : StmtNode<OMPLoopDirective>;
def OMPParallelGenericLoopDirective : StmtNode<OMPLoopDirective>;

View File

@@ -10914,6 +10914,11 @@ public:
StmtResult ActOnOpenMPTargetTeamsGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPParallelGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,

View File

@@ -1963,6 +1963,7 @@ enum StmtCode {
STMT_OMP_GENERIC_LOOP_DIRECTIVE,
STMT_OMP_TEAMS_GENERIC_LOOP_DIRECTIVE,
STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE,
STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE,
EXPR_OMP_ARRAY_SECTION,
EXPR_OMP_ARRAY_SHAPING,
EXPR_OMP_ITERATOR,

View File

@@ -2225,3 +2225,46 @@ OMPTargetTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C,
C, NumClauses, /*HasAssociatedStmt=*/true,
numLoopChildren(CollapsedNum, OMPD_target_teams_loop), CollapsedNum);
}
OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
auto *Dir = createDirective<OMPParallelGenericLoopDirective>(
C, Clauses, AssociatedStmt,
numLoopChildren(CollapsedNum, OMPD_parallel_loop), StartLoc, EndLoc,
CollapsedNum);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
Dir->setPreCond(Exprs.PreCond);
Dir->setCond(Exprs.Cond);
Dir->setInit(Exprs.Init);
Dir->setInc(Exprs.Inc);
Dir->setIsLastIterVariable(Exprs.IL);
Dir->setLowerBoundVariable(Exprs.LB);
Dir->setUpperBoundVariable(Exprs.UB);
Dir->setStrideVariable(Exprs.ST);
Dir->setEnsureUpperBound(Exprs.EUB);
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
Dir->setDependentCounters(Exprs.DependentCounters);
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
return Dir;
}
OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::CreateEmpty(
const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell) {
return createEmptyDirective<OMPParallelGenericLoopDirective>(
C, NumClauses, /*HasAssociatedStmt=*/true,
numLoopChildren(CollapsedNum, OMPD_parallel_loop), CollapsedNum);
}

View File

@@ -1017,6 +1017,12 @@ void StmtPrinter::VisitOMPTargetTeamsGenericLoopDirective(
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPParallelGenericLoopDirective(
OMPParallelGenericLoopDirective *Node) {
Indent() << "#pragma omp parallel loop";
PrintOMPExecutableDirective(Node);
}
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//

View File

@@ -1217,6 +1217,11 @@ void StmtProfiler::VisitOMPTargetTeamsGenericLoopDirective(
VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitOMPParallelGenericLoopDirective(
const OMPParallelGenericLoopDirective *S) {
VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}

View File

@@ -496,7 +496,8 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
DKind == OMPD_target_teams_distribute_simd || DKind == OMPD_tile ||
DKind == OMPD_unroll || DKind == OMPD_loop ||
DKind == OMPD_teams_loop || DKind == OMPD_target_teams_loop;
DKind == OMPD_teams_loop || DKind == OMPD_target_teams_loop ||
DKind == OMPD_parallel_loop;
}
bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -534,7 +535,8 @@ bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
DKind == OMPD_parallel_master ||
DKind == OMPD_parallel_master_taskloop ||
DKind == OMPD_parallel_master_taskloop_simd;
DKind == OMPD_parallel_master_taskloop_simd ||
DKind == OMPD_parallel_loop;
}
bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
@@ -604,7 +606,7 @@ bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
bool clang::isOpenMPGenericLoopDirective(OpenMPDirectiveKind Kind) {
return Kind == OMPD_loop || Kind == OMPD_teams_loop ||
Kind == OMPD_target_teams_loop;
Kind == OMPD_target_teams_loop || Kind == OMPD_parallel_loop;
}
bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
@@ -650,6 +652,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_parallel_sections:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_parallel_loop:
CaptureRegions.push_back(OMPD_parallel);
break;
case OMPD_target_teams:

View File

@@ -402,6 +402,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
llvm_unreachable("target teams loop directive not supported yet.");
break;
case Stmt::OMPParallelGenericLoopDirectiveClass:
llvm_unreachable("parallel loop directive not supported yet.");
break;
}
}

View File

@@ -150,6 +150,7 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
{OMPD_for, OMPD_simd, OMPD_for_simd},
{OMPD_parallel, OMPD_for, OMPD_parallel_for},
{OMPD_parallel_for, OMPD_simd, OMPD_parallel_for_simd},
{OMPD_parallel, OMPD_loop, OMPD_parallel_loop},
{OMPD_parallel, OMPD_sections, OMPD_parallel_sections},
{OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd},
{OMPD_target, OMPD_parallel, OMPD_target_parallel},
@@ -2403,6 +2404,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_loop:
case OMPD_teams_loop:
case OMPD_target_teams_loop:
case OMPD_parallel_loop:
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
break;
@@ -2760,6 +2762,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
case OMPD_loop:
case OMPD_teams_loop:
case OMPD_target_teams_loop:
case OMPD_parallel_loop:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:

View File

@@ -1501,6 +1501,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPGenericLoopDirectiveClass:
case Stmt::OMPTeamsGenericLoopDirectiveClass:
case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
case Stmt::OMPParallelGenericLoopDirectiveClass:
case Stmt::ReturnStmtClass:
case Stmt::SEHExceptStmtClass:
case Stmt::SEHFinallyStmtClass:

View File

@@ -3935,6 +3935,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
case OMPD_parallel_master:
case OMPD_parallel_loop:
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd: {
@@ -6320,6 +6321,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
Res = ActOnOpenMPTargetTeamsGenericLoopDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
break;
case OMPD_parallel_loop:
Res = ActOnOpenMPParallelGenericLoopDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_threadprivate:
@@ -10312,6 +10317,54 @@ StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
// OpenMP 5.1 [2.11.7, loop construct, Restrictions]
// A list item may not appear in a lastprivate clause unless it is the
// loop iteration variable of a loop that is associated with the construct.
if (checkGenericLoopLastprivate(*this, Clauses, OMPD_parallel_loop, DSAStack))
return StmtError();
auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
// The point of exit cannot be a branch out of the structured block.
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_parallel_loop);
ThisCaptureLevel > 1; --ThisCaptureLevel) {
CS = cast<CapturedStmt>(CS->getCapturedStmt());
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
// The point of exit cannot be a branch out of the structured block.
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
}
OMPLoopDirective::HelperExprs B;
// If a 'collapse' clause is present, it determines the number of nested loops.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_parallel_loop, getCollapseNumberExpr(Clauses),
/*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
assert((CurContext->isDependentContext() || B.builtAll()) &&
"omp loop exprs were not built");
setFunctionHasBranchProtectedScope();
return OMPParallelGenericLoopDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -14619,6 +14672,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_parallel_master:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_loop:
case OMPD_target:
case OMPD_target_teams:
case OMPD_target_teams_distribute:
@@ -14693,6 +14747,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_parallel_loop:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_parallel_master_taskloop:
@@ -14798,6 +14853,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_parallel_loop:
case OMPD_target:
case OMPD_target_simd:
case OMPD_target_parallel:
@@ -14882,6 +14938,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_parallel_loop:
case OMPD_target:
case OMPD_target_simd:
case OMPD_target_parallel:
@@ -14990,6 +15047,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_loop:
case OMPD_teams_loop:
case OMPD_target_teams_loop:
case OMPD_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -15074,6 +15132,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_loop:
case OMPD_teams_loop:
case OMPD_target_teams_loop:
case OMPD_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -15159,6 +15218,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_target:
case OMPD_loop:
case OMPD_teams_loop:
case OMPD_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -15245,6 +15305,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_loop:
case OMPD_teams_loop:
case OMPD_target_teams_loop:
case OMPD_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:

View File

@@ -9271,6 +9271,17 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsGenericLoopDirective(
return Res;
}
template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelGenericLoopDirective(
OMPParallelGenericLoopDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_loop, DirName,
nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
}
//===----------------------------------------------------------------------===//
// OpenMP clause transformation
//===----------------------------------------------------------------------===//

View File

@@ -2648,6 +2648,11 @@ void ASTStmtReader::VisitOMPTargetTeamsGenericLoopDirective(
VisitOMPLoopDirective(D);
}
void ASTStmtReader::VisitOMPParallelGenericLoopDirective(
OMPParallelGenericLoopDirective *D) {
VisitOMPLoopDirective(D);
}
//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -3619,6 +3624,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
case STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
S = OMPParallelGenericLoopDirective::CreateEmpty(Context, NumClauses,
CollapsedNum, Empty);
break;
}
case EXPR_CXX_OPERATOR_CALL:
S = CXXOperatorCallExpr::CreateEmpty(
Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],

View File

@@ -2607,6 +2607,12 @@ void ASTStmtWriter::VisitOMPTargetTeamsGenericLoopDirective(
Code = serialization::STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPParallelGenericLoopDirective(
OMPParallelGenericLoopDirective *D) {
VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE;
}
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//

View File

@@ -1299,6 +1299,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPGenericLoopDirectiveClass:
case Stmt::OMPTeamsGenericLoopDirectiveClass:
case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
case Stmt::OMPParallelGenericLoopDirectiveClass:
case Stmt::CapturedStmtClass:
case Stmt::OMPUnrollDirectiveClass:
case Stmt::OMPMetaDirectiveClass: {

View File

@@ -725,3 +725,25 @@ void targetteamsloop(int argc) {
for (int i = 0; i < 10; ++i)
argc = x;
}
// CHECK-LABEL: void parallelloop(int argc)
void parallelloop(int argc) {
int x, cond, fp, rd;
// CHECK-DAG: [B3]
// CHECK-DAG: [[#PFB:]]: x
// CHECK-DAG: [[#PFB+1]]: [B3.[[#PFB]]] (ImplicitCastExpr, LValueToRValue, int)
// CHECK-DAG: [[#PFB+2]]: argc
// CHECK-DAG: [[#PFB+3]]: [B3.[[#PFB+2]]] = [B3.[[#PFB+1]]]
// CHECK-DAG: [B1]
// CHECK-DAG: [[#PF:]]: cond
// CHECK-DAG: [[#PF+1]]: [B1.[[#PF]]] (ImplicitCastExpr, LValueToRValue, int)
// CHECK-DAG: [[#PF+2]]: [B1.[[#PF+1]]] (ImplicitCastExpr, IntegralToBoolean, _Bool)
// CHECK-DAG: [[#PF+3]]: fp
// CHECK-DAG: [[#PF+4]]: rd
// CHECK-DAG: [[#PF+5]]: #pragma omp parallel loop if(cond) firstprivate(fp) reduction(&: rd)
// CHECK-DAG: for (int i = 0;
// CHECK-DAG: [B3.[[#PFB+3]]];
#pragma omp parallel loop if(cond) firstprivate(fp) reduction(&:rd)
for (int i = 0; i < 10; ++i)
argc = x;
}

View File

@@ -0,0 +1,123 @@
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
// RUN: -fsyntax-only -verify %s
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
// RUN: -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
// RUN: -emit-pch -o %t %s
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
// RUN: -include-pch %t -ast-print %s | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
typedef void **omp_allocator_handle_t;
extern const omp_allocator_handle_t omp_null_allocator;
extern const omp_allocator_handle_t omp_default_mem_alloc;
extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
extern const omp_allocator_handle_t omp_const_mem_alloc;
extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
extern const omp_allocator_handle_t omp_pteam_mem_alloc;
extern const omp_allocator_handle_t omp_thread_mem_alloc;
//CHECK: template <typename T, int C> void templ_foo(T t) {
//CHECK: T j, z;
//CHECK: #pragma omp parallel loop collapse(C) reduction(+: z) lastprivate(j) bind(thread)
//CHECK: for (T i = 0; i < t; ++i)
//CHECK: for (j = 0; j < t; ++j)
//CHECK: z += i + j;
//CHECK: }
//CHECK: template<> void templ_foo<int, 2>(int t) {
//CHECK: int j, z;
//CHECK: #pragma omp parallel loop collapse(2) reduction(+: z) lastprivate(j) bind(thread)
//CHECK: for (int i = 0; i < t; ++i)
//CHECK: for (j = 0; j < t; ++j)
//CHECK: z += i + j;
//CHECK: }
template <typename T, int C>
void templ_foo(T t) {
T j,z;
#pragma omp parallel loop collapse(C) reduction(+:z) lastprivate(j) bind(thread)
for (T i = 0; i<t; ++i)
for (j = 0; j<t; ++j)
z += i+j;
}
//CHECK: void test() {
void test() {
constexpr int N = 100;
float MTX[N][N];
int aaa[1000];
//CHECK: #pragma omp target teams distribute parallel for map(tofrom: MTX)
//CHECK: #pragma omp parallel loop
#pragma omp target teams distribute parallel for map(MTX)
for (auto i = 0; i < N; ++i) {
#pragma omp parallel loop
for (auto j = 0; j < N; ++j) {
MTX[i][j] = 0;
}
}
//CHECK: #pragma omp target teams
//CHECK: #pragma omp parallel loop
#pragma omp target teams
for (int i=0; i<1000; ++i) {
#pragma omp parallel loop
for (int j=0; j<100; j++) {
aaa[i] += i + j;
}
}
int j, z, z1, z2 = 1, z3 = 2;
//CHECK: #pragma omp parallel loop collapse(2) private(z) lastprivate(j) order(concurrent) firstprivate(z2) num_threads(4) if(1) allocate(omp_default_mem_alloc: z2) shared(z3) reduction(+: z1) bind(parallel) proc_bind(primary)
#pragma omp parallel loop collapse(2) private(z) lastprivate(j) \
order(concurrent) firstprivate(z2) num_threads(4) if(1) \
allocate(omp_default_mem_alloc:z2) shared(z3) \
reduction(+:z1) bind(parallel) proc_bind(primary)
for (auto i = 0; i < N; ++i) {
for (j = 0; j < N; ++j) {
z = i+j;
MTX[i][j] = z;
z1 += z;
}
}
//CHECK: #pragma omp target teams
//CHECK: #pragma omp parallel loop bind(teams)
#pragma omp target teams
#pragma omp parallel loop bind(teams)
for (auto i = 0; i < N; ++i) { }
//CHECK: #pragma omp target
//CHECK: #pragma omp teams
//CHECK: #pragma omp parallel loop bind(teams)
#pragma omp target
#pragma omp teams
#pragma omp parallel loop bind(teams)
for (auto i = 0; i < N; ++i) { }
}
//CHECK: void nobindingfunc() {
void nobindingfunc()
{
//CHECK: #pragma omp parallel loop
#pragma omp parallel loop
for (int i=0; i<10; ++i) { }
}
void bar()
{
templ_foo<int,2>(8);
}
#endif // HEADER

View File

@@ -0,0 +1,158 @@
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -verify -fopenmp \
// RUN: -fopenmp-version=51 -Wuninitialized %s
void foo()
{
int i,j,k;
int z;
// expected-error@+2 {{statement after '#pragma omp parallel loop' must be a for loop}}
#pragma omp parallel loop bind(thread)
i = 0;
// OpenMP 5.1 [2.22 Nesting of regions]
//
// A barrier region may not be closely nested inside a worksharing, loop,
// task, taskloop, critical, ordered, atomic, or masked region.
// expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region}}
#pragma omp parallel loop bind(thread)
for (i=0; i<1000; ++i) {
#pragma omp barrier
}
// A masked region may not be closely nested inside a worksharing, loop,
// atomic, task, or taskloop region.
// expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region}}
#pragma omp parallel loop bind(thread)
for (i=0; i<1000; ++i) {
#pragma omp masked filter(2)
{ }
}
// An ordered region that corresponds to an ordered construct without any
// clause or with the threads or depend clause may not be closely nested
// inside a critical, ordered, loop, atomic, task, or taskloop region.
// expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region; perhaps you forget to enclose 'omp ordered' directive into a for or a parallel for region with 'ordered' clause?}}
#pragma omp parallel loop bind(thread)
for (i=0; i<1000; ++i) {
#pragma omp ordered
{ }
}
// expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region; perhaps you forget to enclose 'omp ordered' directive into a for or a parallel for region with 'ordered' clause?}}
#pragma omp parallel loop bind(thread)
for (i=0; i<1000; ++i) {
#pragma omp ordered threads
{ }
}
// expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region; perhaps you forget to enclose 'omp ordered' directive into a for or a parallel for region with 'ordered' clause?}}
#pragma omp parallel loop bind(thread)
for (i=0; i<1000; ++i) {
#pragma omp ordered depend(source)
}
// bind clause
// expected-error@+1 {{directive '#pragma omp parallel loop' cannot contain more than one 'bind' clause}}
#pragma omp parallel loop bind(thread) bind(thread)
for (i=0; i<1000; ++i) {
}
// expected-error@+1 {{expected 'teams', 'parallel' or 'thread' in OpenMP clause 'bind'}}
#pragma omp parallel loop bind(other)
for (i=0; i<1000; ++i) {
}
// collapse clause
// expected-error@+4 {{expected 2 for loops after '#pragma omp parallel loop', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel loop collapse(2) bind(thread)
for (i=0; i<1000; ++i)
z = i+11;
// expected-error@+1 {{directive '#pragma omp parallel loop' cannot contain more than one 'collapse' clause}}
#pragma omp parallel loop collapse(2) collapse(2) bind(thread)
for (i=0; i<1000; ++i)
for (j=0; j<1000; ++j)
z = i+j+11;
// order clause
// expected-error@+1 {{expected 'concurrent' in OpenMP clause 'order'}}
#pragma omp parallel loop order(foo) bind(thread)
for (i=0; i<1000; ++i)
z = i+11;
// private clause
// expected-error@+1 {{use of undeclared identifier 'undef_var'}}
#pragma omp parallel loop private(undef_var) bind(thread)
for (i=0; i<1000; ++i)
z = i+11;
// lastprivate
// A list item may not appear in a lastprivate clause unless it is the loop
// iteration variable of a loop that is associated with the construct.
// expected-error@+1 {{only loop iteration variables are allowed in 'lastprivate' clause in 'omp parallel loop' directives}}
#pragma omp parallel loop lastprivate(z) bind(thread)
for (i=0; i<1000; ++i) {
z = i+11;
}
// expected-error@+1 {{only loop iteration variables are allowed in 'lastprivate' clause in 'omp parallel loop' directives}}
#pragma omp parallel loop lastprivate(k) collapse(2) bind(thread)
for (i=0; i<1000; ++i)
for (j=0; j<1000; ++j)
for (k=0; k<1000; ++k)
z = i+j+k+11;
// reduction
// expected-error@+1 {{use of undeclared identifier 'undef_var'}}
#pragma omp parallel loop reduction(+:undef_var) bind(thread)
for (i=0; i<1000; ++i)
z = i+11;
// num_threads
// expected-error@+1 {{directive '#pragma omp parallel loop' cannot contain more than one 'num_threads' clause}}
#pragma omp parallel loop num_threads(4) num_threads(4)
for (i=0; i<1000; ++i)
z = i+11;
// proc_bind
// expected-error@+1 {{directive '#pragma omp parallel loop' cannot contain more than one 'proc_bind' clause}}
#pragma omp parallel loop proc_bind(close) proc_bind(primary)
for (i=0; i<1000; ++i)
z = i+11;
}
template <typename T, int C>
void templ_test(T t) {
T i,z;
// expected-error@+4 {{expected 2 for loops after '#pragma omp parallel loop', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel loop collapse(C) bind(thread)
for (i=0; i<1000; ++i)
z = i+11;
// expected-error@+1 {{only loop iteration variables are allowed in 'lastprivate' clause in 'omp parallel loop' directives}}
#pragma omp parallel loop lastprivate(z) bind(thread)
for (i=0; i<1000; ++i) {
z = i+11;
}
}
void bar()
{
templ_test<int, 2>(16); // expected-note {{in instantiation of function template specialization 'templ_test<int, 2>' requested here}}
}

View File

@@ -5730,6 +5730,8 @@ CXString clang_getCursorKindSpelling(enum CXCursorKind Kind) {
return cxstring::createRef("OMPTeamsGenericLoopDirective");
case CXCursor_OMPTargetTeamsGenericLoopDirective:
return cxstring::createRef("OMPTargetTeamsGenericLoopDirective");
case CXCursor_OMPParallelGenericLoopDirective:
return cxstring::createRef("OMPParallelGenericLoopDirective");
case CXCursor_OverloadCandidate:
return cxstring::createRef("OverloadCandidate");
case CXCursor_TypeAliasTemplateDecl:

View File

@@ -832,6 +832,9 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent,
case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
K = CXCursor_OMPTargetTeamsGenericLoopDirective;
break;
case Stmt::OMPParallelGenericLoopDirectiveClass:
K = CXCursor_OMPParallelGenericLoopDirective;
break;
case Stmt::BuiltinBitCastExprClass:
K = CXCursor_BuiltinBitCastExpr;
}

View File

@@ -1808,6 +1808,26 @@ def OMP_target_teams_loop : Directive<"target teams loop"> {
VersionedClause<OMPC_ThreadLimit>,
];
}
def OMP_parallel_loop : Directive<"parallel loop"> {
let allowedClauses = [
VersionedClause<OMPC_Allocate>,
VersionedClause<OMPC_Copyin>,
VersionedClause<OMPC_FirstPrivate>,
VersionedClause<OMPC_LastPrivate>,
VersionedClause<OMPC_Private>,
VersionedClause<OMPC_Reduction>,
VersionedClause<OMPC_Shared>,
];
let allowedOnceClauses = [
VersionedClause<OMPC_Bind, 50>,
VersionedClause<OMPC_Collapse>,
VersionedClause<OMPC_Default>,
VersionedClause<OMPC_If>,
VersionedClause<OMPC_NumThreads>,
VersionedClause<OMPC_Order>,
VersionedClause<OMPC_ProcBind>,
];
}
def OMP_Metadirective : Directive<"metadirective"> {
let allowedClauses = [VersionedClause<OMPC_When>];
let allowedOnceClauses = [VersionedClause<OMPC_Default>];