diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index 5a8f50bf316a..1ca9cd05496b 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -715,12 +715,9 @@ getDataSharingMode(CodeGenModule &CGM) {
 }
 
 /// Check if the parallel directive has an 'if' clause with non-constant or
-/// false condition. Also, check if the number of threads is strictly specified
-/// and run those directives in non-SPMD mode.
-static bool hasParallelIfNumThreadsClause(ASTContext &Ctx,
-                                          const OMPExecutableDirective &D) {
-  if (D.hasClausesOfKind<OMPNumThreadsClause>())
-    return true;
+/// false condition.
+static bool hasParallelIfClause(ASTContext &Ctx,
+                                const OMPExecutableDirective &D) {
   for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
     OpenMPDirectiveKind NameModifier = C->getNameModifier();
     if (NameModifier != OMPD_parallel && NameModifier != OMPD_unknown)
@@ -747,7 +744,7 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
     switch (D.getDirectiveKind()) {
     case OMPD_target:
       if (isOpenMPParallelDirective(DKind) &&
-          !hasParallelIfNumThreadsClause(Ctx, *NestedDir))
+          !hasParallelIfClause(Ctx, *NestedDir))
        return true;
       if (DKind == OMPD_teams) {
         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
@@ -759,14 +756,14 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
           DKind = NND->getDirectiveKind();
           if (isOpenMPParallelDirective(DKind) &&
-              !hasParallelIfNumThreadsClause(Ctx, *NND))
+              !hasParallelIfClause(Ctx, *NND))
             return true;
         }
       }
       return false;
     case OMPD_target_teams:
       return isOpenMPParallelDirective(DKind) &&
-             !hasParallelIfNumThreadsClause(Ctx, *NestedDir);
+             !hasParallelIfClause(Ctx, *NestedDir);
     case OMPD_target_simd:
     case OMPD_target_parallel:
     case OMPD_target_parallel_for:
@@ -840,7 +837,7 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
   case OMPD_target_parallel_for_simd:
   case OMPD_target_teams_distribute_parallel_for:
   case OMPD_target_teams_distribute_parallel_for_simd:
-    return !hasParallelIfNumThreadsClause(Ctx, D);
+    return !hasParallelIfClause(Ctx, D);
   case OMPD_target_simd:
   case OMPD_target_teams_distribute:
   case OMPD_target_teams_distribute_simd:
diff --git a/clang/test/OpenMP/nvptx_target_codegen.cpp b/clang/test/OpenMP/nvptx_target_codegen.cpp
index b05ee9dee6e6..906ff928c475 100644
--- a/clang/test/OpenMP/nvptx_target_codegen.cpp
+++ b/clang/test/OpenMP/nvptx_target_codegen.cpp
@@ -5,21 +5,20 @@
 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
 // RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
 // expected-no-diagnostics
+
 #ifndef HEADER
 #define HEADER
 
-// Check that the execution mode of all 7 target regions is set to Generic Mode.
+// Check that the execution mode of the first target region is set to SPMD Mode and of the other 6 target regions to Generic Mode.
 // CHECK-DAG: [[NONSPMD:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds
 // CHECK-DAG: [[UNKNOWN:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, i8* getelementptr inbounds
-// CHECK-DAG: {{@__omp_offloading_.+l59}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l137}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l214}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l324}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l362}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l380}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l345}}_exec_mode = weak constant i8 1
-// CHECK-DAG: [[MAP_TY:%.+]] = type { [128 x i8] }
-// CHECK-DAG: [[GLOB_TY:%.+]] = type { i32* }
+// CHECK-DAG: {{@__omp_offloading_.+l45}}_exec_mode = weak constant i8 0
+// CHECK-DAG: {{@__omp_offloading_.+l123}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l200}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l310}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l348}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l366}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l331}}_exec_mode = weak constant i8 1
 
 __thread int id;
 
@@ -32,29 +31,16 @@ struct TT{
   tx &operator[](int i) { return X; }
 };
 
-// CHECK: define weak void @__omp_offloading_{{.+}}_{{.+}}targetBar{{.+}}_l59(i32* [[PTR1:%.+]], i32** dereferenceable{{.*}} [[PTR2_REF:%.+]])
+// CHECK: define weak void @__omp_offloading_{{.+}}_{{.+}}targetBar{{.+}}_l45(i32* [[PTR1:%.+]], i32** dereferenceable{{.*}} [[PTR2_REF:%.+]])
 // CHECK: store i32* [[PTR1]], i32** [[PTR1_ADDR:%.+]],
 // CHECK: store i32** [[PTR2_REF]], i32*** [[PTR2_REF_PTR:%.+]],
 // CHECK: [[PTR2_REF:%.+]] = load i32**, i32*** [[PTR2_REF_PTR]],
-// CHECK: call void @__kmpc_kernel_init(
-// CHECK: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MAP_TY]], [[MAP_TY]] addrspace(3)* @{{.+}}, i32 0, i32 0, i32 0) to i8*), i{{64|32}} %{{.+}}, i16 %{{.+}}, i8** addrspacecast (i8* addrspace(3)* [[BUF_PTR:@.+]] to i8**))
-// CHECK: [[BUF:%.+]] = load i8*, i8* addrspace(3)* [[BUF_PTR]],
-// CHECK: [[BUF_OFFS:%.+]] = getelementptr inbounds i8, i8* [[BUF]], i{{[0-9]+}} 0
-// CHECK: [[BUF:%.+]] = bitcast i8* [[BUF_OFFS]] to [[GLOB_TY]]*
-// CHECK: [[PTR1:%.+]] = load i32*, i32** [[PTR1_ADDR]],
-// CHECK: [[PTR1_GLOB_REF:%.+]] = getelementptr inbounds [[GLOB_TY]], [[GLOB_TY]]* [[BUF]], i32 0, i32 0
-// CHECK: store i32* [[PTR1]], i32** [[PTR1_GLOB_REF]],
-// CHECK: call void @__kmpc_begin_sharing_variables(i8*** [[ARG_PTRS_REF:%.+]], i{{64|32}} 2)
-// CHECK: [[ARG_PTRS:%.+]] = load i8**, i8*** [[ARG_PTRS_REF]],
-// CHECK: [[ARG_PTR1:%.+]] = getelementptr inbounds i8*, i8** [[ARG_PTRS]], i{{[0-9]+}} 0
-// CHECK: [[BC:%.+]] = bitcast i32** [[PTR1_GLOB_REF]] to i8*
-// CHECK: store i8* [[BC]], i8** [[ARG_PTR1]],
-// CHECK: [[ARG_PTR2:%.+]] = getelementptr inbounds i8*, i8** [[ARG_PTRS]], i{{[0-9]+}} 1
-// CHECK: [[BC:%.+]] = bitcast i32** [[PTR2_REF]] to i8*
-// CHECK: store i8* [[BC]], i8** [[ARG_PTR2]],
-// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
-// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
-// CHECK: call void @__kmpc_end_sharing_variables()
+// CHECK: call void @__kmpc_spmd_kernel_init(
+// CHECK: call void @__kmpc_data_sharing_init_stack_spmd()
+// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{.+}})
+// CHECK: store i32 [[GTID]], i32* [[THREADID:%.+]],
+// CHECK: call void @{{.+}}(i32* [[THREADID]], i32* %{{.+}}, i32** [[PTR1_ADDR]], i32** [[PTR2_REF]])
+// CHECK: call void @__kmpc_spmd_kernel_deinit_v2(i16 1)
 void targetBar(int *Ptr1, int *Ptr2) {
 #pragma omp target map(Ptr1[:0], Ptr2)
 #pragma omp parallel num_threads(2)
@@ -70,7 +56,7 @@ int foo(int n) {
   double cn[5][n];
   TT<long long, char> d;
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l137}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l123}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -101,7 +87,7 @@ int foo(int n) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l137]]()
+  // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l123]]()
   // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
   // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
   // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
@@ -143,7 +129,7 @@ int foo(int n) {
   {
   }
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l214}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l200}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -174,7 +160,7 @@ int foo(int n) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l214]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]], i[[SZ:32|64]] [[ID:%[a-zA-Z_]+]])
+  // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l200]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]], i[[SZ:32|64]] [[ID:%[a-zA-Z_]+]])
   // CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]],
   // CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]],
   // CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16*
@@ -217,7 +203,7 @@ int foo(int n) {
     id = aa;
   }
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l324}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l310}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -248,7 +234,7 @@ int foo(int n) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l324]](i[[SZ]]
+  // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l310]](i[[SZ]]
   // Create local storage for each capture.
   // CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
   // CHECK: [[LOCAL_B:%.+]] = alloca [10 x float]*
@@ -409,7 +395,7 @@ int baz(int f, double &a) {
   return f;
 }
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+362}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+348}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -440,7 +426,7 @@ int baz(int f, double &a) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l362]](i[[SZ]]
+  // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l348]](i[[SZ]]
   // Create local storage for each capture.
   // CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
   // CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]
@@ -495,7 +481,7 @@ int baz(int f, double &a) {
 
 
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l380}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l366}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -529,7 +515,7 @@ int baz(int f, double &a) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l380]](
+  // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l366]](
   // Create local storage for each capture.
   // CHECK: [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]*
   // CHECK: [[LOCAL_B:%.+]] = alloca i[[SZ]]
@@ -648,7 +634,7 @@ int baz(int f, double &a) {
   // CHECK: ret i32 [[RES]]
 
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l345}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l331}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -679,7 +665,7 @@ int baz(int f, double &a) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l345]](i[[SZ]]
+  // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l331]](i[[SZ]]
   // Create local storage for each capture.
   // CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
   // CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]
diff --git a/clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp b/clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp
index 13a7fb289dad..fc90d34da364 100644
--- a/clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp
+++ b/clang/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp
@@ -9,8 +9,8 @@
 #define HEADER
 
-// Check that the execution mode of all 2 target regions on the gpu is set to non-SPMD Mode.
-// CHECK-DAG: {{@__omp_offloading_.+l21}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l26}}_exec_mode = weak constant i8 1
+// Check that the execution mode of all 2 target regions on the gpu is set to SPMD Mode.
+// CHECK-DAG: {{@__omp_offloading_.+l21}}_exec_mode = weak constant i8 0
+// CHECK-DAG: {{@__omp_offloading_.+l26}}_exec_mode = weak constant i8 0
 
 template<typename tx>
 tx ftemplate(int n) {
@@ -46,13 +46,16 @@ int bar(int n){
   // CHECK: store i16* {{%.+}}, i16** [[AA_ADDR]], align
   // CHECK: [[AA:%.+]] = load i16*, i16** [[AA_ADDR]], align
   // CHECK: [[THREAD_LIMIT:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
-  // CHECK: call void @__kmpc_kernel_init(i32
-  // CHECK: call void @__kmpc_push_num_threads
-  // CHECK: call void @__kmpc_kernel_deinit(i16 1)
+  // CHECK: call void @__kmpc_spmd_kernel_init(i32 [[THREAD_LIMIT]], i16 1, i16 0)
+  // CHECK: call void @__kmpc_data_sharing_init_stack_spmd()
+  // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{.+}})
+  // CHECK: store i32 [[GTID]], i32* [[THREADID:%.+]],
+  // CHECK: call void [[OUTLINED:@.+]](i32* [[THREADID]], i32* %{{.+}}, i16* [[AA]])
+  // CHECK: call void @__kmpc_spmd_kernel_deinit_v2(i16 1)
   // CHECK: ret void
   // CHECK: }
 
-  // CHECK: define internal void @{{.+}}(i32* noalias %{{.+}}, i32* noalias %{{.+}}, i16* {{[^%]*}}[[ARG:%.+]])
+  // CHECK: define internal void [[OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i16* {{[^%]*}}[[ARG:%.+]])
   // CHECK: = alloca i32*, align
   // CHECK: = alloca i32*, align
   // CHECK: [[AA_ADDR:%.+]] = alloca i16*, align
@@ -63,11 +66,6 @@ int bar(int n){
   // CHECK: ret void
   // CHECK: }
 
-
-
-
-
-
   // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l26}}(
   // CHECK: [[A_ADDR:%.+]] = alloca i32*, align
   // CHECK: [[AA_ADDR:%.+]] = alloca i16*, align
@@ -79,13 +77,16 @@ int bar(int n){
   // CHECK: [[AA:%.+]] = load i16*, i16** [[AA_ADDR]], align
   // CHECK: [[B:%.+]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align
   // CHECK: [[THREAD_LIMIT:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
-  // CHECK: call void @__kmpc_kernel_init(i32
-  // CHECK: call void @__kmpc_push_num_threads
-  // CHECK: call void @__kmpc_kernel_deinit(i16 1)
+  // CHECK: call void @__kmpc_spmd_kernel_init(i32 [[THREAD_LIMIT]], i16 1, i16 0)
+  // CHECK: call void @__kmpc_data_sharing_init_stack_spmd()
+  // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{.+}})
+  // CHECK: store i32 [[GTID]], i32* [[THREADID:%.+]],
+  // CHECK: call void [[OUTLINED:@.+]](i32* [[THREADID]], i32* %{{.+}}, i32* [[A]], i16* [[AA]], [10 x i32]* [[B]])
+  // CHECK: call void @__kmpc_spmd_kernel_deinit_v2(i16 1)
   // CHECK: ret void
   // CHECK: }
 
-  // CHECK: define internal void @{{.+}}(i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* {{[^%]*}}[[ARG1:%.+]], i16* {{[^%]*}}[[ARG2:%.+]], [10 x i32]* {{[^%]*}}[[ARG3:%.+]])
+  // CHECK: define internal void [[OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* {{[^%]*}}[[ARG1:%.+]], i16* {{[^%]*}}[[ARG2:%.+]], [10 x i32]* {{[^%]*}}[[ARG3:%.+]])
   // CHECK: = alloca i32*, align
   // CHECK: = alloca i32*, align
   // CHECK: [[A_ADDR:%.+]] = alloca i32*, align
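
Summary of the behavioral change, with a hypothetical standalone example (my illustration, not part of the patch): once the num_threads check is dropped from what is now hasParallelIfClause, a target parallel region with a num_threads clause compiles to an SPMD kernel (exec_mode 0, bracketed by __kmpc_spmd_kernel_init and __kmpc_spmd_kernel_deinit_v2), and only an if clause whose condition is not a compile-time true constant still forces the generic master/worker scheme (exec_mode 1). A sketch of both cases, assuming a typical offloading build such as clang++ -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda:

// spmd_modes.cpp - hypothetical illustration, not part of the patch.
#include <cstdio>

int main() {
  int A[64] = {0};

  // With this patch: SPMD mode (exec_mode == 0). The num_threads value is
  // passed through __kmpc_spmd_kernel_init instead of disabling SPMD codegen.
#pragma omp target parallel for num_threads(2) map(tofrom : A)
  for (int i = 0; i < 64; ++i)
    A[i] = i;

  bool Cond = A[0] == 0; // runtime value, not a compile-time constant
  // Still generic mode (exec_mode == 1): the 'if' clause has a non-constant
  // condition, so hasParallelIfClause() returns true and SPMD is not used.
#pragma omp target parallel for if(parallel : Cond) map(tofrom : A)
  for (int i = 0; i < 64; ++i)
    A[i] += 1;

  printf("%d %d\n", A[0], A[63]); // expected output: 1 64
  return 0;
}

The if clause still disables SPMD because a condition that evaluates to false at run time requires the parallel region to execute serialized on the team master, a shape the SPMD scheme, where every thread enters the region together, is not set up to handle.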