[Clang][OpenMP] Code generation for the has_device_addr clause.

Summary: This patch adds codegen support for the has_device_addr clause. It
uses the same logic as is_device_ptr.
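
For context, a minimal hedged sketch (not part of this patch) of what the clause expresses: a list item in has_device_addr already refers to device-accessible storage, so the target region can use its address directly instead of mapping it. The names below are illustrative only, assuming a buffer obtained from omp_target_alloc:

  #include <omp.h>
  // Sketch only: 'buf' and the section size are illustrative, not from this patch.
  void sketch() {
    int *buf = (int *)omp_target_alloc(4 * sizeof(int), omp_get_default_device());
  #pragma omp target has_device_addr(buf[0:4])
    buf[0] = 42; // buf's device address is used as-is; no implicit map of the section
    omp_target_free(buf, omp_get_default_device());
  }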

Differential Revision: https://reviews.llvm.org/D134186
Jennifer Yu 2022-09-12 16:57:27 -07:00
parent fd37ab6cf6
commit 684f766431
5 changed files with 2145 additions and 4 deletions


@@ -7389,6 +7389,13 @@ private:
       SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
       DevPointersMap;
+  /// Map between device addr declarations and their expression components.
+  /// The key value for declarations in 'this' is null.
+  llvm::DenseMap<
+      const ValueDecl *,
+      SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
+      HasDevAddrsMap;
   /// Map between lambda declarations and their map type.
   llvm::DenseMap<const ValueDecl *, const OMPMapClause *> LambdasMap;
@@ -8819,6 +8826,10 @@ public:
     for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
       for (auto L : C->component_lists())
         DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
+    // Extract device addr clause information.
+    for (const auto *C : Dir.getClausesOfKind<OMPHasDeviceAddrClause>())
+      for (auto L : C->component_lists())
+        HasDevAddrsMap[std::get<0>(L)].push_back(std::get<1>(L));
     // Extract map information.
     for (const auto *C : Dir.getClausesOfKind<OMPMapClause>()) {
       if (C->getMapType() != OMPC_MAP_to)
@@ -9052,7 +9063,7 @@ public:
     // If this declaration appears in a is_device_ptr clause we just have to
     // pass the pointer by value. If it is a reference to a declaration, we just
     // pass its value.
-    if (VD && DevPointersMap.count(VD)) {
+    if (VD && (DevPointersMap.count(VD) || HasDevAddrsMap.count(VD))) {
       CombinedInfo.Exprs.push_back(VD);
       CombinedInfo.BasePointers.emplace_back(Arg, VD);
       CombinedInfo.Pointers.push_back(Arg);
@@ -9073,14 +9084,19 @@ public:
     SmallVector<MapData, 4> DeclComponentLists;
     // For member fields list in is_device_ptr, store it in
     // DeclComponentLists for generating components info.
+    static const OpenMPMapModifierKind Unknown = OMPC_MAP_MODIFIER_unknown;
     auto It = DevPointersMap.find(VD);
     if (It != DevPointersMap.end())
-      for (const auto &MCL : It->second) {
-        static const OpenMPMapModifierKind Unknown = OMPC_MAP_MODIFIER_unknown;
+      for (const auto &MCL : It->second)
         DeclComponentLists.emplace_back(MCL, OMPC_MAP_to, Unknown,
                                         /*IsImpicit = */ true, nullptr,
                                         nullptr);
-      }
+    auto I = HasDevAddrsMap.find(VD);
+    if (I != HasDevAddrsMap.end())
+      for (const auto &MCL : I->second)
+        DeclComponentLists.emplace_back(MCL, OMPC_MAP_tofrom, Unknown,
+                                        /*IsImpicit = */ true, nullptr,
+                                        nullptr);
     assert(CurDir.is<const OMPExecutableDirective *>() &&
            "Expect a executable directive");
     const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
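
As the hunk above shows, components coming from is_device_ptr are recorded with an implicit 'to' map type, while has_device_addr components use 'tofrom'. A hedged user-level sketch contrasting the two clauses (illustrative only; it mirrors the use_device_ptr/use_device_addr pairing used in the runtime tests below):

  // Sketch, not taken from the patch: 'contrast', 'p', and 'n' are illustrative.
  void contrast(int *p, int n) {
  #pragma omp target data map(tofrom : p[0:n]) use_device_ptr(p)
    {
  #pragma omp target is_device_ptr(p) // the value of p is a device pointer
      p[0] += 1;
    }
  #pragma omp target data map(tofrom : p[0:n]) use_device_addr(p[0:n])
    {
  #pragma omp target has_device_addr(p[0:n]) // p[0:n] already has a device address
      p[0] += 1;
    }
  }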

File diff suppressed because it is too large.


@@ -0,0 +1,473 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// expected-no-diagnostics
struct S {
  int a = 0;
  int *ptr = &a;
  int &ref = a;
  int arr[4];
  S() {}
  void foo() {
#pragma omp target has_device_addr(a, ref, ptr[0:4], arr[:a])
    ++a, ++*ptr, ++ref, ++arr[0];
  }
};

int main() {
  float a = 0;
  float *ptr = &a;
  float &ref = a;
  float arr[4];
  float vla[(int)a];
  S s;
  s.foo();
#pragma omp target has_device_addr(a, ref, ptr[0:4], arr[:(int)a], vla[0])
  ++a, ++*ptr, ++ref, ++arr[0], ++vla[0];
  return a;
}
// CHECK-LABEL: define {{[^@]+}}@main
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[A:%.*]] = alloca float, align 4
// CHECK-NEXT: [[PTR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[REF:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[ARR:%.*]] = alloca [4 x float], align 4
// CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
// CHECK-NEXT: [[TMP:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[REF_CASTED:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
// CHECK-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK-NEXT: store float 0.000000e+00, float* [[A]], align 4
// CHECK-NEXT: store float* [[A]], float** [[PTR]], align 8
// CHECK-NEXT: store float* [[A]], float** [[REF]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[A]], align 4
// CHECK-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[CONV]] to i64
// CHECK-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK-NEXT: call void @_ZN1SC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
// CHECK-NEXT: call void @_ZN1S3fooEv(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
// CHECK-NEXT: [[TMP3:%.*]] = load float*, float** [[REF]], align 8
// CHECK-NEXT: store float* [[TMP3]], float** [[TMP]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load float, float* [[A]], align 4
// CHECK-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to float*
// CHECK-NEXT: store float [[TMP4]], float* [[CONV1]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = load float*, float** [[PTR]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = load float*, float** [[TMP]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load float, float* [[TMP7]], align 4
// CHECK-NEXT: [[CONV2:%.*]] = bitcast i64* [[REF_CASTED]] to float*
// CHECK-NEXT: store float [[TMP8]], float* [[CONV2]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load i64, i64* [[REF_CASTED]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK-NEXT: store i64 [[TMP5]], i64* [[TMP11]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK-NEXT: store i64 [[TMP5]], i64* [[TMP13]], align 8
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float**
// CHECK-NEXT: store float* [[TMP6]], float** [[TMP16]], align 8
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to float**
// CHECK-NEXT: store float* [[TMP6]], float** [[TMP18]], align 8
// CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64*
// CHECK-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
// CHECK-NEXT: store i64 [[TMP9]], i64* [[TMP23]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK-NEXT: store i8* null, i8** [[TMP24]], align 8
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to [4 x float]**
// CHECK-NEXT: store [4 x float]* [[ARR]], [4 x float]** [[TMP26]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to [4 x float]**
// CHECK-NEXT: store [4 x float]* [[ARR]], [4 x float]** [[TMP28]], align 8
// CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK-NEXT: store i8* null, i8** [[TMP29]], align 8
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
// CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP31]], align 8
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64*
// CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP33]], align 8
// CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK-NEXT: store i8* null, i8** [[TMP34]], align 8
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
// CHECK-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to float**
// CHECK-NEXT: store float* [[VLA]], float** [[TMP36]], align 8
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
// CHECK-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to float**
// CHECK-NEXT: store float* [[VLA]], float** [[TMP38]], align 8
// CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
// CHECK-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK-NEXT: store i32 1, i32* [[TMP42]], align 4
// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK-NEXT: store i32 6, i32* [[TMP43]], align 4
// CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK-NEXT: store i8** [[TMP40]], i8*** [[TMP44]], align 8
// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK-NEXT: store i8** [[TMP41]], i8*** [[TMP45]], align 8
// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK-NEXT: store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP46]], align 8
// CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK-NEXT: store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP47]], align 8
// CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK-NEXT: store i8** null, i8*** [[TMP48]], align 8
// CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK-NEXT: store i8** null, i8*** [[TMP49]], align 8
// CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK-NEXT: store i64 0, i64* [[TMP50]], align 8
// CHECK-NEXT: [[TMP51:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK-NEXT: [[TMP52:%.*]] = icmp ne i32 [[TMP51]], 0
// CHECK-NEXT: br i1 [[TMP52]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK: omp_offload.failed:
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27(i64 [[TMP5]], float* [[TMP6]], i64 [[TMP9]], [4 x float]* [[ARR]], i64 [[TMP1]], float* [[VLA]]) #[[ATTR5:[0-9]+]]
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK: omp_offload.cont:
// CHECK-NEXT: [[TMP53:%.*]] = load float, float* [[A]], align 4
// CHECK-NEXT: [[CONV3:%.*]] = fptosi float [[TMP53]] to i32
// CHECK-NEXT: store i32 [[CONV3]], i32* [[RETVAL]], align 4
// CHECK-NEXT: [[TMP54:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK-NEXT: call void @llvm.stackrestore(i8* [[TMP54]])
// CHECK-NEXT: [[TMP55:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK-NEXT: ret i32 [[TMP55]]
//
//
// CHECK-LABEL: define {{[^@]+}}@_ZN1SC1Ev
// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK-NEXT: call void @_ZN1SC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS1]])
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@_ZN1S3fooEv
// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
// CHECK-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[REF]], align 8
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
// CHECK-NEXT: [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 3
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i32], [4 x i32]* [[ARR]], i32 1
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[A]] to i8*
// CHECK-NEXT: [[TMP3:%.*]] = bitcast [4 x i32]* [[TMP1]] to i8*
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
// CHECK-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CHECK-NEXT: [[TMP8:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP8]], i8* align 8 bitcast ([5 x i64]* @.offload_sizes.1 to i8*), i64 40, i1 false)
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.S**
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP10]], align 8
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32**
// CHECK-NEXT: store i32* [[A]], i32** [[TMP12]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK-NEXT: store i64 [[TMP7]], i64* [[TMP13]], align 8
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to %struct.S**
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP16]], align 8
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
// CHECK-NEXT: store i32* [[A]], i32** [[TMP18]], align 8
// CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.S**
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP21]], align 8
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
// CHECK-NEXT: store i32* [[TMP0]], i32** [[TMP23]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK-NEXT: store i8* null, i8** [[TMP24]], align 8
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to %struct.S**
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP26]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32***
// CHECK-NEXT: store i32** [[PTR]], i32*** [[TMP28]], align 8
// CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK-NEXT: store i8* null, i8** [[TMP29]], align 8
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to %struct.S**
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP31]], align 8
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to [4 x i32]**
// CHECK-NEXT: store [4 x i32]* [[ARR]], [4 x i32]** [[TMP33]], align 8
// CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK-NEXT: store i8* null, i8** [[TMP34]], align 8
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK-NEXT: store i32 1, i32* [[TMP38]], align 4
// CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK-NEXT: store i32 5, i32* [[TMP39]], align 4
// CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK-NEXT: store i8** [[TMP35]], i8*** [[TMP40]], align 8
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK-NEXT: store i8** [[TMP36]], i8*** [[TMP41]], align 8
// CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK-NEXT: store i64* [[TMP37]], i64** [[TMP42]], align 8
// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.2, i32 0, i32 0), i64** [[TMP43]], align 8
// CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK-NEXT: store i8** null, i8*** [[TMP44]], align 8
// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK-NEXT: store i8** null, i8*** [[TMP45]], align 8
// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK-NEXT: store i64 0, i64* [[TMP46]], align 8
// CHECK-NEXT: [[TMP47:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK-NEXT: [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0
// CHECK-NEXT: br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK: omp_offload.failed:
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14(%struct.S* [[THIS1]]) #[[ATTR5]]
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK: omp_offload.cont:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27
// CHECK-SAME: (i64 noundef [[A:%.*]], float* noundef [[PTR:%.*]], i64 noundef [[REF:%.*]], [4 x float]* noundef nonnull align 4 dereferenceable(16) [[ARR:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[VLA1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[REF_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[ARR_ADDR:%.*]] = alloca [4 x float]*, align 8
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[VLA_ADDR2:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[TMP:%.*]] = alloca float*, align 8
// CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK-NEXT: store float* [[PTR]], float** [[PTR_ADDR]], align 8
// CHECK-NEXT: store i64 [[REF]], i64* [[REF_ADDR]], align 8
// CHECK-NEXT: store [4 x float]* [[ARR]], [4 x float]** [[ARR_ADDR]], align 8
// CHECK-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK-NEXT: store float* [[VLA1]], float** [[VLA_ADDR2]], align 8
// CHECK-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to float*
// CHECK-NEXT: [[CONV3:%.*]] = bitcast i64* [[REF_ADDR]] to float*
// CHECK-NEXT: [[TMP0:%.*]] = load [4 x float]*, [4 x float]** [[ARR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load float*, float** [[VLA_ADDR2]], align 8
// CHECK-NEXT: store float* [[CONV3]], float** [[TMP]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[CONV]], align 4
// CHECK-NEXT: [[INC:%.*]] = fadd float [[TMP3]], 1.000000e+00
// CHECK-NEXT: store float [[INC]], float* [[CONV]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load float*, float** [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load float, float* [[TMP4]], align 4
// CHECK-NEXT: [[INC4:%.*]] = fadd float [[TMP5]], 1.000000e+00
// CHECK-NEXT: store float [[INC4]], float* [[TMP4]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load float*, float** [[TMP]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[TMP6]], align 4
// CHECK-NEXT: [[INC5:%.*]] = fadd float [[TMP7]], 1.000000e+00
// CHECK-NEXT: store float [[INC5]], float* [[TMP6]], align 4
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[TMP0]], i64 0, i64 0
// CHECK-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK-NEXT: [[INC6:%.*]] = fadd float [[TMP8]], 1.000000e+00
// CHECK-NEXT: store float [[INC6]], float* [[ARRAYIDX]], align 4
// CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 0
// CHECK-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK-NEXT: [[INC8:%.*]] = fadd float [[TMP9]], 1.000000e+00
// CHECK-NEXT: store float [[INC8]], float* [[ARRAYIDX7]], align 4
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@_ZN1SC2Ev
// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK-NEXT: store i32 0, i32* [[A]], align 8
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
// CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK-NEXT: store i32* [[A2]], i32** [[PTR]], align 8
// CHECK-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
// CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK-NEXT: store i32* [[A3]], i32** [[REF]], align 8
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14
// CHECK-SAME: (%struct.S* noundef [[THIS:%.*]]) #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 8
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK-NEXT: store i32 [[INC]], i32* [[A]], align 8
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 1
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[PTR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK-NEXT: [[INC1:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK-NEXT: store i32 [[INC1]], i32* [[TMP2]], align 4
// CHECK-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 2
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[REF]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK-NEXT: store i32 [[INC2]], i32* [[TMP4]], align 4
// CHECK-NEXT: [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 3
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 8
// CHECK-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-NEXT: store i32 [[INC3]], i32* [[ARRAYIDX]], align 8
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK-NEXT: ret void
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@main
// SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[A:%.*]] = alloca float, align 4
// SIMD-ONLY0-NEXT: [[PTR:%.*]] = alloca float*, align 8
// SIMD-ONLY0-NEXT: [[REF:%.*]] = alloca float*, align 8
// SIMD-ONLY0-NEXT: [[ARR:%.*]] = alloca [4 x float], align 4
// SIMD-ONLY0-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// SIMD-ONLY0-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// SIMD-ONLY0-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
// SIMD-ONLY0-NEXT: [[TMP:%.*]] = alloca float*, align 8
// SIMD-ONLY0-NEXT: store i32 0, i32* [[RETVAL]], align 4
// SIMD-ONLY0-NEXT: store float 0.000000e+00, float* [[A]], align 4
// SIMD-ONLY0-NEXT: store float* [[A]], float** [[PTR]], align 8
// SIMD-ONLY0-NEXT: store float* [[A]], float** [[REF]], align 8
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load float, float* [[A]], align 4
// SIMD-ONLY0-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = zext i32 [[CONV]] to i64
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// SIMD-ONLY0-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// SIMD-ONLY0-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// SIMD-ONLY0-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// SIMD-ONLY0-NEXT: call void @_ZN1SC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
// SIMD-ONLY0-NEXT: call void @_ZN1S3fooEv(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load float*, float** [[REF]], align 8
// SIMD-ONLY0-NEXT: store float* [[TMP3]], float** [[TMP]], align 8
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load float*, float** [[REF]], align 8
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load float, float* [[A]], align 4
// SIMD-ONLY0-NEXT: [[INC:%.*]] = fadd float [[TMP5]], 1.000000e+00
// SIMD-ONLY0-NEXT: store float [[INC]], float* [[A]], align 4
// SIMD-ONLY0-NEXT: [[TMP6:%.*]] = load float*, float** [[PTR]], align 8
// SIMD-ONLY0-NEXT: [[TMP7:%.*]] = load float, float* [[TMP6]], align 4
// SIMD-ONLY0-NEXT: [[INC1:%.*]] = fadd float [[TMP7]], 1.000000e+00
// SIMD-ONLY0-NEXT: store float [[INC1]], float* [[TMP6]], align 4
// SIMD-ONLY0-NEXT: [[TMP8:%.*]] = load float*, float** [[TMP]], align 8
// SIMD-ONLY0-NEXT: [[TMP9:%.*]] = load float, float* [[TMP8]], align 4
// SIMD-ONLY0-NEXT: [[INC2:%.*]] = fadd float [[TMP9]], 1.000000e+00
// SIMD-ONLY0-NEXT: store float [[INC2]], float* [[TMP8]], align 4
// SIMD-ONLY0-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[ARR]], i64 0, i64 0
// SIMD-ONLY0-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
// SIMD-ONLY0-NEXT: [[INC3:%.*]] = fadd float [[TMP10]], 1.000000e+00
// SIMD-ONLY0-NEXT: store float [[INC3]], float* [[ARRAYIDX]], align 4
// SIMD-ONLY0-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 0
// SIMD-ONLY0-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX4]], align 4
// SIMD-ONLY0-NEXT: [[INC5:%.*]] = fadd float [[TMP11]], 1.000000e+00
// SIMD-ONLY0-NEXT: store float [[INC5]], float* [[ARRAYIDX4]], align 4
// SIMD-ONLY0-NEXT: [[TMP12:%.*]] = load float, float* [[A]], align 4
// SIMD-ONLY0-NEXT: [[CONV6:%.*]] = fptosi float [[TMP12]] to i32
// SIMD-ONLY0-NEXT: store i32 [[CONV6]], i32* [[RETVAL]], align 4
// SIMD-ONLY0-NEXT: [[TMP13:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// SIMD-ONLY0-NEXT: call void @llvm.stackrestore(i8* [[TMP13]])
// SIMD-ONLY0-NEXT: [[TMP14:%.*]] = load i32, i32* [[RETVAL]], align 4
// SIMD-ONLY0-NEXT: ret i32 [[TMP14]]
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SC1Ev
// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// SIMD-ONLY0-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// SIMD-ONLY0-NEXT: call void @_ZN1SC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS1]])
// SIMD-ONLY0-NEXT: ret void
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1S3fooEv
// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// SIMD-ONLY0-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// SIMD-ONLY0-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 8
// SIMD-ONLY0-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC]], i32* [[A]], align 8
// SIMD-ONLY0-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32*, i32** [[PTR]], align 8
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// SIMD-ONLY0-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP2]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC2]], i32* [[TMP1]], align 4
// SIMD-ONLY0-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load i32*, i32** [[REF]], align 8
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// SIMD-ONLY0-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC3]], i32* [[TMP3]], align 4
// SIMD-ONLY0-NEXT: [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 3
// SIMD-ONLY0-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 8
// SIMD-ONLY0-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP5]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC4]], i32* [[ARRAYIDX]], align 8
// SIMD-ONLY0-NEXT: ret void
//
//
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SC2Ev
// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// SIMD-ONLY0-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// SIMD-ONLY0-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// SIMD-ONLY0-NEXT: store i32 0, i32* [[A]], align 8
// SIMD-ONLY0-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
// SIMD-ONLY0-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
// SIMD-ONLY0-NEXT: store i32* [[A2]], i32** [[PTR]], align 8
// SIMD-ONLY0-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
// SIMD-ONLY0-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
// SIMD-ONLY0-NEXT: store i32* [[A3]], i32** [[REF]], align 8
// SIMD-ONLY0-NEXT: ret void
//


@@ -0,0 +1,33 @@
// RUN: %libomptarget-compilexx-generic -fopenmp-version=51
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic
#include <assert.h>
#include <iostream>
#include <omp.h>
struct view {
  const int size = 10;
  int *data_host;
  int *data_device;

  void foo() {
    std::size_t bytes = size * sizeof(int);
    const int host_id = omp_get_initial_device();
    const int device_id = omp_get_default_device();
    data_host = (int *)malloc(bytes);
    data_device = (int *)omp_target_alloc(bytes, device_id);
#pragma omp target teams distribute parallel for has_device_addr(data_device)
    for (int i = 0; i < size; ++i)
      data_device[i] = i;
    omp_target_memcpy(data_host, data_device, bytes, 0, 0, host_id, device_id);
    for (int i = 0; i < size; ++i)
      assert(data_host[i] == i);
  }
};

int main() {
  view a;
  a.foo();
  // CHECK: PASSED
  printf("PASSED\n");
}


@@ -0,0 +1,85 @@
// RUN: %libomptarget-compile-generic -fopenmp-version=51
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1024
#define FROM 64
#define LENGTH 128
void foo() {
  float *A;
#pragma omp allocate(A) allocator(llvm_omp_target_shared_mem_alloc)
  float *A_dev = NULL;
#pragma omp target has_device_addr(A [FROM:LENGTH]) map(A_dev)
  { A_dev = A; }
  // CHECK: Success
  if (A_dev == NULL || A_dev != A)
    fprintf(stderr, "Failure %p %p \n", A_dev, A);
  else
    fprintf(stderr, "Success\n");
}

void bar() {
  short x[10];
  short *xp = &x[0];
  x[1] = 111;
#pragma omp target data map(tofrom : xp [0:2]) use_device_addr(xp [0:2])
#pragma omp target has_device_addr(xp [0:2])
  { xp[1] = 222; }
  // CHECK: 222
  printf("%d %p\n", xp[1], &xp[1]);
}

void moo() {
  short *b = malloc(sizeof(short));
  b = b - 1;
  b[1] = 111;
#pragma omp target data map(tofrom : b[1]) use_device_addr(b[1])
#pragma omp target has_device_addr(b[1])
  { b[1] = 222; }
  // CHECK: 222
  printf("%hd %p %p %p\n", b[1], b, &b[1], &b);
}

void zoo() {
  short x[10];
  short *(xp[10]);
  xp[1] = &x[0];
  short **xpp = &xp[0];
  x[1] = 111;
#pragma omp target data map(tofrom : xpp[1][1]) use_device_addr(xpp[1][1])
#pragma omp target has_device_addr(xpp[1][1])
  { xpp[1][1] = 222; }
  // CHECK: 222
  printf("%d %p %p\n", xpp[1][1], xpp[1], &xpp[1][1]);
}

void xoo() {
  short a[10], b[10];
#pragma omp allocate(a) allocator(llvm_omp_target_shared_mem_alloc)
#pragma omp allocate(b) allocator(llvm_omp_target_shared_mem_alloc)
  a[1] = 111;
  b[1] = 111;
#pragma omp target has_device_addr(a) has_device_addr(b [0:1])
  {
    a[1] = 222;
    b[1] = 222;
  }
  // CHECK: 111
  printf("%hd %hd %p %p %p\n", a[1], b[1], &a, b, &b); // 111 111 p1d p2d p3d
}

int main() {
  foo();
  bar();
  moo();
  zoo();
  xoo();
  return 0;
}