forked from OSchip/llvm-project
[Clang][OpenMP] Code generation for the has_device_addr clause.
This patch adds codegen support for the has_device_addr clause. It uses the same logic as is_device_ptr, but passes &var to the kernel instead of a pointer to var. Differential Revision: https://reviews.llvm.org/D134268
This commit is contained in:
parent
e0a6df53b4
commit
48ffd40ba2
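For context, a minimal usage sketch of the clause this patch lowers (it mirrors the libomptarget runtime test added below; the variable names and the printed value are illustrative, not part of the patch):

#include <omp.h>
#include <stdio.h>

int main() {
  const int N = 10;
  const int dev = omp_get_default_device();
  // Device memory obtained outside of any map clause.
  int *d = (int *)omp_target_alloc(N * sizeof(int), dev);

  // has_device_addr tells the compiler that `d` already refers to device
  // memory, so its address is forwarded to the kernel unchanged.
#pragma omp target teams distribute parallel for has_device_addr(d[0:N])
  for (int i = 0; i < N; ++i)
    d[i] = i;

  int h[10];
  omp_target_memcpy(h, d, N * sizeof(int), 0, 0,
                    omp_get_initial_device(), dev);
  printf("%d\n", h[N - 1]); // expect 9
  omp_target_free(d, dev);
  return 0;
}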
@@ -7389,6 +7389,13 @@ private:
      SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
      DevPointersMap;

  /// Map between device addr declarations and their expression components.
  /// The key value for declarations in 'this' is null.
  llvm::DenseMap<
      const ValueDecl *,
      SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
      HasDevAddrsMap;

  /// Map between lambda declarations and their map type.
  llvm::DenseMap<const ValueDecl *, const OMPMapClause *> LambdasMap;

@@ -8819,6 +8826,10 @@ public:
    for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
      for (auto L : C->component_lists())
        DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
    // Extract device addr clause information.
    for (const auto *C : Dir.getClausesOfKind<OMPHasDeviceAddrClause>())
      for (auto L : C->component_lists())
        HasDevAddrsMap[std::get<0>(L)].push_back(std::get<1>(L));
    // Extract map information.
    for (const auto *C : Dir.getClausesOfKind<OMPMapClause>()) {
      if (C->getMapType() != OMPC_MAP_to)
@@ -9065,6 +9076,30 @@ public:
      CombinedInfo.Mappers.push_back(nullptr);
      return;
    }
    if (VD && HasDevAddrsMap.count(VD)) {
      auto I = HasDevAddrsMap.find(VD);
      CombinedInfo.Exprs.push_back(VD);
      Expr *E = nullptr;
      for (auto &MCL : I->second) {
        E = MCL.begin()->getAssociatedExpression();
        break;
      }
      llvm::Value *Ptr = nullptr;
      if (E->isGLValue())
        Ptr = CGF.EmitLValue(E).getPointer(CGF);
      else
        Ptr = CGF.EmitScalarExpr(E);
      CombinedInfo.BasePointers.emplace_back(Ptr, VD);
      CombinedInfo.Pointers.push_back(Ptr);
      CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
          CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty,
          /*isSigned=*/true));
      CombinedInfo.Types.push_back(
          (Cap->capturesVariable() ? OMP_MAP_TO : OMP_MAP_LITERAL) |
          OMP_MAP_TARGET_PARAM);
      CombinedInfo.Mappers.push_back(nullptr);
      return;
    }

    using MapData =
        std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
@@ -9073,14 +9108,19 @@ public:
    SmallVector<MapData, 4> DeclComponentLists;
    // For member fields list in is_device_ptr, store it in
    // DeclComponentLists for generating components info.
    static const OpenMPMapModifierKind Unknown = OMPC_MAP_MODIFIER_unknown;
    auto It = DevPointersMap.find(VD);
    if (It != DevPointersMap.end())
      for (const auto &MCL : It->second) {
        static const OpenMPMapModifierKind Unknown = OMPC_MAP_MODIFIER_unknown;
      for (const auto &MCL : It->second)
        DeclComponentLists.emplace_back(MCL, OMPC_MAP_to, Unknown,
                                        /*IsImpicit = */ true, nullptr,
                                        nullptr);
      }
    auto I = HasDevAddrsMap.find(VD);
    if (I != HasDevAddrsMap.end())
      for (const auto &MCL : I->second)
        DeclComponentLists.emplace_back(MCL, OMPC_MAP_tofrom, Unknown,
                                        /*IsImpicit = */ true, nullptr,
                                        nullptr);
    assert(CurDir.is<const OMPExecutableDirective *>() &&
           "Expect a executable directive");
    const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
@@ -2093,7 +2093,7 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
  //
  // =========================================================================
  // | type |  defaultmap   | pvt | first | is_device_ptr |    map   | res.  |
  // |      |(tofrom:scalar)|     |  pvt  |               |          |       |
  // |      |(tofrom:scalar)|     |  pvt  |               |has_dv_adr|       |
  // =========================================================================
  // | scl  |               |     |       |       -       |          | bycopy|
  // | scl  |               |  -  |   x   |       -       |     -    | bycopy|
@@ -2154,10 +2154,11 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
       D](OMPClauseMappableExprCommon::MappableExprComponentListRef
              MapExprComponents,
          OpenMPClauseKind WhereFoundClauseKind) {
        // Only the map clause information influences how a variable is
        // captured. E.g. is_device_ptr does not require changing the default
        // behavior.
        if (WhereFoundClauseKind != OMPC_map)
        // Both map and has_device_addr clauses information influences how a
        // variable is captured. E.g. is_device_ptr does not require changing
        // the default behavior.
        if (WhereFoundClauseKind != OMPC_map &&
            WhereFoundClauseKind != OMPC_has_device_addr)
          return false;

        auto EI = MapExprComponents.rbegin();
@@ -23070,13 +23071,17 @@ OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,

    // Store the components in the stack so that they can be used to check
    // against other clauses later on.
    Expr *Component = SimpleRefExpr;
    auto *VD = dyn_cast<VarDecl>(D);
    if (VD && (isa<OMPArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
               isa<ArraySubscriptExpr>(RefExpr->IgnoreParenImpCasts())))
      Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
    OMPClauseMappableExprCommon::MappableComponent MC(
        SimpleRefExpr, D, /*IsNonContiguous=*/false);
        Component, D, /*IsNonContiguous=*/false);
    DSAStack->addMappableExpressionComponents(
        D, MC, /*WhereFoundClauseKind=*/OMPC_has_device_addr);

    // Record the expression we've just processed.
    auto *VD = dyn_cast<VarDecl>(D);
    if (!VD && !CurContext->isDependentContext()) {
      DeclRefExpr *Ref =
          buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
File diff suppressed because it is too large.
@@ -0,0 +1,466 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s

// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// expected-no-diagnostics

struct S {
  int a = 0;
  int *ptr = &a;
  int &ref = a;
  int arr[4];
  S() {}
  void foo() {
#pragma omp target has_device_addr(a, ref, ptr[0:4], arr[:a])
    ++a, ++*ptr, ++ref, ++arr[0];
  }
};

int main() {
  float a = 0;
  float *ptr = &a;
  float &ref = a;
  float arr[4];
  float vla[(int)a];
  S s;
  s.foo();
#pragma omp target has_device_addr(a, ref, ptr[0:4], arr[:(int)a], vla[0])
  ++a, ++*ptr, ++ref, ++arr[0], ++vla[0];
  return a;
}

// CHECK-LABEL: define {{[^@]+}}@main
|
||||
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
||||
// CHECK-NEXT: [[A:%.*]] = alloca float, align 4
|
||||
// CHECK-NEXT: [[PTR:%.*]] = alloca float*, align 8
|
||||
// CHECK-NEXT: [[REF:%.*]] = alloca float*, align 8
|
||||
// CHECK-NEXT: [[ARR:%.*]] = alloca [4 x float], align 4
|
||||
// CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
||||
// CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
||||
// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
|
||||
// CHECK-NEXT: [[TMP:%.*]] = alloca float*, align 8
|
||||
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
|
||||
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
|
||||
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
|
||||
// CHECK-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
||||
// CHECK-NEXT: store float 0.000000e+00, float* [[A]], align 4
|
||||
// CHECK-NEXT: store float* [[A]], float** [[PTR]], align 8
|
||||
// CHECK-NEXT: store float* [[A]], float** [[REF]], align 8
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[A]], align 4
|
||||
// CHECK-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[CONV]] to i64
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
||||
// CHECK-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
||||
// CHECK-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
|
||||
// CHECK-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
||||
// CHECK-NEXT: call void @_ZN1SC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
|
||||
// CHECK-NEXT: call void @_ZN1S3fooEv(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = load float*, float** [[REF]], align 8
|
||||
// CHECK-NEXT: store float* [[TMP3]], float** [[TMP]], align 8
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = load float*, float** [[PTR]], align 8
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = load float*, float** [[TMP]], align 8
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = load float*, float** [[PTR]], align 8
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = load float*, float** [[TMP]], align 8
|
||||
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[ARR]], i64 0, i64 0
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to float**
|
||||
// CHECK-NEXT: store float* [[A]], float** [[TMP9]], align 8
|
||||
// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to float**
|
||||
// CHECK-NEXT: store float* [[A]], float** [[TMP11]], align 8
|
||||
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP12]], align 8
|
||||
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
|
||||
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to float**
|
||||
// CHECK-NEXT: store float* [[TMP6]], float** [[TMP14]], align 8
|
||||
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
|
||||
// CHECK-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float**
|
||||
// CHECK-NEXT: store float* [[TMP6]], float** [[TMP16]], align 8
|
||||
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP17]], align 8
|
||||
// CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
|
||||
// CHECK-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to float**
|
||||
// CHECK-NEXT: store float* [[TMP7]], float** [[TMP19]], align 8
|
||||
// CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
|
||||
// CHECK-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to float**
|
||||
// CHECK-NEXT: store float* [[TMP7]], float** [[TMP21]], align 8
|
||||
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP22]], align 8
|
||||
// CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
|
||||
// CHECK-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to float**
|
||||
// CHECK-NEXT: store float* [[ARRAYDECAY]], float** [[TMP24]], align 8
|
||||
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
|
||||
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to float**
|
||||
// CHECK-NEXT: store float* [[ARRAYDECAY]], float** [[TMP26]], align 8
|
||||
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP27]], align 8
|
||||
// CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
|
||||
// CHECK-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
|
||||
// CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP29]], align 8
|
||||
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
|
||||
// CHECK-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
|
||||
// CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP31]], align 8
|
||||
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP32]], align 8
|
||||
// CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
|
||||
// CHECK-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to float**
|
||||
// CHECK-NEXT: store float* [[VLA]], float** [[TMP34]], align 8
|
||||
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
|
||||
// CHECK-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to float**
|
||||
// CHECK-NEXT: store float* [[VLA]], float** [[TMP36]], align 8
|
||||
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP37]], align 8
|
||||
// CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
||||
// CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
|
||||
// CHECK-NEXT: store i32 1, i32* [[TMP40]], align 4
|
||||
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
|
||||
// CHECK-NEXT: store i32 6, i32* [[TMP41]], align 4
|
||||
// CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
|
||||
// CHECK-NEXT: store i8** [[TMP38]], i8*** [[TMP42]], align 8
|
||||
// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
|
||||
// CHECK-NEXT: store i8** [[TMP39]], i8*** [[TMP43]], align 8
|
||||
// CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
|
||||
// CHECK-NEXT: store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP44]], align 8
|
||||
// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
|
||||
// CHECK-NEXT: store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP45]], align 8
|
||||
// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
|
||||
// CHECK-NEXT: store i8** null, i8*** [[TMP46]], align 8
|
||||
// CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
|
||||
// CHECK-NEXT: store i8** null, i8*** [[TMP47]], align 8
|
||||
// CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
|
||||
// CHECK-NEXT: store i64 0, i64* [[TMP48]], align 8
|
||||
// CHECK-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
|
||||
// CHECK-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0
|
||||
// CHECK-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
||||
// CHECK: omp_offload.failed:
|
||||
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27(float* [[A]], float* [[TMP4]], float* [[TMP5]], [4 x float]* [[ARR]], i64 [[TMP1]], float* [[VLA]]) #[[ATTR5:[0-9]+]]
|
||||
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
||||
// CHECK: omp_offload.cont:
|
||||
// CHECK-NEXT: [[TMP51:%.*]] = load float, float* [[A]], align 4
|
||||
// CHECK-NEXT: [[CONV1:%.*]] = fptosi float [[TMP51]] to i32
|
||||
// CHECK-NEXT: store i32 [[CONV1]], i32* [[RETVAL]], align 4
|
||||
// CHECK-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
||||
// CHECK-NEXT: call void @llvm.stackrestore(i8* [[TMP52]])
|
||||
// CHECK-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4
|
||||
// CHECK-NEXT: ret i32 [[TMP53]]
|
||||
//
|
||||
//
|
||||
// CHECK-LABEL: define {{[^@]+}}@_ZN1SC1Ev
|
||||
// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
||||
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
||||
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
||||
// CHECK-NEXT: call void @_ZN1SC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS1]])
|
||||
// CHECK-NEXT: ret void
|
||||
//
|
||||
//
|
||||
// CHECK-LABEL: define {{[^@]+}}@_ZN1S3fooEv
|
||||
// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
||||
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
|
||||
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
|
||||
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
|
||||
// CHECK-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
|
||||
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
||||
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
||||
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[REF]], align 8
|
||||
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
|
||||
// CHECK-NEXT: [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 3
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr [4 x i32], [4 x i32]* [[ARR]], i32 1
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[A]] to i8*
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast [4 x i32]* [[TMP1]] to i8*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
|
||||
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP8]], i8* align 8 bitcast ([5 x i64]* @.offload_sizes.1 to i8*), i64 40, i1 false)
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.S**
|
||||
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP10]], align 8
|
||||
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32**
|
||||
// CHECK-NEXT: store i32* [[A]], i32** [[TMP12]], align 8
|
||||
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
|
||||
// CHECK-NEXT: store i64 [[TMP7]], i64* [[TMP13]], align 8
|
||||
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP14]], align 8
|
||||
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
|
||||
// CHECK-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to %struct.S**
|
||||
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP16]], align 8
|
||||
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
|
||||
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
|
||||
// CHECK-NEXT: store i32* [[A]], i32** [[TMP18]], align 8
|
||||
// CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP19]], align 8
|
||||
// CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
|
||||
// CHECK-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.S**
|
||||
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP21]], align 8
|
||||
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
|
||||
// CHECK-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
|
||||
// CHECK-NEXT: store i32* [[TMP0]], i32** [[TMP23]], align 8
|
||||
// CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP24]], align 8
|
||||
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
|
||||
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to %struct.S**
|
||||
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP26]], align 8
|
||||
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
|
||||
// CHECK-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32***
|
||||
// CHECK-NEXT: store i32** [[PTR]], i32*** [[TMP28]], align 8
|
||||
// CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP29]], align 8
|
||||
// CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
|
||||
// CHECK-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to %struct.S**
|
||||
// CHECK-NEXT: store %struct.S* [[THIS1]], %struct.S** [[TMP31]], align 8
|
||||
// CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
|
||||
// CHECK-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to [4 x i32]**
|
||||
// CHECK-NEXT: store [4 x i32]* [[ARR]], [4 x i32]** [[TMP33]], align 8
|
||||
// CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
|
||||
// CHECK-NEXT: store i8* null, i8** [[TMP34]], align 8
|
||||
// CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
||||
// CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
|
||||
// CHECK-NEXT: store i32 1, i32* [[TMP38]], align 4
|
||||
// CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
|
||||
// CHECK-NEXT: store i32 5, i32* [[TMP39]], align 4
|
||||
// CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
|
||||
// CHECK-NEXT: store i8** [[TMP35]], i8*** [[TMP40]], align 8
|
||||
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
|
||||
// CHECK-NEXT: store i8** [[TMP36]], i8*** [[TMP41]], align 8
|
||||
// CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
|
||||
// CHECK-NEXT: store i64* [[TMP37]], i64** [[TMP42]], align 8
|
||||
// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
|
||||
// CHECK-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.2, i32 0, i32 0), i64** [[TMP43]], align 8
|
||||
// CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
|
||||
// CHECK-NEXT: store i8** null, i8*** [[TMP44]], align 8
|
||||
// CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
|
||||
// CHECK-NEXT: store i8** null, i8*** [[TMP45]], align 8
|
||||
// CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
|
||||
// CHECK-NEXT: store i64 0, i64* [[TMP46]], align 8
|
||||
// CHECK-NEXT: [[TMP47:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
|
||||
// CHECK-NEXT: [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0
|
||||
// CHECK-NEXT: br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
||||
// CHECK: omp_offload.failed:
|
||||
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14(%struct.S* [[THIS1]]) #[[ATTR5]]
|
||||
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
||||
// CHECK: omp_offload.cont:
|
||||
// CHECK-NEXT: ret void
|
||||
//
|
||||
//
|
||||
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27
|
||||
// CHECK-SAME: (float* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], float* noundef [[PTR:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[REF:%.*]], [4 x float]* noundef nonnull align 4 dereferenceable(16) [[ARR:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[VLA1:%.*]]) #[[ATTR4:[0-9]+]] {
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
||||
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca float*, align 8
|
||||
// CHECK-NEXT: [[REF_ADDR:%.*]] = alloca float*, align 8
|
||||
// CHECK-NEXT: [[ARR_ADDR:%.*]] = alloca [4 x float]*, align 8
|
||||
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
|
||||
// CHECK-NEXT: [[VLA_ADDR2:%.*]] = alloca float*, align 8
|
||||
// CHECK-NEXT: [[TMP:%.*]] = alloca float*, align 8
|
||||
// CHECK-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
||||
// CHECK-NEXT: store float* [[PTR]], float** [[PTR_ADDR]], align 8
|
||||
// CHECK-NEXT: store float* [[REF]], float** [[REF_ADDR]], align 8
|
||||
// CHECK-NEXT: store [4 x float]* [[ARR]], [4 x float]** [[ARR_ADDR]], align 8
|
||||
// CHECK-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
|
||||
// CHECK-NEXT: store float* [[VLA1]], float** [[VLA_ADDR2]], align 8
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = load float*, float** [[A_ADDR]], align 8
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[REF_ADDR]], align 8
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = load [4 x float]*, [4 x float]** [[ARR_ADDR]], align 8
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = load float*, float** [[VLA_ADDR2]], align 8
|
||||
// CHECK-NEXT: store float* [[TMP1]], float** [[TMP]], align 8
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = load float, float* [[TMP0]], align 4
|
||||
// CHECK-NEXT: [[INC:%.*]] = fadd float [[TMP5]], 1.000000e+00
|
||||
// CHECK-NEXT: store float [[INC]], float* [[TMP0]], align 4
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = load float*, float** [[PTR_ADDR]], align 8
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[TMP6]], align 4
|
||||
// CHECK-NEXT: [[INC3:%.*]] = fadd float [[TMP7]], 1.000000e+00
|
||||
// CHECK-NEXT: store float [[INC3]], float* [[TMP6]], align 4
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = load float*, float** [[TMP]], align 8
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = load float, float* [[TMP8]], align 4
|
||||
// CHECK-NEXT: [[INC4:%.*]] = fadd float [[TMP9]], 1.000000e+00
|
||||
// CHECK-NEXT: store float [[INC4]], float* [[TMP8]], align 4
|
||||
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[TMP2]], i64 0, i64 0
|
||||
// CHECK-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
||||
// CHECK-NEXT: [[INC5:%.*]] = fadd float [[TMP10]], 1.000000e+00
|
||||
// CHECK-NEXT: store float [[INC5]], float* [[ARRAYIDX]], align 4
|
||||
// CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP4]], i64 0
|
||||
// CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
||||
// CHECK-NEXT: [[INC7:%.*]] = fadd float [[TMP11]], 1.000000e+00
|
||||
// CHECK-NEXT: store float [[INC7]], float* [[ARRAYIDX6]], align 4
|
||||
// CHECK-NEXT: ret void
|
||||
//
|
||||
//
|
||||
// CHECK-LABEL: define {{[^@]+}}@_ZN1SC2Ev
|
||||
// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
||||
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
||||
// CHECK-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
||||
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
||||
// CHECK-NEXT: store i32 0, i32* [[A]], align 8
|
||||
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
|
||||
// CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
|
||||
// CHECK-NEXT: store i32* [[A2]], i32** [[PTR]], align 8
|
||||
// CHECK-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
|
||||
// CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
|
||||
// CHECK-NEXT: store i32* [[A3]], i32** [[REF]], align 8
|
||||
// CHECK-NEXT: ret void
|
||||
//
|
||||
//
|
||||
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14
|
||||
// CHECK-SAME: (%struct.S* noundef [[THIS:%.*]]) #[[ATTR4]] {
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
||||
// CHECK-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
||||
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP0]], i32 0, i32 0
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 8
|
||||
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
|
||||
// CHECK-NEXT: store i32 [[INC]], i32* [[A]], align 8
|
||||
// CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 1
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[PTR]], align 8
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
|
||||
// CHECK-NEXT: [[INC1:%.*]] = add nsw i32 [[TMP3]], 1
|
||||
// CHECK-NEXT: store i32 [[INC1]], i32* [[TMP2]], align 4
|
||||
// CHECK-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 2
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[REF]], align 8
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
||||
// CHECK-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP5]], 1
|
||||
// CHECK-NEXT: store i32 [[INC2]], i32* [[TMP4]], align 4
|
||||
// CHECK-NEXT: [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 3
|
||||
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 8
|
||||
// CHECK-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP6]], 1
|
||||
// CHECK-NEXT: store i32 [[INC3]], i32* [[ARRAYIDX]], align 8
|
||||
// CHECK-NEXT: ret void
|
||||
//
|
||||
//
|
||||
// CHECK-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
|
||||
// CHECK-SAME: () #[[ATTR7:[0-9]+]] {
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: call void @__tgt_register_requires(i64 1)
|
||||
// CHECK-NEXT: ret void
|
||||
//
|
||||
//
|
||||
// SIMD-ONLY0-LABEL: define {{[^@]+}}@main
|
||||
// SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] {
|
||||
// SIMD-ONLY0-NEXT: entry:
|
||||
// SIMD-ONLY0-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
||||
// SIMD-ONLY0-NEXT: [[A:%.*]] = alloca float, align 4
|
||||
// SIMD-ONLY0-NEXT: [[PTR:%.*]] = alloca float*, align 8
|
||||
// SIMD-ONLY0-NEXT: [[REF:%.*]] = alloca float*, align 8
|
||||
// SIMD-ONLY0-NEXT: [[ARR:%.*]] = alloca [4 x float], align 4
|
||||
// SIMD-ONLY0-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
||||
// SIMD-ONLY0-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
||||
// SIMD-ONLY0-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[TMP:%.*]] = alloca float*, align 8
|
||||
// SIMD-ONLY0-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
||||
// SIMD-ONLY0-NEXT: store float 0.000000e+00, float* [[A]], align 4
|
||||
// SIMD-ONLY0-NEXT: store float* [[A]], float** [[PTR]], align 8
|
||||
// SIMD-ONLY0-NEXT: store float* [[A]], float** [[REF]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load float, float* [[A]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32
|
||||
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = zext i32 [[CONV]] to i64
|
||||
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
||||
// SIMD-ONLY0-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
|
||||
// SIMD-ONLY0-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
||||
// SIMD-ONLY0-NEXT: call void @_ZN1SC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
|
||||
// SIMD-ONLY0-NEXT: call void @_ZN1S3fooEv(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
|
||||
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load float*, float** [[REF]], align 8
|
||||
// SIMD-ONLY0-NEXT: store float* [[TMP3]], float** [[TMP]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load float*, float** [[REF]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load float, float* [[A]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[INC:%.*]] = fadd float [[TMP5]], 1.000000e+00
|
||||
// SIMD-ONLY0-NEXT: store float [[INC]], float* [[A]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[TMP6:%.*]] = load float*, float** [[PTR]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[TMP7:%.*]] = load float, float* [[TMP6]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[INC1:%.*]] = fadd float [[TMP7]], 1.000000e+00
|
||||
// SIMD-ONLY0-NEXT: store float [[INC1]], float* [[TMP6]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[TMP8:%.*]] = load float*, float** [[TMP]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[TMP9:%.*]] = load float, float* [[TMP8]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[INC2:%.*]] = fadd float [[TMP9]], 1.000000e+00
|
||||
// SIMD-ONLY0-NEXT: store float [[INC2]], float* [[TMP8]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[ARR]], i64 0, i64 0
|
||||
// SIMD-ONLY0-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[INC3:%.*]] = fadd float [[TMP10]], 1.000000e+00
|
||||
// SIMD-ONLY0-NEXT: store float [[INC3]], float* [[ARRAYIDX]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 0
|
||||
// SIMD-ONLY0-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX4]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[INC5:%.*]] = fadd float [[TMP11]], 1.000000e+00
|
||||
// SIMD-ONLY0-NEXT: store float [[INC5]], float* [[ARRAYIDX4]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[TMP12:%.*]] = load float, float* [[A]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[CONV6:%.*]] = fptosi float [[TMP12]] to i32
|
||||
// SIMD-ONLY0-NEXT: store i32 [[CONV6]], i32* [[RETVAL]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[TMP13:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
||||
// SIMD-ONLY0-NEXT: call void @llvm.stackrestore(i8* [[TMP13]])
|
||||
// SIMD-ONLY0-NEXT: [[TMP14:%.*]] = load i32, i32* [[RETVAL]], align 4
|
||||
// SIMD-ONLY0-NEXT: ret i32 [[TMP14]]
|
||||
//
|
||||
//
|
||||
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SC1Ev
|
||||
// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
|
||||
// SIMD-ONLY0-NEXT: entry:
|
||||
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
||||
// SIMD-ONLY0-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
||||
// SIMD-ONLY0-NEXT: call void @_ZN1SC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS1]])
|
||||
// SIMD-ONLY0-NEXT: ret void
|
||||
//
|
||||
//
|
||||
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1S3fooEv
|
||||
// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
|
||||
// SIMD-ONLY0-NEXT: entry:
|
||||
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
||||
// SIMD-ONLY0-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
||||
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1
|
||||
// SIMD-ONLY0-NEXT: store i32 [[INC]], i32* [[A]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
|
||||
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32*, i32** [[PTR]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP2]], 1
|
||||
// SIMD-ONLY0-NEXT: store i32 [[INC2]], i32* [[TMP1]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
|
||||
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load i32*, i32** [[REF]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP4]], 1
|
||||
// SIMD-ONLY0-NEXT: store i32 [[INC3]], i32* [[TMP3]], align 4
|
||||
// SIMD-ONLY0-NEXT: [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 3
|
||||
// SIMD-ONLY0-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
|
||||
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP5]], 1
|
||||
// SIMD-ONLY0-NEXT: store i32 [[INC4]], i32* [[ARRAYIDX]], align 8
|
||||
// SIMD-ONLY0-NEXT: ret void
|
||||
//
|
||||
//
|
||||
// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SC2Ev
|
||||
// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
|
||||
// SIMD-ONLY0-NEXT: entry:
|
||||
// SIMD-ONLY0-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
||||
// SIMD-ONLY0-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
||||
// SIMD-ONLY0-NEXT: store i32 0, i32* [[A]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
|
||||
// SIMD-ONLY0-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
|
||||
// SIMD-ONLY0-NEXT: store i32* [[A2]], i32** [[PTR]], align 8
|
||||
// SIMD-ONLY0-NEXT: [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
|
||||
// SIMD-ONLY0-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
|
||||
// SIMD-ONLY0-NEXT: store i32* [[A3]], i32** [[REF]], align 8
|
||||
// SIMD-ONLY0-NEXT: ret void
|
||||
//
|
|
@@ -0,0 +1,33 @@
// RUN: %libomptarget-compilexx-generic -fopenmp-version=51
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic

#include <assert.h>
#include <iostream>
#include <omp.h>

struct view {
  const int size = 10;
  int *data_host;
  int *data_device;
  void foo() {
    std::size_t bytes = size * sizeof(int);
    const int host_id = omp_get_initial_device();
    const int device_id = omp_get_default_device();
    data_host = (int *)malloc(bytes);
    data_device = (int *)omp_target_alloc(bytes, device_id);
#pragma omp target teams distribute parallel for has_device_addr(data_device[0])
    for (int i = 0; i < size; ++i)
      data_device[i] = i;
    omp_target_memcpy(data_host, data_device, bytes, 0, 0, host_id, device_id);
    for (int i = 0; i < size; ++i)
      assert(data_host[i] == i);
  }
};

int main() {
  view a;
  a.foo();
  // CHECK: PASSED
  printf("PASSED\n");
}
@@ -0,0 +1,102 @@
// RUN: %libomptarget-compile-generic -fopenmp-version=51
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic

// UNSUPPORTED: amdgcn-amd-amdhsa
// UNSUPPORTED: amdgcn-amd-amdhsa-LTO

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define N 1024
#define FROM 64
#define LENGTH 128

void foo() {
  const int device_id = omp_get_default_device();
  float *A;
  A = (float *)omp_target_alloc((FROM + LENGTH) * sizeof(float), device_id);

  float *A_dev = NULL;
#pragma omp target has_device_addr(A [FROM:LENGTH]) map(A_dev)
  { A_dev = A; }
  // CHECK: Success
  if (A_dev == NULL || A_dev != A)
    fprintf(stderr, "Failure %p %p \n", A_dev, A);
  else
    fprintf(stderr, "Success\n");
}

void bar() {
  short x[10];
  short *xp = &x[0];

  x[1] = 111;
#pragma omp target data map(tofrom : xp [0:2]) use_device_addr(xp [0:2])
#pragma omp target has_device_addr(xp [0:2])
  {
    xp[1] = 222;
    // CHECK: 222
    printf("%d %p\n", xp[1], &xp[1]);
  }
  // CHECK: 222
  printf("%d %p\n", xp[1], &xp[1]);
}

void moo() {
  short *b = malloc(sizeof(short));
  b = b - 1;

  b[1] = 111;
#pragma omp target data map(tofrom : b[1]) use_device_addr(b[1])
#pragma omp target has_device_addr(b[1])
  {
    b[1] = 222;
    // CHECK: 222
    printf("%hd %p %p %p\n", b[1], b, &b[1], &b);
  }
  // CHECK: 222
  printf("%hd %p %p %p\n", b[1], b, &b[1], &b);
}

void zoo() {
  short x[10];
  short *(xp[10]);
  xp[1] = &x[0];
  short **xpp = &xp[0];

  x[1] = 111;
#pragma omp target data map(tofrom : xpp[1][1]) use_device_addr(xpp[1][1])
#pragma omp target has_device_addr(xpp[1][1])
  {
    xpp[1][1] = 222;
    // CHECK: 222
    printf("%d %p %p\n", xpp[1][1], xpp[1], &xpp[1][1]);
  }
  // CHECK: 222
  printf("%d %p %p\n", xpp[1][1], xpp[1], &xpp[1][1]);
}

void xoo() {
  short a[10], b[10];
  a[1] = 111;
  b[1] = 111;
#pragma omp target data map(to : a [0:2], b [0:2]) use_device_addr(a, b)
#pragma omp target has_device_addr(a) has_device_addr(b[0])
  {
    a[1] = 222;
    b[1] = 222;
    // CHECK: 222 222
    printf("%hd %hd %p %p %p\n", a[1], b[1], &a, b, &b);
  }
  // CHECK:111
  printf("%hd %hd %p %p %p\n", a[1], b[1], &a, b, &b); // 111 111 p1d p2d p3d
}

int main() {
  foo();
  bar();
  moo();
  zoo();
  xoo();
  return 0;
}