// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK2
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK4
//
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics
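// Checks IR generation for the 'reduction' clause on '#pragma omp sections':
// scalar '+' and 'min' reductions, user-defined 'operator&' and 'operator&&'
// reductions on the class template S, and reductions on the global 'g'
// captured in lambdas (-DLAMBDA) and blocks (-DBLOCKS).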
#ifndef HEADER
#define HEADER

volatile double g;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};

template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var = T(), t_var1;
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}

int main() {
#ifdef LAMBDA
  [&]() {
#pragma omp parallel
#pragma omp sections reduction(+:g)
    {
      // Reduction list for runtime.
      g = 1;
#pragma omp section
      [&]() {
        g = 2;
      }();
    }
  }();
  return 0;
#elif defined(BLOCKS)
  ^{
#pragma omp parallel
#pragma omp sections reduction(-:g)
    {
      // Reduction list for runtime.
      g = 1;
#pragma omp section
      ^{
        g = 2;
      }();
    }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    {
      vec[0] = t_var;
      s_arr[0] = var;
      vec[1] = t_var1;
      s_arr[1] = var1;
    }
  }
  return tmain<int>();
#endif
}
// Reduction list for runtime.
// For + reduction operation initial value of private variable is 0.
// For & reduction operation initial value of private variable is ones in all bits.
// For && reduction operation initial value of private variable is 1.0.
// For min reduction operation initial value of private variable is largest representable value.
// Skip checks for internal operations.

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};

// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);

// switch(res)
// case 1:
// t_var += t_var_reduction;
// var = var.operator &(var_reduction);
// var1 = var1.operator &&(var1_reduction);
// t_var1 = min(t_var1, t_var1_reduction);
// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// break;
// case 2:
// t_var += t_var_reduction;
// var = var.operator &(var_reduction);
// var1 = var1.operator &&(var1_reduction);
// t_var1 = min(t_var1, t_var1_reduction);
// break;

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
// *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
// ...
// *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
// *(Type<n>-1*)rhs[<n>-1]);
// }
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
// t_var_lhs += t_var_rhs;
// var_lhs = var_lhs.operator &(var_rhs);
// var1_lhs = var1_lhs.operator &&(var1_rhs);
// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
#endif
// CHECK1-LABEL: define {{[^@]+}}@main
|
|
|
|
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
|
|
|
|
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
|
|
|
|
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
|
|
|
|
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK1-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
|
|
|
|
// CHECK1-NEXT: store float 0.000000e+00, float* [[T_VAR]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
|
|
|
|
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
|
|
|
|
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]], float 3.000000e+00)
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
|
|
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float*, %struct.S*, %struct.S*, float*, [2 x i32]*, [2 x %struct.S]*)* @.omp_outlined. to void (i32*, i32*, ...)*), float* [[T_VAR]], %struct.S* [[VAR]], %struct.S* [[VAR1]], float* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S]* [[S_ARR]])
|
|
|
|
// CHECK1-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
|
|
|
|
// CHECK1-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4:[0-9]+]]
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
|
|
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
|
|
|
|
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
|
|
|
|
// CHECK1: arraydestroy.body:
|
|
|
|
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
|
|
|
|
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
|
|
|
|
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
|
|
|
|
// CHECK1: arraydestroy.done1:
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
|
|
|
|
// CHECK1-NEXT: ret i32 [[TMP2]]
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
|
|
|
|
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
|
|
|
|
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca float*, align 8
|
|
|
|
// CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: [[T_VAR1_ADDR:%.*]] = alloca float*, align 8
|
|
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
|
|
|
|
// CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[T_VAR2:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S:%.*]], align 4
|
|
|
|
// CHECK1-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK1-NEXT: [[T_VAR15:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
|
|
|
|
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: [[REF_TMP17:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK1-NEXT: [[ATOMIC_TEMP27:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: [[_TMP28:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store float* [[T_VAR]], float** [[T_VAR_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[VAR1]], %struct.S** [[VAR1_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store float* [[T_VAR1]], float** [[T_VAR1_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float*, float** [[T_VAR_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR1_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load float*, float** [[T_VAR1_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
|
|
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
|
|
|
|
// CHECK1-NEXT: store float 0.000000e+00, float* [[T_VAR2]], align 4
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
|
|
|
|
// CHECK1-NEXT: store float 0x47EFFFFFE0000000, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
|
|
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 0
|
|
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 0
|
|
|
|
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
|
|
// CHECK1: omp.inner.for.cond:
|
|
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
|
|
// CHECK1: omp.inner.for.body:
|
|
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK1-NEXT: switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
|
|
|
|
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
|
|
|
|
// CHECK1-NEXT: ]
|
|
|
|
// CHECK1: .omp.sections.case:
|
|
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load float, float* [[T_VAR2]], align 4
|
|
|
|
// CHECK1-NEXT: [[CONV:%.*]] = fptosi float [[TMP15]] to i32
|
|
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
|
|
|
|
// CHECK1-NEXT: store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
|
|
|
|
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 0
|
|
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
|
|
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: [[CONV7:%.*]] = fptosi float [[TMP18]] to i32
|
|
|
|
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 1
|
|
|
|
// CHECK1-NEXT: store i32 [[CONV7]], i32* [[ARRAYIDX8]], align 4
|
|
|
|
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 1
|
|
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast %struct.S* [[ARRAYIDX9]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 4, i1 false)
|
|
|
|
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
|
|
|
|
// CHECK1: .omp.sections.exit:
|
|
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
|
|
// CHECK1: omp.inner.for.inc:
|
|
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP21]], 1
|
|
|
|
// CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
|
|
// CHECK1: omp.inner.for.end:
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
|
|
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast float* [[T_VAR2]] to i8*
|
|
|
|
// CHECK1-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
|
|
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
|
|
|
|
// CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
|
|
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
|
|
|
|
// CHECK1-NEXT: store i8* [[TMP27]], i8** [[TMP26]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
|
|
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast float* [[T_VAR15]] to i8*
|
|
|
|
// CHECK1-NEXT: store i8* [[TMP29]], i8** [[TMP28]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP30]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK1-NEXT: switch i32 [[TMP31]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
|
|
// CHECK1-NEXT: ]
|
|
|
|
// CHECK1: .omp.reduction.case1:
|
|
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = load float, float* [[TMP0]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = load float, float* [[T_VAR2]], align 4
|
|
|
|
// CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP32]], [[TMP33]]
|
|
|
|
// CHECK1-NEXT: store float [[ADD]], float* [[TMP0]], align 4
|
|
|
|
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
|
|
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S* [[CALL]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
|
|
|
|
// CHECK1-NEXT: [[CALL10:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]])
|
|
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL10]], 0.000000e+00
|
|
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
|
|
// CHECK1: land.rhs:
|
|
|
|
// CHECK1-NEXT: [[CALL11:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
|
|
|
|
// CHECK1-NEXT: [[TOBOOL12:%.*]] = fcmp une float [[CALL11]], 0.000000e+00
|
|
|
|
// CHECK1-NEXT: br label [[LAND_END]]
|
|
|
|
// CHECK1: land.end:
|
|
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL12]], [[LAND_RHS]] ]
|
|
|
|
// CHECK1-NEXT: [[CONV13:%.*]] = uitofp i1 [[TMP36]] to float
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV13]])
|
|
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP37]], i8* align 4 [[TMP38]], i64 4, i1 false)
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: [[TMP39:%.*]] = load float, float* [[TMP3]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP40:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: [[CMP14:%.*]] = fcmp olt float [[TMP39]], [[TMP40]]
|
|
|
|
// CHECK1-NEXT: br i1 [[CMP14]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
|
|
// CHECK1: cond.true:
|
|
|
|
// CHECK1-NEXT: [[TMP41:%.*]] = load float, float* [[TMP3]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
|
|
// CHECK1: cond.false:
|
|
|
|
// CHECK1-NEXT: [[TMP42:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
|
|
// CHECK1: cond.end:
|
|
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi float [ [[TMP41]], [[COND_TRUE]] ], [ [[TMP42]], [[COND_FALSE]] ]
|
|
|
|
// CHECK1-NEXT: store float [[COND]], float* [[TMP3]], align 4
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK1: .omp.reduction.case2:
|
|
|
|
// CHECK1-NEXT: [[TMP43:%.*]] = load float, float* [[T_VAR2]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP44:%.*]] = bitcast float* [[TMP0]] to i32*
|
|
|
|
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP44]] monotonic, align 4
|
|
|
|
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
|
|
// CHECK1: atomic_cont:
|
|
|
|
// CHECK1-NEXT: [[TMP45:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP53:%.*]], [[ATOMIC_CONT]] ]
|
|
|
|
// CHECK1-NEXT: [[TMP46:%.*]] = bitcast float* [[ATOMIC_TEMP]] to i32*
|
|
|
|
// CHECK1-NEXT: [[TMP47:%.*]] = bitcast i32 [[TMP45]] to float
|
|
|
|
// CHECK1-NEXT: store float [[TMP47]], float* [[TMP]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP48:%.*]] = load float, float* [[TMP]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP49:%.*]] = load float, float* [[T_VAR2]], align 4
|
|
|
|
// CHECK1-NEXT: [[ADD15:%.*]] = fadd float [[TMP48]], [[TMP49]]
|
|
|
|
// CHECK1-NEXT: store float [[ADD15]], float* [[ATOMIC_TEMP]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP50:%.*]] = load i32, i32* [[TMP46]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP51:%.*]] = bitcast float* [[TMP0]] to i32*
|
|
|
|
// CHECK1-NEXT: [[TMP52:%.*]] = cmpxchg i32* [[TMP51]], i32 [[TMP45]], i32 [[TMP50]] monotonic monotonic, align 4
|
|
|
|
// CHECK1-NEXT: [[TMP53]] = extractvalue { i32, i1 } [[TMP52]], 0
|
|
|
|
// CHECK1-NEXT: [[TMP54:%.*]] = extractvalue { i32, i1 } [[TMP52]], 1
|
|
|
|
// CHECK1-NEXT: br i1 [[TMP54]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
|
|
// CHECK1: atomic_exit:
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK1-NEXT: [[CALL16:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
|
|
|
|
// CHECK1-NEXT: [[TMP55:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP56:%.*]] = bitcast %struct.S* [[CALL16]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP55]], i8* align 4 [[TMP56]], i64 4, i1 false)
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK1-NEXT: [[CALL18:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]])
|
|
|
|
// CHECK1-NEXT: [[TOBOOL19:%.*]] = fcmp une float [[CALL18]], 0.000000e+00
|
|
|
|
// CHECK1-NEXT: br i1 [[TOBOOL19]], label [[LAND_RHS20:%.*]], label [[LAND_END23:%.*]]
|
|
|
|
// CHECK1: land.rhs20:
|
|
|
|
// CHECK1-NEXT: [[CALL21:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
|
|
|
|
// CHECK1-NEXT: [[TOBOOL22:%.*]] = fcmp une float [[CALL21]], 0.000000e+00
|
|
|
|
// CHECK1-NEXT: br label [[LAND_END23]]
|
|
|
|
// CHECK1: land.end23:
|
|
|
|
// CHECK1-NEXT: [[TMP57:%.*]] = phi i1 [ false, [[ATOMIC_EXIT]] ], [ [[TOBOOL22]], [[LAND_RHS20]] ]
|
|
|
|
// CHECK1-NEXT: [[CONV24:%.*]] = uitofp i1 [[TMP57]] to float
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP17]], float [[CONV24]])
|
|
|
|
// CHECK1-NEXT: [[TMP58:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP59:%.*]] = bitcast %struct.S* [[REF_TMP17]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP58]], i8* align 4 [[TMP59]], i64 4, i1 false)
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP17]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK1-NEXT: [[TMP60:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP61:%.*]] = bitcast float* [[TMP3]] to i32*
|
|
|
|
// CHECK1-NEXT: [[ATOMIC_LOAD25:%.*]] = load atomic i32, i32* [[TMP61]] monotonic, align 4
|
|
|
|
// CHECK1-NEXT: br label [[ATOMIC_CONT26:%.*]]
|
|
|
|
// CHECK1: atomic_cont26:
|
|
|
|
// CHECK1-NEXT: [[TMP62:%.*]] = phi i32 [ [[ATOMIC_LOAD25]], [[LAND_END23]] ], [ [[TMP72:%.*]], [[COND_END32:%.*]] ]
|
|
|
|
// CHECK1-NEXT: [[TMP63:%.*]] = bitcast float* [[ATOMIC_TEMP27]] to i32*
|
|
|
|
// CHECK1-NEXT: [[TMP64:%.*]] = bitcast i32 [[TMP62]] to float
|
|
|
|
// CHECK1-NEXT: store float [[TMP64]], float* [[_TMP28]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP65:%.*]] = load float, float* [[_TMP28]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP66:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: [[CMP29:%.*]] = fcmp olt float [[TMP65]], [[TMP66]]
|
|
|
|
// CHECK1-NEXT: br i1 [[CMP29]], label [[COND_TRUE30:%.*]], label [[COND_FALSE31:%.*]]
|
|
|
|
// CHECK1: cond.true30:
|
|
|
|
// CHECK1-NEXT: [[TMP67:%.*]] = load float, float* [[_TMP28]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END32]]
|
|
|
|
// CHECK1: cond.false31:
|
|
|
|
// CHECK1-NEXT: [[TMP68:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END32]]
|
|
|
|
// CHECK1: cond.end32:
|
|
|
|
// CHECK1-NEXT: [[COND33:%.*]] = phi float [ [[TMP67]], [[COND_TRUE30]] ], [ [[TMP68]], [[COND_FALSE31]] ]
|
|
|
|
// CHECK1-NEXT: store float [[COND33]], float* [[ATOMIC_TEMP27]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP69:%.*]] = load i32, i32* [[TMP63]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP70:%.*]] = bitcast float* [[TMP3]] to i32*
|
|
|
|
// CHECK1-NEXT: [[TMP71:%.*]] = cmpxchg i32* [[TMP70]], i32 [[TMP62]], i32 [[TMP69]] monotonic monotonic, align 4
|
|
|
|
// CHECK1-NEXT: [[TMP72]] = extractvalue { i32, i1 } [[TMP71]], 0
|
|
|
|
// CHECK1-NEXT: [[TMP73:%.*]] = extractvalue { i32, i1 } [[TMP71]], 1
|
|
|
|
// CHECK1-NEXT: br i1 [[TMP73]], label [[ATOMIC_EXIT34:%.*]], label [[ATOMIC_CONT26]]
|
|
|
|
// CHECK1: atomic_exit34:
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK1: .omp.reduction.default:
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP7]])
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
|
|
|
|
// CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
|
|
|
|
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
|
|
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
|
|
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
|
|
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to float*
|
|
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
|
|
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to float*
|
|
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
|
|
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
|
|
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
|
|
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S*
|
|
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
|
|
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S*
|
|
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
|
|
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S*
|
|
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
|
|
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to float*
|
|
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
|
|
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to float*
|
|
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = load float, float* [[TMP11]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = load float, float* [[TMP8]], align 4
|
|
|
|
// CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP30]], [[TMP31]]
|
|
|
|
// CHECK1-NEXT: store float [[ADD]], float* [[TMP11]], align 4
|
|
|
|
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP14]])
|
|
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast %struct.S* [[TMP17]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast %struct.S* [[CALL]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
|
|
|
|
// CHECK1-NEXT: [[CALL2:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP23]])
|
|
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL2]], 0.000000e+00
|
|
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
|
|
// CHECK1: land.rhs:
|
|
|
|
// CHECK1-NEXT: [[CALL3:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP20]])
|
|
|
|
// CHECK1-NEXT: [[TOBOOL4:%.*]] = fcmp une float [[CALL3]], 0.000000e+00
|
|
|
|
// CHECK1-NEXT: br label [[LAND_END]]
|
|
|
|
// CHECK1: land.end:
|
|
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
|
|
|
|
// CHECK1-NEXT: [[CONV:%.*]] = uitofp i1 [[TMP34]] to float
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV]])
|
|
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S* [[TMP23]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = load float, float* [[TMP29]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = load float, float* [[TMP26]], align 4
|
|
|
|
// CHECK1-NEXT: [[CMP:%.*]] = fcmp olt float [[TMP37]], [[TMP38]]
|
|
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
|
|
// CHECK1: cond.true:
|
|
|
|
// CHECK1-NEXT: [[TMP39:%.*]] = load float, float* [[TMP29]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
|
|
// CHECK1: cond.false:
|
|
|
|
// CHECK1-NEXT: [[TMP40:%.*]] = load float, float* [[TMP26]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
|
|
// CHECK1: cond.end:
|
|
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi float [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
|
|
|
|
// CHECK1-NEXT: store float [[COND]], float* [[TMP29]], align 4
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEanERKS0_
|
|
|
|
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: ret %struct.S* [[THIS1]]
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEcvfEv
|
|
|
|
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: ret float 0.000000e+00
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
|
|
|
|
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
|
|
|
|
// CHECK1-SAME: () #[[ATTR6]] {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[T:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
|
|
|
|
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
|
|
|
|
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
|
|
|
|
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
|
|
|
|
// CHECK1-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 4
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
|
|
|
|
// CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
|
|
|
|
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
|
|
|
|
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]], i32 3)
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
|
|
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.S.0*, %struct.S.0*, i32*, [2 x i32]*, [2 x %struct.S.0]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[T_VAR]], %struct.S.0* [[VAR]], %struct.S.0* [[VAR1]], i32* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]])
|
|
|
|
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
|
|
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
|
|
|
|
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
|
|
|
|
// CHECK1: arraydestroy.body:
|
|
|
|
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
|
|
|
|
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
|
|
|
|
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
|
|
|
|
// CHECK1: arraydestroy.done1:
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
|
|
|
|
// CHECK1-NEXT: ret i32 [[TMP2]]
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
|
|
|
|
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load volatile double, double* @g, align 8
|
|
|
|
// CHECK1-NEXT: [[CONV:%.*]] = fptrunc double [[TMP0]] to float
|
|
|
|
// CHECK1-NEXT: store float [[CONV]], float* [[F]], align 4
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
|
|
|
|
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
|
|
|
|
// CHECK1-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to double
|
|
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load volatile double, double* @g, align 8
|
|
|
|
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
|
|
|
|
// CHECK1-NEXT: [[CONV2:%.*]] = fptrunc double [[ADD]] to float
|
|
|
|
// CHECK1-NEXT: store float [[CONV2]], float* [[F]], align 4
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
|
|
|
|
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
|
|
|
|
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
|
|
|
|
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
|
|
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3]] {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: [[T_VAR1_ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
|
|
|
|
// CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[T_VAR2:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S_0]], align 4
// CHECK1-NEXT: [[T_VAR15:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
// CHECK1-NEXT: [[REF_TMP13:%.*]] = alloca [[STRUCT_S_0]], align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[VAR1]], %struct.S.0** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: store i32* [[T_VAR1]], i32** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[T_VAR2]], align 4
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: store i32 2147483647, i32* [[T_VAR15]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 1
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE6:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR2]], align 4
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
// CHECK1-NEXT: store i32 [[TMP15]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK1: .omp.sections.case6:
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast %struct.S.0* [[ARRAYIDX7]] to i8*
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i32* [[T_VAR2]] to i8*
// CHECK1-NEXT: store i8* [[TMP20]], i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP22:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
// CHECK1-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP24:%.*]] = bitcast %struct.S.0* [[VAR14]] to i8*
// CHECK1-NEXT: store i8* [[TMP24]], i8** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i32* [[T_VAR15]] to i8*
// CHECK1-NEXT: store i8* [[TMP26]], i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP27]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP28]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[T_VAR2]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP31]], i8* align 4 [[TMP32]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: [[CALL8:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL8]], 0
|
|
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
|
|
// CHECK1: land.rhs:
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: [[CALL9:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TOBOOL10:%.*]] = icmp ne i32 [[CALL9]], 0
|
|
|
|
// CHECK1-NEXT: br label [[LAND_END]]
|
|
|
|
// CHECK1: land.end:
|
|
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL10]], [[LAND_RHS]] ]
|
|
|
|
// CHECK1-NEXT: [[CONV:%.*]] = zext i1 [[TMP33]] to i32
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP36]], [[TMP37]]
|
|
|
|
// CHECK1-NEXT: br i1 [[CMP11]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
|
|
// CHECK1: cond.true:
|
|
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
|
|
// CHECK1: cond.false:
|
|
|
|
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
|
|
// CHECK1: cond.end:
|
|
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP38]], [[COND_TRUE]] ], [ [[TMP39]], [[COND_FALSE]] ]
|
|
|
|
// CHECK1-NEXT: store i32 [[COND]], i32* [[TMP3]], align 4
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK1: .omp.reduction.case2:
|
|
|
|
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[T_VAR2]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP41:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP40]] monotonic, align 4
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TMP42:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP43:%.*]] = bitcast %struct.S.0* [[CALL12]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i64 4, i1 false)
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: [[CALL14:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TOBOOL15:%.*]] = icmp ne i32 [[CALL14]], 0
|
|
|
|
// CHECK1-NEXT: br i1 [[TOBOOL15]], label [[LAND_RHS16:%.*]], label [[LAND_END19:%.*]]
|
|
|
|
// CHECK1: land.rhs16:
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: [[CALL17:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TOBOOL18:%.*]] = icmp ne i32 [[CALL17]], 0
|
|
|
|
// CHECK1-NEXT: br label [[LAND_END19]]
|
|
|
|
// CHECK1: land.end19:
|
|
|
|
// CHECK1-NEXT: [[TMP44:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL18]], [[LAND_RHS16]] ]
|
|
|
|
// CHECK1-NEXT: [[CONV20:%.*]] = zext i1 [[TMP44]] to i32
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP13]], i32 [[CONV20]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TMP45:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP46:%.*]] = bitcast %struct.S.0* [[REF_TMP13]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP45]], i8* align 4 [[TMP46]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP13]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[T_VAR15]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP48:%.*]] = atomicrmw min i32* [[TMP3]], i32 [[TMP47]] monotonic, align 4
|
|
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK1: .omp.reduction.default:
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
|
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
|
|
|
|
// CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
|
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
|
|
|
|
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
|
|
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
|
|
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
|
|
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
|
|
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
|
|
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
|
|
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
|
|
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
|
|
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
|
|
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S.0*
|
|
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
|
|
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S.0*
|
|
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
|
|
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S.0*
|
|
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
|
|
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to i32*
|
|
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
|
|
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
|
|
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to i32*
|
|
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP11]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP8]], align 4
|
|
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
|
|
|
|
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP17]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: [[CALL2:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP23]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL2]], 0
|
|
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
|
|
// CHECK1: land.rhs:
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP20]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TOBOOL4:%.*]] = icmp ne i32 [[CALL3]], 0
|
|
|
|
// CHECK1-NEXT: br label [[LAND_END]]
|
|
|
|
// CHECK1: land.end:
|
|
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
|
|
|
|
// CHECK1-NEXT: [[CONV:%.*]] = zext i1 [[TMP34]] to i32
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP23]] to i8*
|
|
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
|
|
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP29]], align 4
|
|
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP26]], align 4
|
|
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]]
|
|
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
|
|
// CHECK1: cond.true:
|
|
|
|
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[TMP29]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
|
|
// CHECK1: cond.false:
|
|
|
|
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[TMP26]], align 4
|
|
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
|
|
// CHECK1: cond.end:
|
|
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
|
|
|
|
// CHECK1-NEXT: store i32 [[COND]], i32* [[TMP29]], align 4
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEanERKS0_
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6]] align 2 {
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: ret %struct.S.0* [[THIS1]]
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEcviEv
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: ret i32 0
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
|
|
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load volatile double, double* @g, align 8
|
|
|
|
// CHECK1-NEXT: [[CONV:%.*]] = fptosi double [[TMP0]] to i32
|
|
|
|
// CHECK1-NEXT: store i32 [[CONV]], i32* [[F]], align 4
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
|
|
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
|
|
|
|
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to double
|
|
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load volatile double, double* @g, align 8
|
|
|
|
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
|
|
|
|
// CHECK1-NEXT: [[CONV2:%.*]] = fptosi double [[ADD]] to i32
|
|
|
|
// CHECK1-NEXT: store i32 [[CONV2]], i32* [[F]], align 4
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK1-NEXT: entry:
|
|
|
|
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK1-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@main
|
|
|
|
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
|
|
|
|
// CHECK2-NEXT: [[T_VAR:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: [[T_VAR1:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
|
|
|
|
// CHECK2-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
|
|
|
|
// CHECK2-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK2-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: store float 0.000000e+00, float* [[T_VAR]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
|
|
|
|
// CHECK2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]], float 3.000000e+00)
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float*, %struct.S*, %struct.S*, float*, [2 x i32]*, [2 x %struct.S]*)* @.omp_outlined. to void (i32*, i32*, ...)*), float* [[T_VAR]], %struct.S* [[VAR]], %struct.S* [[VAR1]], float* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S]* [[S_ARR]])
|
|
|
|
// CHECK2-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
|
|
|
|
// CHECK2-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4:[0-9]+]]
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
|
|
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
|
|
|
|
// CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
|
|
|
|
// CHECK2: arraydestroy.body:
|
|
|
|
// CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
|
|
|
|
// CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
|
|
|
|
// CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
|
|
|
|
// CHECK2: arraydestroy.done1:
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
|
|
|
|
// CHECK2-NEXT: ret i32 [[TMP2]]
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store float [[A]], float* [[A_ADDR]], align 4
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca float*, align 8
|
|
|
|
// CHECK2-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: [[T_VAR1_ADDR:%.*]] = alloca float*, align 8
|
|
|
|
// CHECK2-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
|
|
|
|
// CHECK2-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[T_VAR2:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S:%.*]], align 4
|
|
|
|
// CHECK2-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK2-NEXT: [[T_VAR15:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
|
|
|
|
// CHECK2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK2-NEXT: [[ATOMIC_TEMP:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: [[REF_TMP17:%.*]] = alloca [[STRUCT_S]], align 4
|
|
|
|
// CHECK2-NEXT: [[ATOMIC_TEMP27:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: [[_TMP28:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store float* [[T_VAR]], float** [[T_VAR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[VAR1]], %struct.S** [[VAR1_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store float* [[T_VAR1]], float** [[T_VAR1_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float*, float** [[T_VAR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR1_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load float*, float** [[T_VAR1_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
|
|
|
|
// CHECK2-NEXT: store float 0.000000e+00, float* [[T_VAR2]], align 4
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: store float 0x47EFFFFFE0000000, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
|
|
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 0
|
|
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 0
|
|
|
|
// CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
|
|
// CHECK2: omp.inner.for.cond:
|
|
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
|
|
// CHECK2: omp.inner.for.body:
|
|
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
|
|
|
|
// CHECK2-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
|
|
|
|
// CHECK2-NEXT: ]
|
|
|
|
// CHECK2: .omp.sections.case:
|
|
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load float, float* [[T_VAR2]], align 4
|
|
|
|
// CHECK2-NEXT: [[CONV:%.*]] = fptosi float [[TMP15]] to i32
|
|
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
|
|
|
|
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: [[CONV7:%.*]] = fptosi float [[TMP18]] to i32
|
|
|
|
// CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 1
|
|
|
|
// CHECK2-NEXT: store i32 [[CONV7]], i32* [[ARRAYIDX8]], align 4
|
|
|
|
// CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 1
|
|
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = bitcast %struct.S* [[ARRAYIDX9]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
|
|
|
|
// CHECK2: .omp.sections.exit:
|
|
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
|
|
// CHECK2: omp.inner.for.inc:
|
|
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP21]], 1
|
|
|
|
// CHECK2-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
|
|
// CHECK2: omp.inner.for.end:
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
|
|
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast float* [[T_VAR2]] to i8*
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
|
|
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
|
|
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP27]], i8** [[TMP26]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
|
|
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = bitcast float* [[T_VAR15]] to i8*
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP29]], i8** [[TMP28]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP30:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP31:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP30]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK2-NEXT: switch i32 [[TMP31]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
|
|
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
|
|
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
|
|
// CHECK2-NEXT: ]
|
|
|
|
// CHECK2: .omp.reduction.case1:
|
|
|
|
// CHECK2-NEXT: [[TMP32:%.*]] = load float, float* [[TMP0]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP33:%.*]] = load float, float* [[T_VAR2]], align 4
|
|
|
|
// CHECK2-NEXT: [[ADD:%.*]] = fadd float [[TMP32]], [[TMP33]]
|
|
|
|
// CHECK2-NEXT: store float [[ADD]], float* [[TMP0]], align 4
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TMP34:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast %struct.S* [[CALL]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: [[CALL10:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL10]], 0.000000e+00
|
|
|
|
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
|
|
// CHECK2: land.rhs:
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: [[CALL11:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TOBOOL12:%.*]] = fcmp une float [[CALL11]], 0.000000e+00
|
|
|
|
// CHECK2-NEXT: br label [[LAND_END]]
|
|
|
|
// CHECK2: land.end:
|
|
|
|
// CHECK2-NEXT: [[TMP36:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL12]], [[LAND_RHS]] ]
|
|
|
|
// CHECK2-NEXT: [[CONV13:%.*]] = uitofp i1 [[TMP36]] to float
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV13]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TMP37:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP38:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP37]], i8* align 4 [[TMP38]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TMP39:%.*]] = load float, float* [[TMP3]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP40:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: [[CMP14:%.*]] = fcmp olt float [[TMP39]], [[TMP40]]
|
|
|
|
// CHECK2-NEXT: br i1 [[CMP14]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
|
|
// CHECK2: cond.true:
|
|
|
|
// CHECK2-NEXT: [[TMP41:%.*]] = load float, float* [[TMP3]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
|
|
// CHECK2: cond.false:
|
|
|
|
// CHECK2-NEXT: [[TMP42:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
|
|
// CHECK2: cond.end:
|
|
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi float [ [[TMP41]], [[COND_TRUE]] ], [ [[TMP42]], [[COND_FALSE]] ]
|
|
|
|
// CHECK2-NEXT: store float [[COND]], float* [[TMP3]], align 4
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK2: .omp.reduction.case2:
|
|
|
|
// CHECK2-NEXT: [[TMP43:%.*]] = load float, float* [[T_VAR2]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP44:%.*]] = bitcast float* [[TMP0]] to i32*
|
|
|
|
// CHECK2-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP44]] monotonic, align 4
|
|
|
|
// CHECK2-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
|
|
// CHECK2: atomic_cont:
|
|
|
|
// CHECK2-NEXT: [[TMP45:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP53:%.*]], [[ATOMIC_CONT]] ]
|
|
|
|
// CHECK2-NEXT: [[TMP46:%.*]] = bitcast float* [[ATOMIC_TEMP]] to i32*
|
|
|
|
// CHECK2-NEXT: [[TMP47:%.*]] = bitcast i32 [[TMP45]] to float
|
|
|
|
// CHECK2-NEXT: store float [[TMP47]], float* [[TMP]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP48:%.*]] = load float, float* [[TMP]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP49:%.*]] = load float, float* [[T_VAR2]], align 4
|
|
|
|
// CHECK2-NEXT: [[ADD15:%.*]] = fadd float [[TMP48]], [[TMP49]]
|
|
|
|
// CHECK2-NEXT: store float [[ADD15]], float* [[ATOMIC_TEMP]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP50:%.*]] = load i32, i32* [[TMP46]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP51:%.*]] = bitcast float* [[TMP0]] to i32*
|
|
|
|
// CHECK2-NEXT: [[TMP52:%.*]] = cmpxchg i32* [[TMP51]], i32 [[TMP45]], i32 [[TMP50]] monotonic monotonic, align 4
|
|
|
|
// CHECK2-NEXT: [[TMP53]] = extractvalue { i32, i1 } [[TMP52]], 0
|
|
|
|
// CHECK2-NEXT: [[TMP54:%.*]] = extractvalue { i32, i1 } [[TMP52]], 1
|
|
|
|
// CHECK2-NEXT: br i1 [[TMP54]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
|
|
// CHECK2: atomic_exit:
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: [[CALL16:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TMP55:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP56:%.*]] = bitcast %struct.S* [[CALL16]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP55]], i8* align 4 [[TMP56]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: [[CALL18:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TOBOOL19:%.*]] = fcmp une float [[CALL18]], 0.000000e+00
|
|
|
|
// CHECK2-NEXT: br i1 [[TOBOOL19]], label [[LAND_RHS20:%.*]], label [[LAND_END23:%.*]]
|
|
|
|
// CHECK2: land.rhs20:
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: [[CALL21:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TOBOOL22:%.*]] = fcmp une float [[CALL21]], 0.000000e+00
|
|
|
|
// CHECK2-NEXT: br label [[LAND_END23]]
|
|
|
|
// CHECK2: land.end23:
|
|
|
|
// CHECK2-NEXT: [[TMP57:%.*]] = phi i1 [ false, [[ATOMIC_EXIT]] ], [ [[TOBOOL22]], [[LAND_RHS20]] ]
|
|
|
|
// CHECK2-NEXT: [[CONV24:%.*]] = uitofp i1 [[TMP57]] to float
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP17]], float [[CONV24]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TMP58:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP59:%.*]] = bitcast %struct.S* [[REF_TMP17]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP58]], i8* align 4 [[TMP59]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP17]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK2-NEXT: [[TMP60:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP61:%.*]] = bitcast float* [[TMP3]] to i32*
|
|
|
|
// CHECK2-NEXT: [[ATOMIC_LOAD25:%.*]] = load atomic i32, i32* [[TMP61]] monotonic, align 4
|
|
|
|
// CHECK2-NEXT: br label [[ATOMIC_CONT26:%.*]]
|
|
|
|
// CHECK2: atomic_cont26:
|
|
|
|
// CHECK2-NEXT: [[TMP62:%.*]] = phi i32 [ [[ATOMIC_LOAD25]], [[LAND_END23]] ], [ [[TMP72:%.*]], [[COND_END32:%.*]] ]
|
|
|
|
// CHECK2-NEXT: [[TMP63:%.*]] = bitcast float* [[ATOMIC_TEMP27]] to i32*
|
|
|
|
// CHECK2-NEXT: [[TMP64:%.*]] = bitcast i32 [[TMP62]] to float
|
|
|
|
// CHECK2-NEXT: store float [[TMP64]], float* [[_TMP28]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP65:%.*]] = load float, float* [[_TMP28]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP66:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: [[CMP29:%.*]] = fcmp olt float [[TMP65]], [[TMP66]]
|
|
|
|
// CHECK2-NEXT: br i1 [[CMP29]], label [[COND_TRUE30:%.*]], label [[COND_FALSE31:%.*]]
|
|
|
|
// CHECK2: cond.true30:
|
|
|
|
// CHECK2-NEXT: [[TMP67:%.*]] = load float, float* [[_TMP28]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END32]]
|
|
|
|
// CHECK2: cond.false31:
|
|
|
|
// CHECK2-NEXT: [[TMP68:%.*]] = load float, float* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END32]]
|
|
|
|
// CHECK2: cond.end32:
|
|
|
|
// CHECK2-NEXT: [[COND33:%.*]] = phi float [ [[TMP67]], [[COND_TRUE30]] ], [ [[TMP68]], [[COND_FALSE31]] ]
|
|
|
|
// CHECK2-NEXT: store float [[COND33]], float* [[ATOMIC_TEMP27]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP69:%.*]] = load i32, i32* [[TMP63]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP70:%.*]] = bitcast float* [[TMP3]] to i32*
|
|
|
|
// CHECK2-NEXT: [[TMP71:%.*]] = cmpxchg i32* [[TMP70]], i32 [[TMP62]], i32 [[TMP69]] monotonic monotonic, align 4
|
|
|
|
// CHECK2-NEXT: [[TMP72]] = extractvalue { i32, i1 } [[TMP71]], 0
|
|
|
|
// CHECK2-NEXT: [[TMP73:%.*]] = extractvalue { i32, i1 } [[TMP71]], 1
|
|
|
|
// CHECK2-NEXT: br i1 [[TMP73]], label [[ATOMIC_EXIT34:%.*]], label [[ATOMIC_CONT26]]
|
|
|
|
// CHECK2: atomic_exit34:
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK2: .omp.reduction.default:
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP7]])
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
|
|
|
|
// CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
|
|
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
|
|
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to float*
|
|
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to float*
|
|
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
|
|
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
|
|
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
|
|
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S*
|
|
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
|
|
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S*
|
|
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
|
|
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S*
|
|
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
|
|
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to float*
|
|
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
|
|
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to float*
|
|
|
|
// CHECK2-NEXT: [[TMP30:%.*]] = load float, float* [[TMP11]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP31:%.*]] = load float, float* [[TMP8]], align 4
|
|
|
|
// CHECK2-NEXT: [[ADD:%.*]] = fadd float [[TMP30]], [[TMP31]]
|
|
|
|
// CHECK2-NEXT: store float [[ADD]], float* [[TMP11]], align 4
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP14]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TMP32:%.*]] = bitcast %struct.S* [[TMP17]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP33:%.*]] = bitcast %struct.S* [[CALL]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: [[CALL2:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP23]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL2]], 0.000000e+00
|
|
|
|
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
|
|
// CHECK2: land.rhs:
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: [[CALL3:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP20]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TOBOOL4:%.*]] = fcmp une float [[CALL3]], 0.000000e+00
|
|
|
|
// CHECK2-NEXT: br label [[LAND_END]]
|
|
|
|
// CHECK2: land.end:
|
|
|
|
// CHECK2-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
|
|
|
|
// CHECK2-NEXT: [[CONV:%.*]] = uitofp i1 [[TMP34]] to float
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV]])
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast %struct.S* [[TMP23]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP36:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
|
2021-05-13 23:20:37 +08:00
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
|
2021-05-06 06:13:14 +08:00
|
|
|
// CHECK2-NEXT: [[TMP37:%.*]] = load float, float* [[TMP29]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP38:%.*]] = load float, float* [[TMP26]], align 4
|
|
|
|
// CHECK2-NEXT: [[CMP:%.*]] = fcmp olt float [[TMP37]], [[TMP38]]
|
|
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
|
|
// CHECK2: cond.true:
|
|
|
|
// CHECK2-NEXT: [[TMP39:%.*]] = load float, float* [[TMP29]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
|
|
// CHECK2: cond.false:
|
|
|
|
// CHECK2-NEXT: [[TMP40:%.*]] = load float, float* [[TMP26]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
|
|
// CHECK2: cond.end:
|
|
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi float [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
|
|
|
|
// CHECK2-NEXT: store float [[COND]], float* [[TMP29]], align 4
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEanERKS0_
|
|
|
|
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: ret %struct.S* [[THIS1]]
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEcvfEv
|
|
|
|
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: ret float 0.000000e+00
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
|
|
|
|
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
|
|
|
|
// CHECK2-SAME: () #[[ATTR6]] {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[T:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
|
|
|
|
// CHECK2-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
|
|
|
|
// CHECK2-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
|
|
|
|
// CHECK2-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
|
|
|
|
// CHECK2-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 4
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
|
|
|
|
// CHECK2-NEXT: store i32 0, i32* [[T_VAR]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
|
|
|
|
// CHECK2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
|
|
|
|
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]], i32 3)
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
|
|
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.S.0*, %struct.S.0*, i32*, [2 x i32]*, [2 x %struct.S.0]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[T_VAR]], %struct.S.0* [[VAR]], %struct.S.0* [[VAR1]], i32* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]])
|
|
|
|
// CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
|
|
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
|
|
|
|
// CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
|
|
|
|
// CHECK2: arraydestroy.body:
|
|
|
|
// CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
|
|
|
|
// CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
|
|
|
|
// CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
|
|
|
|
// CHECK2: arraydestroy.done1:
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
|
|
|
|
// CHECK2-NEXT: ret i32 [[TMP2]]
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
|
|
|
|
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load volatile double, double* @g, align 8
|
|
|
|
// CHECK2-NEXT: [[CONV:%.*]] = fptrunc double [[TMP0]] to float
|
|
|
|
// CHECK2-NEXT: store float [[CONV]], float* [[F]], align 4
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
|
|
|
|
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store float [[A]], float* [[A_ADDR]], align 4
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
|
|
|
|
// CHECK2-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to double
|
|
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load volatile double, double* @g, align 8
|
|
|
|
// CHECK2-NEXT: [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
|
|
|
|
// CHECK2-NEXT: [[CONV2:%.*]] = fptrunc double [[ADD]] to float
|
|
|
|
// CHECK2-NEXT: store float [[CONV2]], float* [[F]], align 4
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
|
|
|
|
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
|
|
|
|
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
|
|
|
|
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
|
|
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3]] {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK2-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: [[T_VAR1_ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK2-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
|
|
|
|
// CHECK2-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[T_VAR2:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
|
|
|
|
// CHECK2-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S_0]], align 4
|
|
|
|
// CHECK2-NEXT: [[T_VAR15:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
|
|
|
|
// CHECK2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
|
|
|
|
// CHECK2-NEXT: [[REF_TMP13:%.*]] = alloca [[STRUCT_S_0]], align 4
|
|
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[VAR1]], %struct.S.0** [[VAR1_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i32* [[T_VAR1]], i32** [[T_VAR1_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR1_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[T_VAR1_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 0, i32* [[T_VAR2]], align 4
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
|
|
|
|
// CHECK2-NEXT: store i32 2147483647, i32* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
|
|
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 1
|
|
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 1
|
|
|
|
// CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK2-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
|
|
// CHECK2: omp.inner.for.cond:
|
|
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
|
|
// CHECK2: omp.inner.for.body:
|
|
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
|
|
|
|
// CHECK2-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
|
|
|
|
// CHECK2-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE6:%.*]]
|
|
|
|
// CHECK2-NEXT: ]
|
|
|
|
// CHECK2: .omp.sections.case:
|
|
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR2]], align 4
|
|
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: store i32 [[TMP15]], i32* [[ARRAYIDX]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
|
|
|
|
// CHECK2: .omp.sections.case6:
|
|
|
|
// CHECK2-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP5]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast %struct.S.0* [[ARRAYIDX7]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
|
|
|
|
// CHECK2: .omp.sections.exit:
|
|
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
|
|
// CHECK2: omp.inner.for.inc:
|
|
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP18]], 1
|
|
|
|
// CHECK2-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
|
|
// CHECK2: omp.inner.for.end:
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
|
|
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i32* [[T_VAR2]] to i8*
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP20]], i8** [[TMP19]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
|
|
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
|
|
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = bitcast %struct.S.0* [[VAR14]] to i8*
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP24]], i8** [[TMP23]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
|
|
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i32* [[T_VAR15]] to i8*
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP26]], i8** [[TMP25]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP27]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK2-NEXT: switch i32 [[TMP28]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
|
|
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
|
|
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
|
|
// CHECK2-NEXT: ]
|
|
|
|
// CHECK2: .omp.reduction.case1:
|
|
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP0]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[T_VAR2]], align 4
|
|
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
|
|
|
|
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
|
|
|
|
// CHECK2-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
|
|
|
|
// CHECK2-NEXT: [[TMP31:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP32:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP31]], i8* align 4 [[TMP32]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: [[CALL8:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]])
|
|
|
|
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL8]], 0
|
|
|
|
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
|
|
// CHECK2: land.rhs:
|
|
|
|
// CHECK2-NEXT: [[CALL9:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
|
|
|
|
// CHECK2-NEXT: [[TOBOOL10:%.*]] = icmp ne i32 [[CALL9]], 0
|
|
|
|
// CHECK2-NEXT: br label [[LAND_END]]
|
|
|
|
// CHECK2: land.end:
|
|
|
|
// CHECK2-NEXT: [[TMP33:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL10]], [[LAND_RHS]] ]
|
|
|
|
// CHECK2-NEXT: [[CONV:%.*]] = zext i1 [[TMP33]] to i32
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
|
|
|
|
// CHECK2-NEXT: [[TMP34:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP36]], [[TMP37]]
|
|
|
|
// CHECK2-NEXT: br i1 [[CMP11]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
|
|
// CHECK2: cond.true:
|
|
|
|
// CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
|
|
// CHECK2: cond.false:
|
|
|
|
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
|
|
// CHECK2: cond.end:
|
|
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP38]], [[COND_TRUE]] ], [ [[TMP39]], [[COND_FALSE]] ]
|
|
|
|
// CHECK2-NEXT: store i32 [[COND]], i32* [[TMP3]], align 4
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK2: .omp.reduction.case2:
|
|
|
|
// CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[T_VAR2]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP41:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP40]] monotonic, align 4
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK2-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
|
|
|
|
// CHECK2-NEXT: [[TMP42:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP43:%.*]] = bitcast %struct.S.0* [[CALL12]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK2-NEXT: [[CALL14:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]])
|
|
|
|
// CHECK2-NEXT: [[TOBOOL15:%.*]] = icmp ne i32 [[CALL14]], 0
|
|
|
|
// CHECK2-NEXT: br i1 [[TOBOOL15]], label [[LAND_RHS16:%.*]], label [[LAND_END19:%.*]]
|
|
|
|
// CHECK2: land.rhs16:
|
|
|
|
// CHECK2-NEXT: [[CALL17:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
|
|
|
|
// CHECK2-NEXT: [[TOBOOL18:%.*]] = icmp ne i32 [[CALL17]], 0
|
|
|
|
// CHECK2-NEXT: br label [[LAND_END19]]
|
|
|
|
// CHECK2: land.end19:
|
|
|
|
// CHECK2-NEXT: [[TMP44:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL18]], [[LAND_RHS16]] ]
|
|
|
|
// CHECK2-NEXT: [[CONV20:%.*]] = zext i1 [[TMP44]] to i32
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP13]], i32 [[CONV20]])
|
|
|
|
// CHECK2-NEXT: [[TMP45:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP46:%.*]] = bitcast %struct.S.0* [[REF_TMP13]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP45]], i8* align 4 [[TMP46]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP13]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
|
|
|
|
// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[T_VAR15]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP48:%.*]] = atomicrmw min i32* [[TMP3]], i32 [[TMP47]] monotonic, align 4
|
|
|
|
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK2: .omp.reduction.default:
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
|
|
|
|
// CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
|
|
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
|
|
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
|
|
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
|
|
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
|
|
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
|
|
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
|
|
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
|
|
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S.0*
|
|
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
|
|
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S.0*
|
|
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
|
|
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S.0*
|
|
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
|
|
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to i32*
|
|
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
|
|
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
|
|
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to i32*
|
|
|
|
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP11]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP8]], align 4
|
|
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
|
|
|
|
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
|
|
|
|
// CHECK2-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]])
|
|
|
|
// CHECK2-NEXT: [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP17]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP33:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: [[CALL2:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP23]])
|
|
|
|
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL2]], 0
|
|
|
|
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
|
|
// CHECK2: land.rhs:
|
|
|
|
// CHECK2-NEXT: [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP20]])
|
|
|
|
// CHECK2-NEXT: [[TOBOOL4:%.*]] = icmp ne i32 [[CALL3]], 0
|
|
|
|
// CHECK2-NEXT: br label [[LAND_END]]
|
|
|
|
// CHECK2: land.end:
|
|
|
|
// CHECK2-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
|
|
|
|
// CHECK2-NEXT: [[CONV:%.*]] = zext i1 [[TMP34]] to i32
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
|
|
|
|
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP23]] to i8*
|
|
|
|
// CHECK2-NEXT: [[TMP36:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
|
|
|
|
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP29]], align 4
|
|
|
|
// CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP26]], align 4
|
|
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]]
|
|
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
|
|
// CHECK2: cond.true:
|
|
|
|
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[TMP29]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
|
|
// CHECK2: cond.false:
|
|
|
|
// CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[TMP26]], align 4
|
|
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
|
|
// CHECK2: cond.end:
|
|
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
|
|
|
|
// CHECK2-NEXT: store i32 [[COND]], i32* [[TMP29]], align 4
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEanERKS0_
|
|
|
|
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: ret %struct.S.0* [[THIS1]]
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEcviEv
|
|
|
|
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: ret i32 0
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
|
|
|
|
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
|
|
|
|
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load volatile double, double* @g, align 8
|
|
|
|
// CHECK2-NEXT: [[CONV:%.*]] = fptosi double [[TMP0]] to i32
|
|
|
|
// CHECK2-NEXT: store i32 [[CONV]], i32* [[F]], align 4
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
|
|
|
|
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
|
|
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
|
|
|
|
// CHECK2-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to double
|
|
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load volatile double, double* @g, align 8
|
|
|
|
// CHECK2-NEXT: [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
|
|
|
|
// CHECK2-NEXT: [[CONV2:%.*]] = fptosi double [[ADD]] to i32
|
|
|
|
// CHECK2-NEXT: store i32 [[CONV2]], i32* [[F]], align 4
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
|
|
|
|
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
|
|
|
|
// CHECK2-NEXT: entry:
|
|
|
|
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
|
|
|
|
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
|
|
|
|
// CHECK2-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK3-LABEL: define {{[^@]+}}@main
|
|
|
|
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
|
|
|
|
// CHECK3-NEXT: entry:
|
|
|
|
// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
|
|
|
|
// CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
|
|
|
// CHECK3-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
|
|
|
|
// CHECK3-NEXT: ret i32 0
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2:[0-9]+]] {
|
|
|
|
// CHECK3-NEXT: entry:
|
|
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK3-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK3-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK3-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK3-NEXT: [[G:%.*]] = alloca double, align 8
|
|
|
|
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
|
|
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
|
|
|
|
// CHECK3-NEXT: [[ATOMIC_TEMP:%.*]] = alloca double, align 8
|
|
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca double, align 8
|
|
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
|
|
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
|
|
|
|
// CHECK3-NEXT: store double 0.000000e+00, double* [[G]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
|
|
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
|
|
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1
|
|
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
|
|
|
|
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
|
|
// CHECK3: omp.inner.for.cond:
|
|
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
|
|
// CHECK3: omp.inner.for.body:
|
|
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK3-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
|
|
|
|
// CHECK3-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
|
|
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
|
|
|
|
// CHECK3-NEXT: ]
|
|
|
|
// CHECK3: .omp.sections.case:
|
|
|
|
// CHECK3-NEXT: store double 1.000000e+00, double* [[G]], align 8
|
|
|
|
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
|
|
|
|
// CHECK3: .omp.sections.case1:
|
|
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
|
|
|
|
// CHECK3-NEXT: store double* [[G]], double** [[TMP9]], align 8
|
|
|
|
// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(8) [[REF_TMP]])
|
|
|
|
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
|
|
|
|
// CHECK3: .omp.sections.exit:
|
|
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
|
|
// CHECK3: omp.inner.for.inc:
|
|
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1
|
|
|
|
// CHECK3-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
|
|
// CHECK3: omp.inner.for.end:
|
|
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
|
|
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = bitcast double* [[G]] to i8*
|
|
|
|
// CHECK3-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
|
|
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 1, i64 8, i8* [[TMP13]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK3-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
|
|
// CHECK3-NEXT: ]
|
|
|
|
// CHECK3: .omp.reduction.case1:
|
|
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load double, double* @g, align 8
|
|
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load double, double* [[G]], align 8
|
|
|
|
// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[TMP15]], [[TMP16]]
|
|
|
|
// CHECK3-NEXT: store double [[ADD]], double* @g, align 8
|
|
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK3: .omp.reduction.case2:
|
|
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load double, double* [[G]], align 8
|
|
|
|
// CHECK3-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i64, i64* bitcast (double* @g to i64*) monotonic, align 8
|
|
|
|
// CHECK3-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
|
|
// CHECK3: atomic_cont:
|
|
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = phi i64 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP25:%.*]], [[ATOMIC_CONT]] ]
|
|
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = bitcast double* [[ATOMIC_TEMP]] to i64*
|
|
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = bitcast i64 [[TMP18]] to double
|
|
|
|
// CHECK3-NEXT: store double [[TMP20]], double* [[TMP]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load double, double* [[TMP]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load double, double* [[G]], align 8
|
|
|
|
// CHECK3-NEXT: [[ADD2:%.*]] = fadd double [[TMP21]], [[TMP22]]
|
|
|
|
// CHECK3-NEXT: store double [[ADD2]], double* [[ATOMIC_TEMP]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i64, i64* [[TMP19]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = cmpxchg i64* bitcast (double* @g to i64*), i64 [[TMP18]], i64 [[TMP23]] monotonic monotonic, align 8
|
|
|
|
// CHECK3-NEXT: [[TMP25]] = extractvalue { i64, i1 } [[TMP24]], 0
|
|
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = extractvalue { i64, i1 } [[TMP24]], 1
|
|
|
|
// CHECK3-NEXT: br i1 [[TMP26]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
|
|
// CHECK3: atomic_exit:
|
|
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
|
|
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
|
|
// CHECK3: .omp.reduction.default:
|
|
|
|
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP1]])
|
|
|
|
// CHECK3-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
|
|
|
|
// CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
|
|
|
|
// CHECK3-NEXT: entry:
|
|
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
|
|
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
|
|
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
|
|
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*
|
|
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
|
|
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*
|
|
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
|
|
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load double, double* [[TMP8]], align 8
|
|
|
|
// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
|
|
|
|
// CHECK3-NEXT: store double [[ADD]], double* [[TMP11]], align 8
|
|
|
|
// CHECK3-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK4-LABEL: define {{[^@]+}}@main
|
|
|
|
// CHECK4-SAME: () #[[ATTR1:[0-9]+]] {
|
|
|
|
// CHECK4-NEXT: entry:
|
|
|
|
// CHECK4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4
|
|
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
|
|
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
|
|
|
|
// CHECK4-NEXT: call void [[TMP1]](i8* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
|
|
|
|
// CHECK4-NEXT: ret i32 0
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke
|
|
|
|
// CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
|
|
|
|
// CHECK4-NEXT: entry:
|
|
|
|
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
|
|
|
|
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
|
|
|
|
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
|
|
|
|
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
|
|
|
|
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
|
|
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
|
|
|
|
// CHECK4-NEXT: ret void
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
|
|
// CHECK4-NEXT: entry:
|
|
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK4-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK4-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK4-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK4-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK4-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
|
|
|
|
// CHECK4-NEXT: [[G:%.*]] = alloca double, align 8
|
|
|
|
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, align 8
|
|
|
|
// CHECK4-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
|
|
|
|
// CHECK4-NEXT: [[ATOMIC_TEMP:%.*]] = alloca double, align 8
|
|
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca double, align 8
|
|
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
|
|
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
|
|
|
|
// CHECK4-NEXT: store double 0.000000e+00, double* [[G]], align 8
|
|
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
|
|
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
|
|
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1
|
|
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
|
|
|
|
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
|
|
|
|
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
|
|
// CHECK4: omp.inner.for.cond:
|
|
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
|
|
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
|
|
// CHECK4: omp.inner.for.body:
|
|
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
|
|
|
|
// CHECK4-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
|
|
|
|
// CHECK4-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
|
|
|
|
// CHECK4-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
|
|
|
|
// CHECK4-NEXT: ]
|
|
|
|
// CHECK4: .omp.sections.case:
|
|
|
|
// CHECK4-NEXT: store double 1.000000e+00, double* [[G]], align 8
|
|
|
|
// CHECK4-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
|
|
|
|
// CHECK4: .omp.sections.case1:
|
|
|
|
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 1
// CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 2
// CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 3
// CHECK4-NEXT: store i8* bitcast (void (i8*)* @_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP9:%.*]] = load volatile double, double* [[G]], align 8
// CHECK4-NEXT: store volatile double [[TMP9]], double* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP10]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP12:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP11]], align 8
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP14]](i8* [[TMP12]])
// CHECK4-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK4: .omp.sections.exit:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK4-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast double* [[G]] to i8*
// CHECK4-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK4-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK4-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK4-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK4-NEXT: ]
// CHECK4: .omp.reduction.case1:
// CHECK4-NEXT: [[TMP20:%.*]] = load double, double* @g, align 8
// CHECK4-NEXT: [[TMP21:%.*]] = load double, double* [[G]], align 8
// CHECK4-NEXT: [[ADD:%.*]] = fadd double [[TMP20]], [[TMP21]]
// CHECK4-NEXT: store double [[ADD]], double* @g, align 8
// CHECK4-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.case2:
// CHECK4-NEXT: [[TMP22:%.*]] = load double, double* [[G]], align 8
// CHECK4-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i64, i64* bitcast (double* @g to i64*) monotonic, align 8
// CHECK4-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK4: atomic_cont:
// CHECK4-NEXT: [[TMP23:%.*]] = phi i64 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP30:%.*]], [[ATOMIC_CONT]] ]
// CHECK4-NEXT: [[TMP24:%.*]] = bitcast double* [[ATOMIC_TEMP]] to i64*
// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i64 [[TMP23]] to double
// CHECK4-NEXT: store double [[TMP25]], double* [[TMP]], align 8
// CHECK4-NEXT: [[TMP26:%.*]] = load double, double* [[TMP]], align 8
// CHECK4-NEXT: [[TMP27:%.*]] = load double, double* [[G]], align 8
// CHECK4-NEXT: [[ADD2:%.*]] = fadd double [[TMP26]], [[TMP27]]
// CHECK4-NEXT: store double [[ADD2]], double* [[ATOMIC_TEMP]], align 8
// CHECK4-NEXT: [[TMP28:%.*]] = load i64, i64* [[TMP24]], align 8
// CHECK4-NEXT: [[TMP29:%.*]] = cmpxchg i64* bitcast (double* @g to i64*), i64 [[TMP23]], i64 [[TMP28]] monotonic monotonic, align 8
// CHECK4-NEXT: [[TMP30]] = extractvalue { i64, i1 } [[TMP29]], 0
// CHECK4-NEXT: [[TMP31:%.*]] = extractvalue { i64, i1 } [[TMP29]], 1
// CHECK4-NEXT: br i1 [[TMP31]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK4: atomic_exit:
// CHECK4-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.default:
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP1]])
// CHECK4-NEXT: ret void
//
//
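// The assertions above track the field-by-field construction of the on-stack
// block literal that captures the private reduction copy of 'g' and is then
// invoked through the generic block layout. As a rough, hedged sketch only
// (not part of the autogenerated assertions; the struct and field names below
// are illustrative and appear nowhere in this test), the record being filled
// in looks roughly like:
//
//   struct BlockDescriptorSketch;        // size/layout metadata, defined elsewhere
//   struct BlockLiteralSketch {
//     void *isa;                         // _NSConcreteStackBlock
//     int flags;                         // 1073741824 (0x40000000) flag bits
//     int reserved;                      // 0
//     void (*invoke)(void *);            // _block_invoke
//     BlockDescriptorSketch *descriptor; // @__block_descriptor_tmp.1
//     double captured_g;                 // by-copy capture of the private 'g'
//   };
//
// The call site then loads the 'invoke' slot through the generic layout and
// calls it with the block literal itself as the argument, which is what the
// trailing load/bitcast/call sequence above checks.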
// CHECK4-LABEL: define {{[^@]+}}@_block_invoke
// CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>*, align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: store double 2.000000e+00, double* [[BLOCK_CAPTURE_ADDR]], align 8
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK4-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK4-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*
// CHECK4-NEXT: [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
// CHECK4-NEXT: [[TMP13:%.*]] = load double, double* [[TMP8]], align 8
// CHECK4-NEXT: [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
// CHECK4-NEXT: store double [[ADD]], double* [[TMP11]], align 8
// CHECK4-NEXT: ret void
//
//
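// For reference, the '.omp.reduction.case2' assertions earlier in this file
// describe the atomic fallback path: when __kmpc_reduce returns 2, the private
// partial result is folded into the global 'g' through a monotonic (relaxed)
// compare-exchange loop on the value reinterpreted as i64. A minimal, hedged
// C++ sketch of that pattern (illustrative names only, not part of the
// autogenerated assertions):
//
//   #include <atomic>
//
//   void fold_relaxed(std::atomic<double> &global_g, double private_g) {
//     double expected = global_g.load(std::memory_order_relaxed);
//     double desired;
//     do {
//       desired = expected + private_g;   // matches the fadd in the loop body
//     } while (!global_g.compare_exchange_weak(expected, desired,
//                                              std::memory_order_relaxed,
//                                              std::memory_order_relaxed));
//   }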