// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK2

// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3

// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -gno-column-info -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK6

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
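// The -fopenmp-simd runs above (and the OMP5 ones below) pipe into FileCheck
// with --implicit-check-not="{{__kmpc|__tgt}}", i.e. they verify that no OpenMP
// runtime or offloading calls are emitted when only the simd subset is enabled.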
// expected-no-diagnostics

// RUN: %clang_cc1 -verify -fopenmp -DOMP5 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -fopenmp -DOMP5 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -DOMP5 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12
// RUN: %clang_cc1 -verify -fopenmp-simd -DOMP5 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -DOMP5 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -DOMP5 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
#ifndef HEADER
#define HEADER

#ifndef OMP5

void with_var_schedule() {
double a = 5;

#pragma omp parallel for schedule(static, char(a)) private(a)
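// The chunk size is the non-constant expression char(a): it is evaluated before
// the region, converted to i8 and captured, then forwarded to the outlined
// function, which sign-extends it and passes it as the chunk argument of
// __kmpc_for_static_init_8u (see the CHECK1 lines below).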
for (unsigned long long i = 1; i < 2 + a; ++i) {
}
}

void without_schedule_clause(float *a, float *b, float *c, float *d) {
#pragma omp parallel for
// UB = min(UB, GlobalUB)
// Loop header
for (int i = 33; i < 32000000; i += 7) {
// Start of body: calculate i from IV:
// ... loop body ...
// End of body: store into a[i]:
a[i] = b[i] * c[i] * d[i];
}
}

void static_not_chunked(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(static)
// UB = min(UB, GlobalUB)
// Loop header
for (int i = 32000000; i > 33; i += -7) {
// Start of body: calculate i from IV:
// ... loop body ...
// End of body: store into a[i]:
a[i] = b[i] * c[i] * d[i];
}
}

void static_chunked(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(static, 5)
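// With an explicit chunk the static schedule needs an outer dispatch loop:
// the init call (CHECK1) uses schedule kind 33 with chunk 5 and the threads
// stride from chunk to chunk in omp.dispatch.cond/omp.dispatch.inc, while the
// unchunked loops above use kind 34 and run their whole range in one pass
// (33/34 presumably being kmp_sch_static_chunked / kmp_sch_static).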
// UB = min(UB, GlobalUB)

// Outer loop header

// Loop header
for (unsigned i = 131071; i <= 2147483647; i += 127) {
// Start of body: calculate i from IV:
// ... loop body ...
// End of body: store into a[i]:
a[i] = b[i] * c[i] * d[i];
}
// Update the counters, adding stride

}

void dynamic1(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(dynamic)
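// dynamic (and guided below) are not precomputed: the outlined function calls
// __kmpc_dispatch_init_8u once and then repeatedly asks the runtime for work
// via __kmpc_dispatch_next_8u, executing the inner loop for each chunk it gets.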

// Loop header

for (unsigned long long i = 131071; i < 2147483647; i += 127) {
// Start of body: calculate i from IV:
// ... loop body ...
// End of body: store into a[i]:
a[i] = b[i] * c[i] * d[i];
}
}

void guided7(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(guided, 7)

// Loop header

for (unsigned long long i = 131071; i < 2147483647; i += 127) {
// Start of body: calculate i from IV:
// ... loop body ...
// End of body: store into a[i]:
a[i] = b[i] * c[i] * d[i];
}
}

void test_auto(float *a, float *b, float *c, float *d) {
unsigned int x = 0;
unsigned int y = 0;
#pragma omp parallel for schedule(auto) collapse(2)
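// collapse(2) folds both loops into a single 64-bit logical iteration space:
// the precomputed trip count multiplies the outer count by 11 (the inner loop
// always runs 11 times) and the body recomputes i and x from that one IV.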

// Loop header

// FIXME: When the iteration count of some nested loop is not a known constant,
// we should pre-calculate it, like we do for the total number of iterations!
for (char i = static_cast<char>(y); i <= '9'; ++i)
for (x = 11; x > 0; --x) {
// Start of body: indices are calculated from IV:
// ... loop body ...
// End of body: store into a[i]:
a[i] = b[i] * c[i] * d[i];
}
}

void runtime(float *a, float *b, float *c, float *d) {
int x = 0;
#pragma omp parallel for collapse(2) schedule(runtime)

// Loop header

for (unsigned char i = '0' ; i <= '9'; ++i)
for (x = -10; x < 10; ++x) {
// Start of body: indices are calculated from IV:
// ... loop body ...
// End of body: store into a[i]:
a[i] = b[i] * c[i] * d[i];
}
}

int foo() { extern void mayThrow(); mayThrow(); return 0; };

void parallel_for(float *a, const int n) {
float arr[n];
#pragma omp parallel for schedule(static, 5) private(arr) default(none) firstprivate(n) shared(a)
for (unsigned i = 131071; i <= 2147483647; i += 127)
a[i] += foo() + arr[i] + n;
}
// Check source line corresponds to "#pragma omp parallel for schedule(static, 5)" above:

#else // OMP5
int increment () {
#pragma omp for
// Determine UB = min(UB, GlobalUB)

// Loop header

for (int i = 0 ; i != 5; ++i)
// Start of body: calculate i from IV:
;
return 0;
}

int decrement_nowait () {
#pragma omp for nowait
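// nowait drops the implicit barrier at the end of the worksharing loop, so
// unlike increment() above there should be no __kmpc_barrier call after
// __kmpc_for_static_fini here.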
// Determine UB = min(UB, GlobalUB)

// Loop header
for (int j = 5 ; j != 0; --j)
// Start of body: calculate i from IV:
;
return 0;
}

void range_for_single() {
int arr[10] = {0};
#pragma omp parallel for
for (auto &a : arr)
(void)a;
}

// __range = arr;

// __end = end(__range);

// calculate number of elements.

// __begin = begin(__range);
// __begin >= __end ? goto then : goto exit;

// lb = 0;

// ub = number of elements

// stride = 1;

// is_last = 0;

// loop.

// ub = (ub > number_of_elems ? number_of_elems : ub);

// OMP%: store i64 [[MIN]], i64* [[UB]],

// iv = lb;

// goto loop;
// loop:

// iv <= ub ? goto body : goto end;

// body:
// __begin = begin(arr) + iv * 1;

// a = *__begin;

// (void)a;

// iv += 1;

// goto loop;

// end:
// exit:

void range_for_collapsed() {
int arr[10] = {0};
#pragma omp parallel for collapse(2)
for (auto &a : arr)
for (auto b : arr)
a = b;
}

#endif // OMP5

#endif // HEADER

// CHECK1-LABEL: define {{[^@]+}}@_Z17with_var_schedulev
|
|
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: store double 5.000000e+00, double* [[A]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load double, double* [[A]], align 8
|
|
// CHECK1-NEXT: [[CONV:%.*]] = fptosi double [[TMP0]] to i8
|
|
// CHECK1-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
|
|
// CHECK1-NEXT: store i8 [[TMP1]], i8* [[CONV1]], align 1
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP2]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca double, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK1-NEXT: [[I5:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load double, double* undef, align 8
|
|
// CHECK1-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP0]]
|
|
// CHECK1-NEXT: store double [[ADD]], double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK1-NEXT: [[SUB:%.*]] = fsub double [[TMP1]], 1.000000e+00
|
|
// CHECK1-NEXT: [[DIV:%.*]] = fdiv double [[SUB]], 1.000000e+00
|
|
// CHECK1-NEXT: [[CONV3:%.*]] = fptoui double [[DIV]] to i64
|
|
// CHECK1-NEXT: [[SUB4:%.*]] = sub i64 [[CONV3]], 1
|
|
// CHECK1-NEXT: store i64 [[SUB4]], i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: store i64 1, i64* [[I]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK1-NEXT: [[CMP:%.*]] = fcmp olt double 1.000000e+00, [[TMP2]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 8
|
|
// CHECK1-NEXT: [[CONV6:%.*]] = sext i8 [[TMP4]] to i64
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV6]])
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: [[CMP7:%.*]] = icmp ugt i64 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: [[ADD8:%.*]] = add i64 [[TMP13]], 1
|
|
// CHECK1-NEXT: [[CMP9:%.*]] = icmp ult i64 [[TMP12]], [[ADD8]]
|
|
// CHECK1-NEXT: br i1 [[CMP9]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: [[ADD10:%.*]] = add i64 [[TMP15]], 1
|
|
// CHECK1-NEXT: [[CMP11:%.*]] = icmp ult i64 [[TMP14]], [[ADD10]]
|
|
// CHECK1-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP16]], 1
|
|
// CHECK1-NEXT: [[ADD12:%.*]] = add i64 1, [[MUL]]
|
|
// CHECK1-NEXT: store i64 [[ADD12]], i64* [[I5]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[ADD13:%.*]] = add i64 [[TMP17]], 1
|
|
// CHECK1-NEXT: store i64 [[ADD13]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: [[ADD14:%.*]] = add i64 [[TMP18]], [[TMP19]]
|
|
// CHECK1-NEXT: store i64 [[ADD14]], i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: [[ADD15:%.*]] = add i64 [[TMP20]], [[TMP21]]
|
|
// CHECK1-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
|
|
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK1-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK1-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK1-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
|
|
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[SUB]], i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK1-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK1-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK1-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
|
|
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM3]]
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4
|
|
// CHECK1-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]]
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[IDXPROM6]]
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load float, float* [[ARRAYIDX7]], align 4
|
|
// CHECK1-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]]
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM9]]
|
|
// CHECK1-NEXT: store float [[MUL8]], float* [[ARRAYIDX10]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]]
|
|
// CHECK1-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]]
|
|
// CHECK1-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
|
|
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..4 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 1073741859, i64 0, i64 16908287, i64 1, i64 1)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK1-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK1-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK1-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !5
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK1-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z7guided7PfS_S_S_
|
|
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 1073741860, i64 0, i64 16908287, i64 1, i64 7)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK1-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK1-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK1-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !8
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK1-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
|
|
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK1-NEXT: store i32 0, i32* [[Y]], align 4
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[Y]], float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I7:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[X8:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[TMP5]] to i8
|
|
// CHECK1-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: [[CONV3:%.*]] = sext i8 [[TMP6]] to i32
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
|
|
// CHECK1-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
|
|
// CHECK1-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK1-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: store i8 [[TMP7]], i8* [[I]], align 1
|
|
// CHECK1-NEXT: store i32 11, i32* [[X]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK1-NEXT: [[CONV6:%.*]] = sext i8 [[TMP8]] to i32
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV6]], 57
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 1073741862, i64 0, i64 [[TMP10]], i64 1, i64 1)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[CMP9:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]]
|
|
// CHECK1-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[CONV10:%.*]] = sext i8 [[TMP19]] to i64
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[DIV11:%.*]] = sdiv i64 [[TMP20]], 11
|
|
// CHECK1-NEXT: [[MUL12:%.*]] = mul nsw i64 [[DIV11]], 1
|
|
// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i64 [[CONV10]], [[MUL12]]
|
|
// CHECK1-NEXT: [[CONV14:%.*]] = trunc i64 [[ADD13]] to i8
|
|
// CHECK1-NEXT: store i8 [[CONV14]], i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[DIV15:%.*]] = sdiv i64 [[TMP22]], 11
|
|
// CHECK1-NEXT: [[MUL16:%.*]] = mul nsw i64 [[DIV15]], 11
|
|
// CHECK1-NEXT: [[SUB17:%.*]] = sub nsw i64 [[TMP21]], [[MUL16]]
|
|
// CHECK1-NEXT: [[MUL18:%.*]] = mul nsw i64 [[SUB17]], 1
|
|
// CHECK1-NEXT: [[SUB19:%.*]] = sub nsw i64 11, [[MUL18]]
|
|
// CHECK1-NEXT: [[CONV20:%.*]] = trunc i64 [[SUB19]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV20]], i32* [[X8]], align 4, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP24]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[IDXPROM21:%.*]] = sext i8 [[TMP27]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds float, float* [[TMP26]], i64 [[IDXPROM21]]
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = load float, float* [[ARRAYIDX22]], align 4, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[MUL23:%.*]] = fmul float [[TMP25]], [[TMP28]]
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = load float*, float** [[TMP4]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP30]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, float* [[TMP29]], i64 [[IDXPROM24]]
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = load float, float* [[ARRAYIDX25]], align 4, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[MUL26:%.*]] = fmul float [[MUL23]], [[TMP31]]
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP33]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, float* [[TMP32]], i64 [[IDXPROM27]]
|
|
// CHECK1-NEXT: store float [[MUL26]], float* [[ARRAYIDX28]], align 4, !llvm.access.group !11
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: [[ADD29:%.*]] = add nsw i64 [[TMP34]], 1
|
|
// CHECK1-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
|
|
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 1073741861, i32 0, i32 199, i32 1, i32 1)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 20
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
|
|
// CHECK1-NEXT: store i8 [[CONV]], i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP12]], 20
|
|
// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 20
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL3]]
|
|
// CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 -10, [[MUL4]]
|
|
// CHECK1-NEXT: store i32 [[ADD5]], i32* [[X]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP14]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP13]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP17]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM6]]
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX7]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[MUL8:%.*]] = fmul float [[TMP15]], [[TMP18]]
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP20]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP19]], i64 [[IDXPROM9]]
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[MUL11:%.*]] = fmul float [[MUL8]], [[TMP21]]
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP23]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, float* [[TMP22]], i64 [[IDXPROM12]]
|
|
// CHECK1-NEXT: store float [[MUL11]], float* [[ARRAYIDX13]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP24]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z3foov
|
|
// CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: call void @_Z8mayThrowv()
|
|
// CHECK1-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z12parallel_forPfi
|
|
// CHECK1-SAME: (float* [[A:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
|
|
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, i64, i64)* @.omp_outlined..8 to void (i32*, i32*, ...)*), float** [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]])
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP5]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..8
|
|
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], i64 [[VLA:%.*]], i64 [[N:%.*]]) #[[ATTR1]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK1-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK1: omp.dispatch.cond:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP5]], 16908288
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_CLEANUP:%.*]]
|
|
// CHECK1: omp.dispatch.cleanup:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK1: omp.dispatch.body:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP3:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
|
|
// CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
|
|
// CHECK1: omp.inner.for.cond.cleanup:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 127
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[CALL:%.*]] = invoke i32 @_Z3foov()
|
|
// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
|
|
// CHECK1: invoke.cont:
|
|
// CHECK1-NEXT: [[CONV4:%.*]] = sitofp i32 [[CALL]] to float
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP13]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[VLA1]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[ADD5:%.*]] = fadd float [[CONV4]], [[TMP14]]
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[CONV]], align 8
|
|
// CHECK1-NEXT: [[CONV6:%.*]] = sitofp i32 [[TMP15]] to float
|
|
// CHECK1-NEXT: [[ADD7:%.*]] = fadd float [[ADD5]], [[CONV6]]
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM8:%.*]] = zext i32 [[TMP17]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM8]]
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4
|
|
// CHECK1-NEXT: [[ADD10:%.*]] = fadd float [[TMP18]], [[ADD7]]
|
|
// CHECK1-NEXT: store float [[ADD10]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP19]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK1: omp.dispatch.inc:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[TMP20]], [[TMP21]]
|
|
// CHECK1-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[TMP23]]
|
|
// CHECK1-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK1: omp.dispatch.end:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP24]])
|
|
// CHECK1-NEXT: ret void
|
|
// CHECK1: terminate.lpad:
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = landingpad { i8*, i32 }
|
|
// CHECK1-NEXT: catch i8* null
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = extractvalue { i8*, i32 } [[TMP25]], 0
|
|
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP26]]) #[[ATTR7:[0-9]+]]
|
|
// CHECK1-NEXT: unreachable
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate
|
|
// CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] comdat {
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
|
|
// CHECK1-NEXT: unreachable
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z17with_var_schedulev
|
|
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: store double 5.000000e+00, double* [[A]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load double, double* [[A]], align 8
|
|
// CHECK2-NEXT: [[CONV:%.*]] = fptosi double [[TMP0]] to i8
|
|
// CHECK2-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
|
|
// CHECK2-NEXT: store i8 [[TMP1]], i8* [[CONV1]], align 1
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP2]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca double, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK2-NEXT: [[I5:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load double, double* undef, align 8
|
|
// CHECK2-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP0]]
|
|
// CHECK2-NEXT: store double [[ADD]], double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK2-NEXT: [[SUB:%.*]] = fsub double [[TMP1]], 1.000000e+00
|
|
// CHECK2-NEXT: [[DIV:%.*]] = fdiv double [[SUB]], 1.000000e+00
|
|
// CHECK2-NEXT: [[CONV3:%.*]] = fptoui double [[DIV]] to i64
|
|
// CHECK2-NEXT: [[SUB4:%.*]] = sub i64 [[CONV3]], 1
|
|
// CHECK2-NEXT: store i64 [[SUB4]], i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK2-NEXT: store i64 1, i64* [[I]], align 8
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK2-NEXT: [[CMP:%.*]] = fcmp olt double 1.000000e+00, [[TMP2]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 8
|
|
// CHECK2-NEXT: [[CONV6:%.*]] = sext i8 [[TMP4]] to i64
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV6]])
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK2: omp.dispatch.cond:
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK2-NEXT: [[CMP7:%.*]] = icmp ugt i64 [[TMP7]], [[TMP8]]
|
|
// CHECK2-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i64 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: store i64 [[TMP11]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: [[ADD8:%.*]] = add i64 [[TMP13]], 1
|
|
// CHECK2-NEXT: [[CMP9:%.*]] = icmp ult i64 [[TMP12]], [[ADD8]]
|
|
// CHECK2-NEXT: br i1 [[CMP9]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK2: omp.dispatch.body:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: [[ADD10:%.*]] = add i64 [[TMP15]], 1
|
|
// CHECK2-NEXT: [[CMP11:%.*]] = icmp ult i64 [[TMP14]], [[ADD10]]
|
|
// CHECK2-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul i64 [[TMP16]], 1
|
|
// CHECK2-NEXT: [[ADD12:%.*]] = add i64 1, [[MUL]]
|
|
// CHECK2-NEXT: store i64 [[ADD12]], i64* [[I5]], align 8
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK2-NEXT: [[ADD13:%.*]] = add i64 [[TMP17]], 1
|
|
// CHECK2-NEXT: store i64 [[ADD13]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK2: omp.dispatch.inc:
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK2-NEXT: [[ADD14:%.*]] = add i64 [[TMP18]], [[TMP19]]
|
|
// CHECK2-NEXT: store i64 [[ADD14]], i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK2-NEXT: [[ADD15:%.*]] = add i64 [[TMP20]], [[TMP21]]
|
|
// CHECK2-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK2: omp.dispatch.end:
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
|
|
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK2-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK2-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK2-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK2-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
|
|
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
|
|
// CHECK2-NEXT: store i32 [[SUB]], i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK2-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK2-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK2-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
|
|
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK2: omp.dispatch.cond:
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
|
|
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK2: omp.dispatch.body:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
|
|
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM3]]
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4
|
|
// CHECK2-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]]
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[IDXPROM6]]
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load float, float* [[ARRAYIDX7]], align 4
|
|
// CHECK2-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]]
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM9]]
|
|
// CHECK2-NEXT: store float [[MUL8]], float* [[ARRAYIDX10]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1
|
|
// CHECK2-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK2: omp.dispatch.inc:
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]]
|
|
// CHECK2-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]]
|
|
// CHECK2-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK2: omp.dispatch.end:
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
|
|
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..4 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 35, i64 0, i64 16908287, i64 1, i64 1)
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK2: omp.dispatch.cond:
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK2: omp.dispatch.body:
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK2-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK2-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK2-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !5
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK2-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK2: omp.dispatch.inc:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK2: omp.dispatch.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z7guided7PfS_S_S_
|
|
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..5
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 36, i64 0, i64 16908287, i64 1, i64 7)
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK2: omp.dispatch.cond:
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK2: omp.dispatch.body:
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK2-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK2-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK2-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !8
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK2-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK2: omp.dispatch.inc:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK2: omp.dispatch.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
|
|
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[Y:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK2-NEXT: store i32 0, i32* [[Y]], align 4
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[Y]], float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK2-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I7:%.*]] = alloca i8, align 1
|
|
// CHECK2-NEXT: [[X8:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
|
|
// CHECK2-NEXT: [[CONV:%.*]] = trunc i32 [[TMP5]] to i8
|
|
// CHECK2-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK2-NEXT: [[CONV3:%.*]] = sext i8 [[TMP6]] to i32
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
|
|
// CHECK2-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
|
|
// CHECK2-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK2-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK2-NEXT: store i8 [[TMP7]], i8* [[I]], align 1
|
|
// CHECK2-NEXT: store i32 11, i32* [[X]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK2-NEXT: [[CONV6:%.*]] = sext i8 [[TMP8]] to i32
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV6]], 57
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK2-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 38, i64 0, i64 [[TMP10]], i64 1, i64 1)
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK2: omp.dispatch.cond:
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
|
|
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK2: omp.dispatch.body:
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK2-NEXT: store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[CMP9:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]]
|
|
// CHECK2-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[CONV10:%.*]] = sext i8 [[TMP19]] to i64
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[DIV11:%.*]] = sdiv i64 [[TMP20]], 11
|
|
// CHECK2-NEXT: [[MUL12:%.*]] = mul nsw i64 [[DIV11]], 1
|
|
// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i64 [[CONV10]], [[MUL12]]
|
|
// CHECK2-NEXT: [[CONV14:%.*]] = trunc i64 [[ADD13]] to i8
|
|
// CHECK2-NEXT: store i8 [[CONV14]], i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[DIV15:%.*]] = sdiv i64 [[TMP22]], 11
|
|
// CHECK2-NEXT: [[MUL16:%.*]] = mul nsw i64 [[DIV15]], 11
|
|
// CHECK2-NEXT: [[SUB17:%.*]] = sub nsw i64 [[TMP21]], [[MUL16]]
|
|
// CHECK2-NEXT: [[MUL18:%.*]] = mul nsw i64 [[SUB17]], 1
|
|
// CHECK2-NEXT: [[SUB19:%.*]] = sub nsw i64 11, [[MUL18]]
|
|
// CHECK2-NEXT: [[CONV20:%.*]] = trunc i64 [[SUB19]] to i32
|
|
// CHECK2-NEXT: store i32 [[CONV20]], i32* [[X8]], align 4, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP24]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[IDXPROM21:%.*]] = sext i8 [[TMP27]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds float, float* [[TMP26]], i64 [[IDXPROM21]]
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = load float, float* [[ARRAYIDX22]], align 4, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[MUL23:%.*]] = fmul float [[TMP25]], [[TMP28]]
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = load float*, float** [[TMP4]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[TMP30:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP30]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, float* [[TMP29]], i64 [[IDXPROM24]]
|
|
// CHECK2-NEXT: [[TMP31:%.*]] = load float, float* [[ARRAYIDX25]], align 4, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[MUL26:%.*]] = fmul float [[MUL23]], [[TMP31]]
|
|
// CHECK2-NEXT: [[TMP32:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[TMP33:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP33]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, float* [[TMP32]], i64 [[IDXPROM27]]
|
|
// CHECK2-NEXT: store float [[MUL26]], float* [[ARRAYIDX28]], align 4, !llvm.access.group !11
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: [[ADD29:%.*]] = add nsw i64 [[TMP34]], 1
|
|
// CHECK2-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK2: omp.dispatch.inc:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK2: omp.dispatch.end:
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
|
|
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK2-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 37, i32 0, i32 199, i32 1, i32 1)
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK2: omp.dispatch.cond:
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
|
|
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK2: omp.dispatch.body:
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 20
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
|
|
// CHECK2-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
|
|
// CHECK2-NEXT: store i8 [[CONV]], i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP12]], 20
|
|
// CHECK2-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 20
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL3]]
|
|
// CHECK2-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 -10, [[MUL4]]
|
|
// CHECK2-NEXT: store i32 [[ADD5]], i32* [[X]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP14]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP13]], i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP17]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM6]]
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX7]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[MUL8:%.*]] = fmul float [[TMP15]], [[TMP18]]
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP20]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP19]], i64 [[IDXPROM9]]
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[MUL11:%.*]] = fmul float [[MUL8]], [[TMP21]]
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP23]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, float* [[TMP22]], i64 [[IDXPROM12]]
|
|
// CHECK2-NEXT: store float [[MUL11]], float* [[ARRAYIDX13]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP24]], 1
|
|
// CHECK2-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK2: omp.dispatch.inc:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK2: omp.dispatch.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z3foov
|
|
// CHECK2-SAME: () #[[ATTR3:[0-9]+]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: call void @_Z8mayThrowv()
|
|
// CHECK2-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z12parallel_forPfi
|
|
// CHECK2-SAME: (float* [[A:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK2-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
|
|
// CHECK2-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, i64, i64)* @.omp_outlined..8 to void (i32*, i32*, ...)*), float** [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]])
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP5]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..8
|
|
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], i64 [[VLA:%.*]], i64 [[N:%.*]]) #[[ATTR1]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK2-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK2: omp.dispatch.cond:
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP5]], 16908288
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP8]], [[TMP9]]
|
|
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_CLEANUP:%.*]]
|
|
// CHECK2: omp.dispatch.cleanup:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK2: omp.dispatch.body:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP3:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
|
|
// CHECK2-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
|
|
// CHECK2: omp.inner.for.cond.cleanup:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 127
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[CALL:%.*]] = invoke i32 @_Z3foov()
|
|
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
|
|
// CHECK2: invoke.cont:
|
|
// CHECK2-NEXT: [[CONV4:%.*]] = sitofp i32 [[CALL]] to float
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP13]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[VLA1]], i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK2-NEXT: [[ADD5:%.*]] = fadd float [[CONV4]], [[TMP14]]
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[CONV]], align 8
|
|
// CHECK2-NEXT: [[CONV6:%.*]] = sitofp i32 [[TMP15]] to float
|
|
// CHECK2-NEXT: [[ADD7:%.*]] = fadd float [[ADD5]], [[CONV6]]
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM8:%.*]] = zext i32 [[TMP17]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM8]]
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4
|
|
// CHECK2-NEXT: [[ADD10:%.*]] = fadd float [[TMP18]], [[ADD7]]
|
|
// CHECK2-NEXT: store float [[ADD10]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[ADD11:%.*]] = add i32 [[TMP19]], 1
|
|
// CHECK2-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK2: omp.dispatch.inc:
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD12:%.*]] = add i32 [[TMP20]], [[TMP21]]
|
|
// CHECK2-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[TMP23]]
|
|
// CHECK2-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK2: omp.dispatch.end:
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP24]])
|
|
// CHECK2-NEXT: ret void
|
|
// CHECK2: terminate.lpad:
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = landingpad { i8*, i32 }
|
|
// CHECK2-NEXT: catch i8* null
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = extractvalue { i8*, i32 } [[TMP25]], 0
|
|
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP26]]) #[[ATTR7:[0-9]+]]
|
|
// CHECK2-NEXT: unreachable
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@__clang_call_terminate
|
|
// CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] comdat {
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK2-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
|
|
// CHECK2-NEXT: unreachable
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z17with_var_schedulev
|
|
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: store double 5.000000e+00, double* [[A]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load double, double* [[A]], align 8
|
|
// CHECK3-NEXT: [[CONV:%.*]] = fptosi double [[TMP0]] to i8
|
|
// CHECK3-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
|
|
// CHECK3-NEXT: store i8 [[TMP1]], i8* [[CONV1]], align 1
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
|
|
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP2]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca double, align 8
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK3-NEXT: [[I5:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load double, double* undef, align 8
|
|
// CHECK3-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP0]]
|
|
// CHECK3-NEXT: store double [[ADD]], double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK3-NEXT: [[SUB:%.*]] = fsub double [[TMP1]], 1.000000e+00
|
|
// CHECK3-NEXT: [[DIV:%.*]] = fdiv double [[SUB]], 1.000000e+00
|
|
// CHECK3-NEXT: [[CONV3:%.*]] = fptoui double [[DIV]] to i64
|
|
// CHECK3-NEXT: [[SUB4:%.*]] = sub i64 [[CONV3]], 1
|
|
// CHECK3-NEXT: store i64 [[SUB4]], i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-NEXT: store i64 1, i64* [[I]], align 8
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK3-NEXT: [[CMP:%.*]] = fcmp olt double 1.000000e+00, [[TMP2]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK3: omp.precond.then:
|
|
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-NEXT: store i64 [[TMP3]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 8
|
|
// CHECK3-NEXT: [[CONV6:%.*]] = sext i8 [[TMP4]] to i64
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV6]])
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3: omp.dispatch.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-NEXT: [[CMP7:%.*]] = icmp ugt i64 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: store i64 [[TMP11]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: [[ADD8:%.*]] = add i64 [[TMP13]], 1
|
|
// CHECK3-NEXT: [[CMP9:%.*]] = icmp ult i64 [[TMP12]], [[ADD8]]
|
|
// CHECK3-NEXT: br i1 [[CMP9]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3: omp.dispatch.body:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: [[ADD10:%.*]] = add i64 [[TMP15]], 1
|
|
// CHECK3-NEXT: [[CMP11:%.*]] = icmp ult i64 [[TMP14]], [[ADD10]]
|
|
// CHECK3-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP16]], 1
|
|
// CHECK3-NEXT: [[ADD12:%.*]] = add i64 1, [[MUL]]
|
|
// CHECK3-NEXT: store i64 [[ADD12]], i64* [[I5]], align 8
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: [[ADD13:%.*]] = add i64 [[TMP17]], 1
|
|
// CHECK3-NEXT: store i64 [[ADD13]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK3-NEXT: [[ADD14:%.*]] = add i64 [[TMP18]], [[TMP19]]
|
|
// CHECK3-NEXT: store i64 [[ADD14]], i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK3-NEXT: [[ADD15:%.*]] = add i64 [[TMP20]], [[TMP21]]
|
|
// CHECK3-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]])
|
|
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK3: omp.precond.end:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
|
|
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK3-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK3-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK3-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
|
|
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[SUB]], i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK3-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK3-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK3-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
|
|
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3: omp.dispatch.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
|
|
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3: omp.dispatch.body:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM3]]
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4
|
|
// CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]]
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[IDXPROM6]]
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load float, float* [[ARRAYIDX7]], align 4
|
|
// CHECK3-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]]
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM9]]
|
|
// CHECK3-NEXT: store float [[MUL8]], float* [[ARRAYIDX10]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]]
|
|
// CHECK3-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]]
|
|
// CHECK3-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
|
|
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..4 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 1073741859, i64 0, i64 16908287, i64 1, i64 1)
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3: omp.dispatch.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3: omp.dispatch.body:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK3-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK3-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK3-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !5
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK3-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z7guided7PfS_S_S_
|
|
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..5
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 1073741860, i64 0, i64 16908287, i64 1, i64 7)
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3: omp.dispatch.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3: omp.dispatch.body:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK3-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK3-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK3-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !8
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK3-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
|
|
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK3-NEXT: store i32 0, i32* [[Y]], align 4
|
|
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[Y]], float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..6
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I7:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[X8:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[TMP5]] to i8
|
|
// CHECK3-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-NEXT: [[CONV3:%.*]] = sext i8 [[TMP6]] to i32
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
|
|
// CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
|
|
// CHECK3-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
|
|
// CHECK3-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK3-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-NEXT: store i8 [[TMP7]], i8* [[I]], align 1
|
|
// CHECK3-NEXT: store i32 11, i32* [[X]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK3-NEXT: [[CONV6:%.*]] = sext i8 [[TMP8]] to i32
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV6]], 57
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK3: omp.precond.then:
|
|
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 1073741862, i64 0, i64 [[TMP10]], i64 1, i64 1)
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3: omp.dispatch.cond:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3: omp.dispatch.body:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK3-NEXT: store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[CMP9:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]]
|
|
// CHECK3-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[CONV10:%.*]] = sext i8 [[TMP19]] to i64
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[DIV11:%.*]] = sdiv i64 [[TMP20]], 11
|
|
// CHECK3-NEXT: [[MUL12:%.*]] = mul nsw i64 [[DIV11]], 1
|
|
// CHECK3-NEXT: [[ADD13:%.*]] = add nsw i64 [[CONV10]], [[MUL12]]
|
|
// CHECK3-NEXT: [[CONV14:%.*]] = trunc i64 [[ADD13]] to i8
|
|
// CHECK3-NEXT: store i8 [[CONV14]], i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[DIV15:%.*]] = sdiv i64 [[TMP22]], 11
|
|
// CHECK3-NEXT: [[MUL16:%.*]] = mul nsw i64 [[DIV15]], 11
|
|
// CHECK3-NEXT: [[SUB17:%.*]] = sub nsw i64 [[TMP21]], [[MUL16]]
|
|
// CHECK3-NEXT: [[MUL18:%.*]] = mul nsw i64 [[SUB17]], 1
|
|
// CHECK3-NEXT: [[SUB19:%.*]] = sub nsw i64 11, [[MUL18]]
|
|
// CHECK3-NEXT: [[CONV20:%.*]] = trunc i64 [[SUB19]] to i32
|
|
// CHECK3-NEXT: store i32 [[CONV20]], i32* [[X8]], align 4, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP24]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[TMP27:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[IDXPROM21:%.*]] = sext i8 [[TMP27]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds float, float* [[TMP26]], i64 [[IDXPROM21]]
|
|
// CHECK3-NEXT: [[TMP28:%.*]] = load float, float* [[ARRAYIDX22]], align 4, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[MUL23:%.*]] = fmul float [[TMP25]], [[TMP28]]
|
|
// CHECK3-NEXT: [[TMP29:%.*]] = load float*, float** [[TMP4]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[TMP30:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP30]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, float* [[TMP29]], i64 [[IDXPROM24]]
|
|
// CHECK3-NEXT: [[TMP31:%.*]] = load float, float* [[ARRAYIDX25]], align 4, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[MUL26:%.*]] = fmul float [[MUL23]], [[TMP31]]
|
|
// CHECK3-NEXT: [[TMP32:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[TMP33:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP33]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, float* [[TMP32]], i64 [[IDXPROM27]]
|
|
// CHECK3-NEXT: store float [[MUL26]], float* [[ARRAYIDX28]], align 4, !llvm.access.group !11
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: [[ADD29:%.*]] = add nsw i64 [[TMP34]], 1
|
|
// CHECK3-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK3: omp.precond.end:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
|
|
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 1073741861, i32 0, i32 199, i32 1, i32 1)
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3: omp.dispatch.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3: omp.dispatch.body:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 20
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
|
|
// CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
|
|
// CHECK3-NEXT: store i8 [[CONV]], i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP12]], 20
|
|
// CHECK3-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 20
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL3]]
|
|
// CHECK3-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 -10, [[MUL4]]
|
|
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[X]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP14]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP13]], i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP17]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM6]]
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX7]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[MUL8:%.*]] = fmul float [[TMP15]], [[TMP18]]
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP20]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP19]], i64 [[IDXPROM9]]
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[MUL11:%.*]] = fmul float [[MUL8]], [[TMP21]]
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP23]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, float* [[TMP22]], i64 [[IDXPROM12]]
|
|
// CHECK3-NEXT: store float [[MUL11]], float* [[ARRAYIDX13]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP24]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z3foov
|
|
// CHECK3-SAME: () #[[ATTR3:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: call void @_Z8mayThrowv()
|
|
// CHECK3-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z12parallel_forPfi
|
|
// CHECK3-SAME: (float* [[A:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK3-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK3-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
|
|
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
|
|
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, i64, i64)* @.omp_outlined..8 to void (i32*, i32*, ...)*), float** [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]])
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP5]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..8
|
|
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], i64 [[VLA:%.*]], i64 [[N:%.*]]) #[[ATTR1]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
|
|
// CHECK3-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
|
|
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK3-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK3-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK3: omp.dispatch.cond:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP5]], 16908288
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_CLEANUP:%.*]]
|
|
// CHECK3: omp.dispatch.cleanup:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK3: omp.dispatch.body:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP3:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
|
|
// CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
|
|
// CHECK3: omp.inner.for.cond.cleanup:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 127
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[CALL:%.*]] = invoke i32 @_Z3foov()
|
|
// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
|
|
// CHECK3: invoke.cont:
|
|
// CHECK3-NEXT: [[CONV4:%.*]] = sitofp i32 [[CALL]] to float
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP13]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[VLA1]], i64 [[IDXPROM]]
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[ADD5:%.*]] = fadd float [[CONV4]], [[TMP14]]
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[CONV]], align 8
|
|
// CHECK3-NEXT: [[CONV6:%.*]] = sitofp i32 [[TMP15]] to float
|
|
// CHECK3-NEXT: [[ADD7:%.*]] = fadd float [[ADD5]], [[CONV6]]
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK3-NEXT: [[IDXPROM8:%.*]] = zext i32 [[TMP17]] to i64
|
|
// CHECK3-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM8]]
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4
|
|
// CHECK3-NEXT: [[ADD10:%.*]] = fadd float [[TMP18]], [[ADD7]]
|
|
// CHECK3-NEXT: store float [[ADD10]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD11:%.*]] = add i32 [[TMP19]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK3: omp.dispatch.inc:
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD12:%.*]] = add i32 [[TMP20]], [[TMP21]]
|
|
// CHECK3-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[TMP23]]
|
|
// CHECK3-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK3: omp.dispatch.end:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP24]])
|
|
// CHECK3-NEXT: ret void
|
|
// CHECK3: terminate.lpad:
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = landingpad { i8*, i32 }
|
|
// CHECK3-NEXT: catch i8* null
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = extractvalue { i8*, i32 } [[TMP25]], 0
|
|
// CHECK3-NEXT: call void @__clang_call_terminate(i8* [[TMP26]]) #[[ATTR7:[0-9]+]]
|
|
// CHECK3-NEXT: unreachable
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate
|
|
// CHECK3-SAME: (i8* [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] comdat {
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
|
|
// CHECK3-NEXT: unreachable
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z17with_var_schedulev
|
|
// CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: store double 5.000000e+00, double* [[A]], align 8
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load double, double* [[A]], align 8
|
|
// CHECK4-NEXT: [[CONV:%.*]] = fptosi double [[TMP0]] to i8
|
|
// CHECK4-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
|
|
// CHECK4-NEXT: store i8 [[TMP1]], i8* [[CONV1]], align 1
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP2]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca double, align 8
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK4-NEXT: [[I5:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load double, double* undef, align 8
|
|
// CHECK4-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP0]]
|
|
// CHECK4-NEXT: store double [[ADD]], double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK4-NEXT: [[SUB:%.*]] = fsub double [[TMP1]], 1.000000e+00
|
|
// CHECK4-NEXT: [[DIV:%.*]] = fdiv double [[SUB]], 1.000000e+00
|
|
// CHECK4-NEXT: [[CONV3:%.*]] = fptoui double [[DIV]] to i64
|
|
// CHECK4-NEXT: [[SUB4:%.*]] = sub i64 [[CONV3]], 1
|
|
// CHECK4-NEXT: store i64 [[SUB4]], i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK4-NEXT: store i64 1, i64* [[I]], align 8
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
|
|
// CHECK4-NEXT: [[CMP:%.*]] = fcmp olt double 1.000000e+00, [[TMP2]]
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK4: omp.precond.then:
|
|
// CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK4-NEXT: store i64 [[TMP3]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 8
|
|
// CHECK4-NEXT: [[CONV6:%.*]] = sext i8 [[TMP4]] to i64
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV6]])
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK4: omp.dispatch.cond:
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK4-NEXT: [[CMP7:%.*]] = icmp ugt i64 [[TMP7]], [[TMP8]]
|
|
// CHECK4-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK4: cond.true:
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK4-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK4: cond.false:
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: br label [[COND_END]]
|
|
// CHECK4: cond.end:
|
|
// CHECK4-NEXT: [[COND:%.*]] = phi i64 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK4-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: store i64 [[TMP11]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: [[ADD8:%.*]] = add i64 [[TMP13]], 1
|
|
// CHECK4-NEXT: [[CMP9:%.*]] = icmp ult i64 [[TMP12]], [[ADD8]]
|
|
// CHECK4-NEXT: br i1 [[CMP9]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK4: omp.dispatch.body:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: [[ADD10:%.*]] = add i64 [[TMP15]], 1
|
|
// CHECK4-NEXT: [[CMP11:%.*]] = icmp ult i64 [[TMP14]], [[ADD10]]
|
|
// CHECK4-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul i64 [[TMP16]], 1
|
|
// CHECK4-NEXT: [[ADD12:%.*]] = add i64 1, [[MUL]]
|
|
// CHECK4-NEXT: store i64 [[ADD12]], i64* [[I5]], align 8
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK4-NEXT: [[ADD13:%.*]] = add i64 [[TMP17]], 1
|
|
// CHECK4-NEXT: store i64 [[ADD13]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK4: omp.dispatch.inc:
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK4-NEXT: [[ADD14:%.*]] = add i64 [[TMP18]], [[TMP19]]
|
|
// CHECK4-NEXT: store i64 [[ADD14]], i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK4-NEXT: [[ADD15:%.*]] = add i64 [[TMP20]], [[TMP21]]
|
|
// CHECK4-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK4: omp.dispatch.end:
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]])
|
|
// CHECK4-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK4: omp.precond.end:
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
|
|
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK4: cond.true:
|
|
// CHECK4-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK4: cond.false:
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END]]
|
|
// CHECK4: cond.end:
|
|
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
|
|
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK4-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK4-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK4-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK4-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK4: omp.loop.exit:
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
|
|
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK4: cond.true:
|
|
// CHECK4-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK4: cond.false:
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END]]
|
|
// CHECK4: cond.end:
|
|
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
|
|
// CHECK4-NEXT: store i32 [[SUB]], i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK4-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK4-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK4-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK4: omp.loop.exit:
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
|
|
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK4: omp.dispatch.cond:
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK4: cond.true:
|
|
// CHECK4-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK4: cond.false:
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END]]
|
|
// CHECK4: cond.end:
|
|
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
|
|
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK4: omp.dispatch.body:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
|
|
// CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM]]
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM3]]
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4
|
|
// CHECK4-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]]
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[IDXPROM6]]
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load float, float* [[ARRAYIDX7]], align 4
|
|
// CHECK4-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]]
|
|
// CHECK4-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM9]]
|
|
// CHECK4-NEXT: store float [[MUL8]], float* [[ARRAYIDX10]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1
|
|
// CHECK4-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK4: omp.dispatch.inc:
|
|
// CHECK4-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]]
|
|
// CHECK4-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]]
|
|
// CHECK4-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK4: omp.dispatch.end:
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
|
|
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..4 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 35, i64 0, i64 16908287, i64 1, i64 1)
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK4: omp.dispatch.cond:
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK4: omp.dispatch.body:
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK4-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK4-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK4-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !5
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK4-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK4: omp.dispatch.inc:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK4: omp.dispatch.end:
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z7guided7PfS_S_S_
|
|
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..5
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 36, i64 0, i64 16908287, i64 1, i64 7)
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK4: omp.dispatch.cond:
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK4: omp.dispatch.body:
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK4-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK4-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK4-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !8
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK4-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK4: omp.dispatch.inc:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK4: omp.dispatch.end:
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
|
|
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[Y:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK4-NEXT: store i32 0, i32* [[Y]], align 4
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[Y]], float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..6
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK4-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I7:%.*]] = alloca i8, align 1
|
|
// CHECK4-NEXT: [[X8:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
|
|
// CHECK4-NEXT: [[CONV:%.*]] = trunc i32 [[TMP5]] to i8
|
|
// CHECK4-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK4-NEXT: [[CONV3:%.*]] = sext i8 [[TMP6]] to i32
|
|
// CHECK4-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
|
|
// CHECK4-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
|
|
// CHECK4-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
|
|
// CHECK4-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK4-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK4-NEXT: store i8 [[TMP7]], i8* [[I]], align 1
|
|
// CHECK4-NEXT: store i32 11, i32* [[X]], align 4
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK4-NEXT: [[CONV6:%.*]] = sext i8 [[TMP8]] to i32
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV6]], 57
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK4: omp.precond.then:
|
|
// CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK4-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 38, i64 0, i64 [[TMP10]], i64 1, i64 1)
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK4: omp.dispatch.cond:
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
|
|
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK4: omp.dispatch.body:
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK4-NEXT: store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[CMP9:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]]
|
|
// CHECK4-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[CONV10:%.*]] = sext i8 [[TMP19]] to i64
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[DIV11:%.*]] = sdiv i64 [[TMP20]], 11
|
|
// CHECK4-NEXT: [[MUL12:%.*]] = mul nsw i64 [[DIV11]], 1
|
|
// CHECK4-NEXT: [[ADD13:%.*]] = add nsw i64 [[CONV10]], [[MUL12]]
|
|
// CHECK4-NEXT: [[CONV14:%.*]] = trunc i64 [[ADD13]] to i8
|
|
// CHECK4-NEXT: store i8 [[CONV14]], i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[DIV15:%.*]] = sdiv i64 [[TMP22]], 11
|
|
// CHECK4-NEXT: [[MUL16:%.*]] = mul nsw i64 [[DIV15]], 11
|
|
// CHECK4-NEXT: [[SUB17:%.*]] = sub nsw i64 [[TMP21]], [[MUL16]]
|
|
// CHECK4-NEXT: [[MUL18:%.*]] = mul nsw i64 [[SUB17]], 1
|
|
// CHECK4-NEXT: [[SUB19:%.*]] = sub nsw i64 11, [[MUL18]]
|
|
// CHECK4-NEXT: [[CONV20:%.*]] = trunc i64 [[SUB19]] to i32
|
|
// CHECK4-NEXT: store i32 [[CONV20]], i32* [[X8]], align 4, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[TMP24:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP24]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM]]
|
|
// CHECK4-NEXT: [[TMP25:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[TMP26:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[TMP27:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[IDXPROM21:%.*]] = sext i8 [[TMP27]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds float, float* [[TMP26]], i64 [[IDXPROM21]]
|
|
// CHECK4-NEXT: [[TMP28:%.*]] = load float, float* [[ARRAYIDX22]], align 4, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[MUL23:%.*]] = fmul float [[TMP25]], [[TMP28]]
|
|
// CHECK4-NEXT: [[TMP29:%.*]] = load float*, float** [[TMP4]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[TMP30:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP30]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, float* [[TMP29]], i64 [[IDXPROM24]]
|
|
// CHECK4-NEXT: [[TMP31:%.*]] = load float, float* [[ARRAYIDX25]], align 4, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[MUL26:%.*]] = fmul float [[MUL23]], [[TMP31]]
|
|
// CHECK4-NEXT: [[TMP32:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[TMP33:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP33]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, float* [[TMP32]], i64 [[IDXPROM27]]
|
|
// CHECK4-NEXT: store float [[MUL26]], float* [[ARRAYIDX28]], align 4, !llvm.access.group !11
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: [[ADD29:%.*]] = add nsw i64 [[TMP34]], 1
|
|
// CHECK4-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK4: omp.dispatch.inc:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK4: omp.dispatch.end:
|
|
// CHECK4-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK4: omp.precond.end:
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
|
|
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK4-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 37, i32 0, i32 199, i32 1, i32 1)
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK4: omp.dispatch.cond:
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
|
|
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK4: omp.dispatch.body:
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 20
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
|
|
// CHECK4-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
|
|
// CHECK4-NEXT: store i8 [[CONV]], i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP12]], 20
|
|
// CHECK4-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 20
|
|
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL3]]
|
|
// CHECK4-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 -10, [[MUL4]]
|
|
// CHECK4-NEXT: store i32 [[ADD5]], i32* [[X]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP14]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP13]], i64 [[IDXPROM]]
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP17]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM6]]
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX7]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[MUL8:%.*]] = fmul float [[TMP15]], [[TMP18]]
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP20]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP19]], i64 [[IDXPROM9]]
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[MUL11:%.*]] = fmul float [[MUL8]], [[TMP21]]
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[TMP23:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP23]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, float* [[TMP22]], i64 [[IDXPROM12]]
|
|
// CHECK4-NEXT: store float [[MUL11]], float* [[ARRAYIDX13]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP24]], 1
|
|
// CHECK4-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK4: omp.dispatch.inc:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK4: omp.dispatch.end:
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z3foov
|
|
// CHECK4-SAME: () #[[ATTR3:[0-9]+]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: call void @_Z8mayThrowv()
|
|
// CHECK4-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z12parallel_forPfi
|
|
// CHECK4-SAME: (float* [[A:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK4-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK4-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK4-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
|
|
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
|
|
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, i64, i64)* @.omp_outlined..8 to void (i32*, i32*, ...)*), float** [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]])
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP5]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..8
|
|
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], i64 [[VLA:%.*]], i64 [[N:%.*]]) #[[ATTR1]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
|
|
// CHECK4-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
|
|
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK4-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK4-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK4-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK4: omp.dispatch.cond:
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP5]], 16908288
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK4: cond.true:
|
|
// CHECK4-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK4: cond.false:
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END]]
|
|
// CHECK4: cond.end:
|
|
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
|
|
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP8]], [[TMP9]]
|
|
// CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_CLEANUP:%.*]]
|
|
// CHECK4: omp.dispatch.cleanup:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK4: omp.dispatch.body:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP3:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
|
|
// CHECK4-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
|
|
// CHECK4: omp.inner.for.cond.cleanup:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 127
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[CALL:%.*]] = invoke i32 @_Z3foov()
|
|
// CHECK4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
|
|
// CHECK4: invoke.cont:
|
|
// CHECK4-NEXT: [[CONV4:%.*]] = sitofp i32 [[CALL]] to float
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP13]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[VLA1]], i64 [[IDXPROM]]
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK4-NEXT: [[ADD5:%.*]] = fadd float [[CONV4]], [[TMP14]]
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[CONV]], align 8
|
|
// CHECK4-NEXT: [[CONV6:%.*]] = sitofp i32 [[TMP15]] to float
|
|
// CHECK4-NEXT: [[ADD7:%.*]] = fadd float [[ADD5]], [[CONV6]]
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK4-NEXT: [[IDXPROM8:%.*]] = zext i32 [[TMP17]] to i64
|
|
// CHECK4-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM8]]
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4
|
|
// CHECK4-NEXT: [[ADD10:%.*]] = fadd float [[TMP18]], [[ADD7]]
|
|
// CHECK4-NEXT: store float [[ADD10]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[ADD11:%.*]] = add i32 [[TMP19]], 1
|
|
// CHECK4-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK4: omp.dispatch.inc:
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: [[ADD12:%.*]] = add i32 [[TMP20]], [[TMP21]]
|
|
// CHECK4-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[TMP23]]
|
|
// CHECK4-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK4: omp.dispatch.end:
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
|
|
// CHECK4-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP24]])
|
|
// CHECK4-NEXT: ret void
|
|
// CHECK4: terminate.lpad:
|
|
// CHECK4-NEXT: [[TMP25:%.*]] = landingpad { i8*, i32 }
|
|
// CHECK4-NEXT: catch i8* null
|
|
// CHECK4-NEXT: [[TMP26:%.*]] = extractvalue { i8*, i32 } [[TMP25]], 0
|
|
// CHECK4-NEXT: call void @__clang_call_terminate(i8* [[TMP26]]) #[[ATTR7:[0-9]+]]
|
|
// CHECK4-NEXT: unreachable
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@__clang_call_terminate
|
|
// CHECK4-SAME: (i8* [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] comdat {
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK4-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
|
|
// CHECK4-NEXT: unreachable
|
|
//
|
|
//
|
|
// CHECK5-LABEL: define {{[^@]+}}@_Z17with_var_schedulev
|
|
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG7:![0-9]+]] {
|
|
// CHECK5-NEXT: entry:
|
|
// CHECK5-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK5-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: store double 5.000000e+00, double* [[A]], align 8, !dbg [[DBG10:![0-9]+]]
|
|
// CHECK5-NEXT: [[TMP0:%.*]] = load double, double* [[A]], align 8, !dbg [[DBG11:![0-9]+]]
|
|
// CHECK5-NEXT: [[CONV:%.*]] = fptosi double [[TMP0]] to i8, !dbg [[DBG11]]
|
|
// CHECK5-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1, !dbg [[DBG11]]
|
|
// CHECK5-NEXT: [[TMP1:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !dbg [[DBG11]]
|
|
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*, !dbg [[DBG11]]
|
|
// CHECK5-NEXT: store i8 [[TMP1]], i8* [[CONV1]], align 1, !dbg [[DBG11]]
|
|
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !dbg [[DBG11]]
|
|
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP2]]), !dbg [[DBG11]]
|
|
// CHECK5-NEXT: ret void, !dbg [[DBG12:![0-9]+]]
|
|
//
|
|
//
|
|
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] !dbg [[DBG13:![0-9]+]] {
|
|
// CHECK5-NEXT: entry:
|
|
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK5-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca double, align 8
|
|
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[A:%.*]] = alloca double, align 8
|
|
// CHECK5-NEXT: [[I5:%.*]] = alloca i64, align 8
|
|
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*, !dbg [[DBG14:![0-9]+]]
|
|
// CHECK5-NEXT: [[TMP0:%.*]] = load double, double* undef, align 8, !dbg [[DBG15:![0-9]+]]
|
|
// CHECK5-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP0]], !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store double [[ADD]], double* [[DOTCAPTURE_EXPR_1]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP1:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[SUB:%.*]] = fsub double [[TMP1]], 1.000000e+00, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[DIV:%.*]] = fdiv double [[SUB]], 1.000000e+00, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[CONV3:%.*]] = fptoui double [[DIV]] to i64, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[SUB4:%.*]] = sub i64 [[CONV3]], 1, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 [[SUB4]], i64* [[DOTCAPTURE_EXPR_2]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 1, i64* [[I]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP2:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[CMP:%.*]] = fcmp olt double 1.000000e+00, [[TMP2]], !dbg [[DBG15]]
|
|
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]], !dbg [[DBG14]]
|
|
// CHECK5: omp.precond.then:
|
|
// CHECK5-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 [[TMP3]], i64* [[DOTOMP_UB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 8, !dbg [[DBG14]]
|
|
// CHECK5-NEXT: [[CONV6:%.*]] = sext i8 [[TMP4]] to i64, !dbg [[DBG14]]
|
|
// CHECK5-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG14]]
|
|
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4, !dbg [[DBG14]]
|
|
// CHECK5-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV6]]), !dbg [[DBG14]]
|
|
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]], !dbg [[DBG14]]
|
|
// CHECK5: omp.dispatch.cond:
|
|
// CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[CMP7:%.*]] = icmp ugt i64 [[TMP7]], [[TMP8]], !dbg [[DBG15]]
|
|
// CHECK5-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]], !dbg [[DBG15]]
|
|
// CHECK5: cond.true:
|
|
// CHECK5-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: br label [[COND_END:%.*]], !dbg [[DBG15]]
|
|
// CHECK5: cond.false:
|
|
// CHECK5-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: br label [[COND_END]], !dbg [[DBG15]]
|
|
// CHECK5: cond.end:
|
|
// CHECK5-NEXT: [[COND:%.*]] = phi i64 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ], !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 [[TMP11]], i64* [[DOTOMP_IV]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[ADD8:%.*]] = add i64 [[TMP13]], 1, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[CMP9:%.*]] = icmp ult i64 [[TMP12]], [[ADD8]], !dbg [[DBG15]]
|
|
// CHECK5-NEXT: br i1 [[CMP9]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]], !dbg [[DBG14]]
|
|
// CHECK5: omp.dispatch.body:
|
|
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG14]]
|
|
// CHECK5: omp.inner.for.cond:
|
|
// CHECK5-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[ADD10:%.*]] = add i64 [[TMP15]], 1, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[CMP11:%.*]] = icmp ult i64 [[TMP14]], [[ADD10]], !dbg [[DBG15]]
|
|
// CHECK5-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG14]]
|
|
// CHECK5: omp.inner.for.body:
|
|
// CHECK5-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[MUL:%.*]] = mul i64 [[TMP16]], 1, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[ADD12:%.*]] = add i64 1, [[MUL]], !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 [[ADD12]], i64* [[I5]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG16:![0-9]+]]
|
|
// CHECK5: omp.body.continue:
|
|
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG14]]
|
|
// CHECK5: omp.inner.for.inc:
|
|
// CHECK5-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[ADD13:%.*]] = add i64 [[TMP17]], 1, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 [[ADD13]], i64* [[DOTOMP_IV]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG14]], !llvm.loop [[LOOP17:![0-9]+]]
|
|
// CHECK5: omp.inner.for.end:
|
|
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]], !dbg [[DBG14]]
|
|
// CHECK5: omp.dispatch.inc:
|
|
// CHECK5-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[ADD14:%.*]] = add i64 [[TMP18]], [[TMP19]], !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 [[ADD14]], i64* [[DOTOMP_LB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: [[ADD15:%.*]] = add i64 [[TMP20]], [[TMP21]], !dbg [[DBG15]]
|
|
// CHECK5-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_UB]], align 8, !dbg [[DBG15]]
|
|
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]], !dbg [[DBG14]], !llvm.loop [[LOOP18:![0-9]+]]
|
|
// CHECK5: omp.dispatch.end:
|
|
// CHECK5-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG14]]
|
|
// CHECK5-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4, !dbg [[DBG14]]
|
|
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP23]]), !dbg [[DBG14]]
|
|
// CHECK5-NEXT: br label [[OMP_PRECOND_END]], !dbg [[DBG14]]
|
|
// CHECK5: omp.precond.end:
|
|
// CHECK5-NEXT: ret void, !dbg [[DBG16]]
|
|
//
|
|
//
|
|
// CHECK5-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
|
|
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] !dbg [[DBG21:![0-9]+]] {
|
|
// CHECK5-NEXT: entry:
|
|
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB9:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]]), !dbg [[DBG22:![0-9]+]]
|
|
// CHECK5-NEXT: ret void, !dbg [[DBG23:![0-9]+]]
|
|
//
|
|
//
|
|
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..1
|
|
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] !dbg [[DBG24:![0-9]+]] {
|
|
// CHECK5-NEXT: entry:
|
|
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK5-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK5-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK5-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK5-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK5-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8, !dbg [[DBG25:![0-9]+]]
|
|
// CHECK5-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8, !dbg [[DBG25]]
|
|
// CHECK5-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8, !dbg [[DBG25]]
|
|
// CHECK5-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8, !dbg [[DBG25]]
|
|
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG26:![0-9]+]]
|
|
// CHECK5-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG25]]
|
|
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, !dbg [[DBG25]]
|
|
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB6:[0-9]+]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1), !dbg [[DBG25]]
|
|
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]], !dbg [[DBG26]]
|
|
// CHECK5: cond.true:
|
|
// CHECK5-NEXT: br label [[COND_END:%.*]], !dbg [[DBG26]]
|
|
// CHECK5: cond.false:
|
|
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: br label [[COND_END]], !dbg [[DBG26]]
|
|
// CHECK5: cond.end:
|
|
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ], !dbg [[DBG26]]
|
|
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG25]]
|
|
// CHECK5: omp.inner.for.cond:
|
|
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]], !dbg [[DBG26]]
|
|
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG25]]
|
|
// CHECK5: omp.inner.for.body:
|
|
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]], !dbg [[DBG26]]
|
|
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8, !dbg [[DBG27:![0-9]+]]
|
|
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]], !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]], !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]], !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]], !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]], !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]], !dbg [[DBG27]]
|
|
// CHECK5-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4, !dbg [[DBG27]]
|
|
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG28:![0-9]+]]
|
|
// CHECK5: omp.body.continue:
|
|
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG25]]
|
|
// CHECK5: omp.inner.for.inc:
|
|
// CHECK5-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP23]], 1, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG26]]
|
|
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG25]], !llvm.loop [[LOOP29:![0-9]+]]
|
|
// CHECK5: omp.inner.for.end:
|
|
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]], !dbg [[DBG25]]
|
|
// CHECK5: omp.loop.exit:
|
|
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB8:[0-9]+]], i32 [[TMP5]]), !dbg [[DBG25]]
|
|
// CHECK5-NEXT: ret void, !dbg [[DBG28]]
|
|
//
|
|
//
|
|
// CHECK5-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
|
|
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] !dbg [[DBG30:![0-9]+]] {
|
|
// CHECK5-NEXT: entry:
|
|
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB14:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]]), !dbg [[DBG31:![0-9]+]]
|
|
// CHECK5-NEXT: ret void, !dbg [[DBG32:![0-9]+]]
|
|
//
|
|
//
|
|
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2
|
|
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] !dbg [[DBG33:![0-9]+]] {
|
|
// CHECK5-NEXT: entry:
|
|
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK5-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK5-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK5-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK5-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK5-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]]
|
|
// CHECK5-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8, !dbg [[DBG34]]
|
|
// CHECK5-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8, !dbg [[DBG34]]
|
|
// CHECK5-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8, !dbg [[DBG34]]
|
|
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG35:![0-9]+]]
|
|
// CHECK5-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG34]]
|
|
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, !dbg [[DBG34]]
|
|
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB11:[0-9]+]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1), !dbg [[DBG34]]
|
|
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]], !dbg [[DBG35]]
|
|
// CHECK5: cond.true:
|
|
// CHECK5-NEXT: br label [[COND_END:%.*]], !dbg [[DBG35]]
|
|
// CHECK5: cond.false:
|
|
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: br label [[COND_END]], !dbg [[DBG35]]
|
|
// CHECK5: cond.end:
|
|
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ], !dbg [[DBG35]]
|
|
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG34]]
|
|
// CHECK5: omp.inner.for.cond:
|
|
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]], !dbg [[DBG35]]
|
|
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG34]]
|
|
// CHECK5: omp.inner.for.body:
|
|
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7, !dbg [[DBG35]]
|
|
// CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]], !dbg [[DBG35]]
// CHECK5-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !dbg [[DBG35]]
// CHECK5-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8, !dbg [[DBG36:![0-9]+]]
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG36]]
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64, !dbg [[DBG36]]
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]], !dbg [[DBG36]]
// CHECK5-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4, !dbg [[DBG36]]
// CHECK5-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8, !dbg [[DBG36]]
// CHECK5-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG36]]
// CHECK5-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64, !dbg [[DBG36]]
// CHECK5-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]], !dbg [[DBG36]]
// CHECK5-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4, !dbg [[DBG36]]
// CHECK5-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]], !dbg [[DBG36]]
// CHECK5-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8, !dbg [[DBG36]]
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG36]]
// CHECK5-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64, !dbg [[DBG36]]
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]], !dbg [[DBG36]]
// CHECK5-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4, !dbg [[DBG36]]
// CHECK5-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]], !dbg [[DBG36]]
// CHECK5-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8, !dbg [[DBG36]]
// CHECK5-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG36]]
// CHECK5-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64, !dbg [[DBG36]]
// CHECK5-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]], !dbg [[DBG36]]
// CHECK5-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4, !dbg [[DBG36]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG37:![0-9]+]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG34]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG35]]
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1, !dbg [[DBG35]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG35]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG34]], !llvm.loop [[LOOP38:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]], !dbg [[DBG34]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB13:[0-9]+]], i32 [[TMP5]]), !dbg [[DBG34]]
// CHECK5-NEXT: ret void, !dbg [[DBG37]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] !dbg [[DBG39:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB19:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]]), !dbg [[DBG40:![0-9]+]]
// CHECK5-NEXT: ret void, !dbg [[DBG41:![0-9]+]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] !dbg [[DBG42:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
// CHECK5-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
// CHECK5-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
// CHECK5-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8, !dbg [[DBG43:![0-9]+]]
// CHECK5-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8, !dbg [[DBG43]]
// CHECK5-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8, !dbg [[DBG43]]
// CHECK5-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8, !dbg [[DBG43]]
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG44:![0-9]+]]
// CHECK5-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG43]]
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, !dbg [[DBG43]]
// CHECK5-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB16:[0-9]+]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5), !dbg [[DBG43]]
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]], !dbg [[DBG43]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288, !dbg [[DBG44]]
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]], !dbg [[DBG44]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]], !dbg [[DBG44]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: br label [[COND_END]], !dbg [[DBG44]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ], !dbg [[DBG44]]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]], !dbg [[DBG44]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]], !dbg [[DBG43]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG43]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]], !dbg [[DBG44]]
// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG43]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127, !dbg [[DBG44]]
// CHECK5-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]], !dbg [[DBG44]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP1]], align 8, !dbg [[DBG45:![0-9]+]]
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG45]]
// CHECK5-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64, !dbg [[DBG45]]
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM]], !dbg [[DBG45]]
// CHECK5-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX]], align 4, !dbg [[DBG45]]
// CHECK5-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP2]], align 8, !dbg [[DBG45]]
// CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG45]]
// CHECK5-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64, !dbg [[DBG45]]
// CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM3]], !dbg [[DBG45]]
// CHECK5-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !dbg [[DBG45]]
// CHECK5-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]], !dbg [[DBG45]]
// CHECK5-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP3]], align 8, !dbg [[DBG45]]
// CHECK5-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG45]]
// CHECK5-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64, !dbg [[DBG45]]
// CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[IDXPROM6]], !dbg [[DBG45]]
// CHECK5-NEXT: [[TMP22:%.*]] = load float, float* [[ARRAYIDX7]], align 4, !dbg [[DBG45]]
// CHECK5-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]], !dbg [[DBG45]]
// CHECK5-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP0]], align 8, !dbg [[DBG45]]
// CHECK5-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG45]]
// CHECK5-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64, !dbg [[DBG45]]
// CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM9]], !dbg [[DBG45]]
// CHECK5-NEXT: store float [[MUL8]], float* [[ARRAYIDX10]], align 4, !dbg [[DBG45]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG46:![0-9]+]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG43]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1, !dbg [[DBG44]]
// CHECK5-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG43]], !llvm.loop [[LOOP47:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]], !dbg [[DBG43]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]], !dbg [[DBG44]]
// CHECK5-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]], !dbg [[DBG44]]
// CHECK5-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG44]]
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]], !dbg [[DBG43]], !llvm.loop [[LOOP48:![0-9]+]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB18:[0-9]+]], i32 [[TMP5]]), !dbg [[DBG43]]
// CHECK5-NEXT: ret void, !dbg [[DBG46]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] !dbg [[DBG49:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB21:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..4 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]]), !dbg [[DBG50:![0-9]+]]
// CHECK5-NEXT: ret void, !dbg [[DBG51:![0-9]+]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] !dbg [[DBG52:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
// CHECK5-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
// CHECK5-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
// CHECK5-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8, !dbg [[DBG53:![0-9]+]]
// CHECK5-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8, !dbg [[DBG53]]
// CHECK5-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8, !dbg [[DBG53]]
// CHECK5-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8, !dbg [[DBG53]]
// CHECK5-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8, !dbg [[DBG54:![0-9]+]]
// CHECK5-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG54]]
// CHECK5-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8, !dbg [[DBG54]]
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG54]]
// CHECK5-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG53]]
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, !dbg [[DBG53]]
// CHECK5-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB21]], i32 [[TMP5]], i32 1073741859, i64 0, i64 16908287, i64 1, i64 1), !dbg [[DBG53]]
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]], !dbg [[DBG53]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB21]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]]), !dbg [[DBG53]]
// CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0, !dbg [[DBG53]]
// CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]], !dbg [[DBG53]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8, !dbg [[DBG54]]
// CHECK5-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8, !dbg [[DBG54]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG53]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG54]], !llvm.access.group !55
// CHECK5-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG54]], !llvm.access.group !55
// CHECK5-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1, !dbg [[DBG54]]
// CHECK5-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]], !dbg [[DBG54]]
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG53]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG54]], !llvm.access.group !55
// CHECK5-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127, !dbg [[DBG54]]
// CHECK5-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]], !dbg [[DBG54]]
// CHECK5-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !dbg [[DBG54]], !llvm.access.group !55
// CHECK5-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !dbg [[DBG56:![0-9]+]], !llvm.access.group !55
// CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]], !dbg [[DBG56]]
// CHECK5-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]], !dbg [[DBG56]]
// CHECK5-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]], !dbg [[DBG56]]
// CHECK5-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]], !dbg [[DBG56]]
// CHECK5-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]], !dbg [[DBG56]]
// CHECK5-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]], !dbg [[DBG56]]
// CHECK5-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !dbg [[DBG56]], !llvm.access.group !55
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG57:![0-9]+]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG53]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG54]], !llvm.access.group !55
// CHECK5-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1, !dbg [[DBG54]]
// CHECK5-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !dbg [[DBG54]], !llvm.access.group !55
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG53]], !llvm.loop [[LOOP58:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]], !dbg [[DBG53]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]], !dbg [[DBG53]], !llvm.loop [[LOOP60:![0-9]+]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: ret void, !dbg [[DBG57]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z7guided7PfS_S_S_
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] !dbg [[DBG61:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB23:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]]), !dbg [[DBG62:![0-9]+]]
// CHECK5-NEXT: ret void, !dbg [[DBG63:![0-9]+]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] !dbg [[DBG64:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
// CHECK5-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
// CHECK5-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
// CHECK5-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8, !dbg [[DBG65:![0-9]+]]
// CHECK5-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8, !dbg [[DBG65]]
// CHECK5-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8, !dbg [[DBG65]]
// CHECK5-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8, !dbg [[DBG65]]
// CHECK5-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8, !dbg [[DBG66:![0-9]+]]
// CHECK5-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG66]]
// CHECK5-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8, !dbg [[DBG66]]
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG66]]
// CHECK5-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG65]]
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, !dbg [[DBG65]]
// CHECK5-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB23]], i32 [[TMP5]], i32 1073741860, i64 0, i64 16908287, i64 1, i64 7), !dbg [[DBG65]]
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]], !dbg [[DBG65]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB23]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]]), !dbg [[DBG65]]
// CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0, !dbg [[DBG65]]
// CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]], !dbg [[DBG65]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8, !dbg [[DBG66]]
// CHECK5-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8, !dbg [[DBG66]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG65]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG66]], !llvm.access.group !67
// CHECK5-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG66]], !llvm.access.group !67
// CHECK5-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1, !dbg [[DBG66]]
// CHECK5-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]], !dbg [[DBG66]]
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG65]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG66]], !llvm.access.group !67
// CHECK5-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127, !dbg [[DBG66]]
// CHECK5-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]], !dbg [[DBG66]]
// CHECK5-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !dbg [[DBG66]], !llvm.access.group !67
// CHECK5-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !dbg [[DBG68:![0-9]+]], !llvm.access.group !67
// CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]], !dbg [[DBG68]]
// CHECK5-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]], !dbg [[DBG68]]
// CHECK5-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]], !dbg [[DBG68]]
// CHECK5-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]], !dbg [[DBG68]]
// CHECK5-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]], !dbg [[DBG68]]
// CHECK5-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]], !dbg [[DBG68]]
// CHECK5-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !dbg [[DBG68]], !llvm.access.group !67
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG69:![0-9]+]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG65]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG66]], !llvm.access.group !67
// CHECK5-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1, !dbg [[DBG66]]
// CHECK5-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !dbg [[DBG66]], !llvm.access.group !67
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG65]], !llvm.loop [[LOOP70:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]], !dbg [[DBG65]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]], !dbg [[DBG65]], !llvm.loop [[LOOP72:![0-9]+]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: ret void, !dbg [[DBG69]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] !dbg [[DBG73:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[X]], align 4, !dbg [[DBG74:![0-9]+]]
// CHECK5-NEXT: store i32 0, i32* [[Y]], align 4, !dbg [[DBG75:![0-9]+]]
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB25:[0-9]+]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[Y]], float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]]), !dbg [[DBG76:![0-9]+]]
// CHECK5-NEXT: ret void, !dbg [[DBG77:![0-9]+]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] !dbg [[DBG78:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I7:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[X8:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
// CHECK5-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
// CHECK5-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
// CHECK5-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
// CHECK5-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[Y_ADDR]], align 8, !dbg [[DBG79:![0-9]+]]
// CHECK5-NEXT: [[TMP1:%.*]] = load float**, float*** [[A_ADDR]], align 8, !dbg [[DBG79]]
// CHECK5-NEXT: [[TMP2:%.*]] = load float**, float*** [[B_ADDR]], align 8, !dbg [[DBG79]]
// CHECK5-NEXT: [[TMP3:%.*]] = load float**, float*** [[C_ADDR]], align 8, !dbg [[DBG79]]
// CHECK5-NEXT: [[TMP4:%.*]] = load float**, float*** [[D_ADDR]], align 8, !dbg [[DBG79]]
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4, !dbg [[DBG80:![0-9]+]]
// CHECK5-NEXT: [[CONV:%.*]] = trunc i32 [[TMP5]] to i8, !dbg [[DBG80]]
// CHECK5-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1, !dbg [[DBG80]]
// CHECK5-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !dbg [[DBG80]]
// CHECK5-NEXT: [[CONV3:%.*]] = sext i8 [[TMP6]] to i32, !dbg [[DBG80]]
// CHECK5-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]], !dbg [[DBG80]]
// CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1, !dbg [[DBG80]]
// CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1, !dbg [[DBG80]]
// CHECK5-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64, !dbg [[DBG80]]
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11, !dbg [[DBG81:![0-9]+]]
// CHECK5-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1, !dbg [[DBG81]]
// CHECK5-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8, !dbg [[DBG80]]
// CHECK5-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !dbg [[DBG80]]
// CHECK5-NEXT: store i8 [[TMP7]], i8* [[I]], align 1, !dbg [[DBG80]]
// CHECK5-NEXT: store i32 11, i32* [[X]], align 4, !dbg [[DBG81]]
// CHECK5-NEXT: [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !dbg [[DBG80]]
// CHECK5-NEXT: [[CONV6:%.*]] = sext i8 [[TMP8]] to i32, !dbg [[DBG80]]
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV6]], 57, !dbg [[DBG80]]
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]], !dbg [[DBG79]]
// CHECK5: omp.precond.then:
// CHECK5-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8, !dbg [[DBG80]]
// CHECK5-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8, !dbg [[DBG81]]
// CHECK5-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8, !dbg [[DBG80]]
// CHECK5-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8, !dbg [[DBG80]]
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG80]]
// CHECK5-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8, !dbg [[DBG81]]
// CHECK5-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG79]]
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4, !dbg [[DBG79]]
// CHECK5-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB25]], i32 [[TMP12]], i32 1073741862, i64 0, i64 [[TMP10]], i64 1, i64 1), !dbg [[DBG79]]
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]], !dbg [[DBG79]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG79]]
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4, !dbg [[DBG79]]
// CHECK5-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB25]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]]), !dbg [[DBG79]]
// CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0, !dbg [[DBG79]]
// CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]], !dbg [[DBG79]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8, !dbg [[DBG80]]
// CHECK5-NEXT: store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8, !dbg [[DBG80]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG79]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG80]], !llvm.access.group !82
// CHECK5-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !dbg [[DBG80]], !llvm.access.group !82
// CHECK5-NEXT: [[CMP9:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]], !dbg [[DBG80]]
// CHECK5-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG79]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP19:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !dbg [[DBG80]], !llvm.access.group !82
// CHECK5-NEXT: [[CONV10:%.*]] = sext i8 [[TMP19]] to i64, !dbg [[DBG80]]
// CHECK5-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG80]], !llvm.access.group !82
// CHECK5-NEXT: [[DIV11:%.*]] = sdiv i64 [[TMP20]], 11, !dbg [[DBG80]]
// CHECK5-NEXT: [[MUL12:%.*]] = mul nsw i64 [[DIV11]], 1, !dbg [[DBG80]]
// CHECK5-NEXT: [[ADD13:%.*]] = add nsw i64 [[CONV10]], [[MUL12]], !dbg [[DBG80]]
// CHECK5-NEXT: [[CONV14:%.*]] = trunc i64 [[ADD13]] to i8, !dbg [[DBG80]]
// CHECK5-NEXT: store i8 [[CONV14]], i8* [[I7]], align 1, !dbg [[DBG80]], !llvm.access.group !82
// CHECK5-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG80]], !llvm.access.group !82
// CHECK5-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG80]], !llvm.access.group !82
// CHECK5-NEXT: [[DIV15:%.*]] = sdiv i64 [[TMP22]], 11, !dbg [[DBG80]]
// CHECK5-NEXT: [[MUL16:%.*]] = mul nsw i64 [[DIV15]], 11, !dbg [[DBG80]]
// CHECK5-NEXT: [[SUB17:%.*]] = sub nsw i64 [[TMP21]], [[MUL16]], !dbg [[DBG80]]
// CHECK5-NEXT: [[MUL18:%.*]] = mul nsw i64 [[SUB17]], 1, !dbg [[DBG81]]
// CHECK5-NEXT: [[SUB19:%.*]] = sub nsw i64 11, [[MUL18]], !dbg [[DBG81]]
// CHECK5-NEXT: [[CONV20:%.*]] = trunc i64 [[SUB19]] to i32, !dbg [[DBG81]]
// CHECK5-NEXT: store i32 [[CONV20]], i32* [[X8]], align 4, !dbg [[DBG81]], !llvm.access.group !82
// CHECK5-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP2]], align 8, !dbg [[DBG83:![0-9]+]], !llvm.access.group !82
// CHECK5-NEXT: [[TMP24:%.*]] = load i8, i8* [[I7]], align 1, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP24]] to i64, !dbg [[DBG83]]
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM]], !dbg [[DBG83]]
// CHECK5-NEXT: [[TMP25:%.*]] = load float, float* [[ARRAYIDX]], align 4, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[TMP26:%.*]] = load float*, float** [[TMP3]], align 8, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[TMP27:%.*]] = load i8, i8* [[I7]], align 1, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[IDXPROM21:%.*]] = sext i8 [[TMP27]] to i64, !dbg [[DBG83]]
// CHECK5-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds float, float* [[TMP26]], i64 [[IDXPROM21]], !dbg [[DBG83]]
// CHECK5-NEXT: [[TMP28:%.*]] = load float, float* [[ARRAYIDX22]], align 4, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[MUL23:%.*]] = fmul float [[TMP25]], [[TMP28]], !dbg [[DBG83]]
// CHECK5-NEXT: [[TMP29:%.*]] = load float*, float** [[TMP4]], align 8, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[TMP30:%.*]] = load i8, i8* [[I7]], align 1, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP30]] to i64, !dbg [[DBG83]]
// CHECK5-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, float* [[TMP29]], i64 [[IDXPROM24]], !dbg [[DBG83]]
// CHECK5-NEXT: [[TMP31:%.*]] = load float, float* [[ARRAYIDX25]], align 4, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[MUL26:%.*]] = fmul float [[MUL23]], [[TMP31]], !dbg [[DBG83]]
// CHECK5-NEXT: [[TMP32:%.*]] = load float*, float** [[TMP1]], align 8, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[TMP33:%.*]] = load i8, i8* [[I7]], align 1, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP33]] to i64, !dbg [[DBG83]]
// CHECK5-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, float* [[TMP32]], i64 [[IDXPROM27]], !dbg [[DBG83]]
// CHECK5-NEXT: store float [[MUL26]], float* [[ARRAYIDX28]], align 4, !dbg [[DBG83]], !llvm.access.group !82
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG84:![0-9]+]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG79]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !dbg [[DBG80]], !llvm.access.group !82
// CHECK5-NEXT: [[ADD29:%.*]] = add nsw i64 [[TMP34]], 1, !dbg [[DBG80]]
// CHECK5-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8, !dbg [[DBG80]], !llvm.access.group !82
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG79]], !llvm.loop [[LOOP85:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]], !dbg [[DBG79]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]], !dbg [[DBG79]], !llvm.loop [[LOOP87:![0-9]+]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: br label [[OMP_PRECOND_END]], !dbg [[DBG79]]
// CHECK5: omp.precond.end:
// CHECK5-NEXT: ret void, !dbg [[DBG84]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] !dbg [[DBG88:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[X]], align 4, !dbg [[DBG89:![0-9]+]]
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB27:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]]), !dbg [[DBG90:![0-9]+]]
// CHECK5-NEXT: ret void, !dbg [[DBG91:![0-9]+]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] !dbg [[DBG92:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
// CHECK5-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
// CHECK5-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
// CHECK5-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8, !dbg [[DBG93:![0-9]+]]
// CHECK5-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8, !dbg [[DBG93]]
// CHECK5-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8, !dbg [[DBG93]]
// CHECK5-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8, !dbg [[DBG93]]
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG94:![0-9]+]]
// CHECK5-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG94]]
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG94]]
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG94]]
// CHECK5-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG93]]
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, !dbg [[DBG93]]
// CHECK5-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB27]], i32 [[TMP5]], i32 1073741861, i32 0, i32 199, i32 1, i32 1), !dbg [[DBG93]]
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]], !dbg [[DBG93]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB27]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]]), !dbg [[DBG93]]
// CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0, !dbg [[DBG93]]
// CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]], !dbg [[DBG93]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG94]]
// CHECK5-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG94]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG93]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG94]], !llvm.access.group !95
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG94]], !llvm.access.group !95
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]], !dbg [[DBG94]]
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG93]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG94]], !llvm.access.group !95
// CHECK5-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 20, !dbg [[DBG94]]
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1, !dbg [[DBG94]]
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]], !dbg [[DBG94]]
// CHECK5-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8, !dbg [[DBG94]]
// CHECK5-NEXT: store i8 [[CONV]], i8* [[I]], align 1, !dbg [[DBG94]], !llvm.access.group !95
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG94]], !llvm.access.group !95
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG94]], !llvm.access.group !95
// CHECK5-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP12]], 20, !dbg [[DBG94]]
// CHECK5-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 20, !dbg [[DBG94]]
// CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL3]], !dbg [[DBG94]]
// CHECK5-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1, !dbg [[DBG96:![0-9]+]]
// CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 -10, [[MUL4]], !dbg [[DBG96]]
// CHECK5-NEXT: store i32 [[ADD5]], i32* [[X]], align 4, !dbg [[DBG96]], !llvm.access.group !95
// CHECK5-NEXT: [[TMP13:%.*]] = load float*, float** [[TMP1]], align 8, !dbg [[DBG97:![0-9]+]], !llvm.access.group !95
// CHECK5-NEXT: [[TMP14:%.*]] = load i8, i8* [[I]], align 1, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP14]] to i64, !dbg [[DBG97]]
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP13]], i64 [[IDXPROM]], !dbg [[DBG97]]
// CHECK5-NEXT: [[TMP15:%.*]] = load float, float* [[ARRAYIDX]], align 4, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP2]], align 8, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[TMP17:%.*]] = load i8, i8* [[I]], align 1, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP17]] to i64, !dbg [[DBG97]]
// CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM6]], !dbg [[DBG97]]
// CHECK5-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX7]], align 4, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[MUL8:%.*]] = fmul float [[TMP15]], [[TMP18]], !dbg [[DBG97]]
// CHECK5-NEXT: [[TMP19:%.*]] = load float*, float** [[TMP3]], align 8, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[TMP20:%.*]] = load i8, i8* [[I]], align 1, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP20]] to i64, !dbg [[DBG97]]
// CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP19]], i64 [[IDXPROM9]], !dbg [[DBG97]]
// CHECK5-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[MUL11:%.*]] = fmul float [[MUL8]], [[TMP21]], !dbg [[DBG97]]
// CHECK5-NEXT: [[TMP22:%.*]] = load float*, float** [[TMP0]], align 8, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[TMP23:%.*]] = load i8, i8* [[I]], align 1, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP23]] to i64, !dbg [[DBG97]]
// CHECK5-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, float* [[TMP22]], i64 [[IDXPROM12]], !dbg [[DBG97]]
// CHECK5-NEXT: store float [[MUL11]], float* [[ARRAYIDX13]], align 4, !dbg [[DBG97]], !llvm.access.group !95
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG98:![0-9]+]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG93]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG94]], !llvm.access.group !95
// CHECK5-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP24]], 1, !dbg [[DBG94]]
// CHECK5-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG94]], !llvm.access.group !95
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG93]], !llvm.loop [[LOOP99:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]], !dbg [[DBG93]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]], !dbg [[DBG93]], !llvm.loop [[LOOP101:![0-9]+]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: ret void, !dbg [[DBG98]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z3foov
// CHECK5-SAME: () #[[ATTR3:[0-9]+]] !dbg [[DBG102:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: call void @_Z8mayThrowv(), !dbg [[DBG103:![0-9]+]]
// CHECK5-NEXT: ret i32 0, !dbg [[DBG103]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z12parallel_forPfi
// CHECK5-SAME: (float* [[A:%.*]], i32 [[N:%.*]]) #[[ATTR0]] !dbg [[DBG104:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4, !dbg [[DBG105:![0-9]+]]
// CHECK5-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG105]]
// CHECK5-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG105]]
// CHECK5-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG105]]
// CHECK5-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16, !dbg [[DBG105]]
// CHECK5-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG105]]
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4, !dbg [[DBG106:![0-9]+]]
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*, !dbg [[DBG106]]
// CHECK5-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4, !dbg [[DBG106]]
// CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8, !dbg [[DBG106]]
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB32:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, i64, i64)* @.omp_outlined..8 to void (i32*, i32*, ...)*), float** [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]]), !dbg [[DBG106]]
// CHECK5-NEXT: [[TMP5:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG107:![0-9]+]]
// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP5]]), !dbg [[DBG107]]
// CHECK5-NEXT: ret void, !dbg [[DBG107]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], i64 [[VLA:%.*]], i64 [[N:%.*]]) #[[ATTR1]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG108:![0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
// CHECK5-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
// CHECK5-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8, !dbg [[DBG109:![0-9]+]]
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG109]]
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*, !dbg [[DBG109]]
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG110:![0-9]+]]
// CHECK5-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG109]]
// CHECK5-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG109]]
// CHECK5-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16, !dbg [[DBG109]]
// CHECK5-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG109]]
// CHECK5-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG109]]
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4, !dbg [[DBG109]]
// CHECK5-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB29:[0-9]+]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5), !dbg [[DBG109]]
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]], !dbg [[DBG109]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP5]], 16908288, !dbg [[DBG110]]
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]], !dbg [[DBG110]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]], !dbg [[DBG110]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: br label [[COND_END]], !dbg [[DBG110]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ], !dbg [[DBG110]]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP8]], [[TMP9]], !dbg [[DBG110]]
// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_CLEANUP:%.*]], !dbg [[DBG109]]
// CHECK5: omp.dispatch.cleanup:
// CHECK5-NEXT: br label [[OMP_DISPATCH_END:%.*]], !dbg [[DBG109]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]], !dbg [[DBG109]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[CMP3:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]], !dbg [[DBG110]]
// CHECK5-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]], !dbg [[DBG109]]
// CHECK5: omp.inner.for.cond.cleanup:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_END:%.*]], !dbg [[DBG109]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 127, !dbg [[DBG110]]
// CHECK5-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]], !dbg [[DBG110]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[CALL:%.*]] = invoke i32 @_Z3foov()
// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG111:![0-9]+]]
// CHECK5: invoke.cont:
// CHECK5-NEXT: [[CONV4:%.*]] = sitofp i32 [[CALL]] to float, !dbg [[DBG111]]
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG111]]
// CHECK5-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP13]] to i64, !dbg [[DBG111]]
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[VLA1]], i64 [[IDXPROM]], !dbg [[DBG111]]
// CHECK5-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4, !dbg [[DBG111]]
// CHECK5-NEXT: [[ADD5:%.*]] = fadd float [[CONV4]], [[TMP14]], !dbg [[DBG111]]
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[CONV]], align 8, !dbg [[DBG111]]
// CHECK5-NEXT: [[CONV6:%.*]] = sitofp i32 [[TMP15]] to float, !dbg [[DBG111]]
// CHECK5-NEXT: [[ADD7:%.*]] = fadd float [[ADD5]], [[CONV6]], !dbg [[DBG111]]
// CHECK5-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP0]], align 8, !dbg [[DBG111]]
// CHECK5-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !dbg [[DBG111]]
// CHECK5-NEXT: [[IDXPROM8:%.*]] = zext i32 [[TMP17]] to i64, !dbg [[DBG111]]
// CHECK5-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM8]], !dbg [[DBG111]]
// CHECK5-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4, !dbg [[DBG111]]
// CHECK5-NEXT: [[ADD10:%.*]] = fadd float [[TMP18]], [[ADD7]], !dbg [[DBG111]]
// CHECK5-NEXT: store float [[ADD10]], float* [[ARRAYIDX9]], align 4, !dbg [[DBG111]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]], !dbg [[DBG111]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]], !dbg [[DBG109]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[ADD11:%.*]] = add i32 [[TMP19]], 1, !dbg [[DBG110]]
// CHECK5-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !dbg [[DBG109]], !llvm.loop [[LOOP112:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]], !dbg [[DBG109]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[ADD12:%.*]] = add i32 [[TMP20]], [[TMP21]], !dbg [[DBG110]]
// CHECK5-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[TMP23]], !dbg [[DBG110]]
// CHECK5-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4, !dbg [[DBG110]]
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]], !dbg [[DBG109]], !llvm.loop [[LOOP113:![0-9]+]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB31:[0-9]+]], i32 [[TMP4]]), !dbg [[DBG109]]
// CHECK5-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG109]]
// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP24]]), !dbg [[DBG109]]
// CHECK5-NEXT: ret void, !dbg [[DBG111]]
// CHECK5: terminate.lpad:
// CHECK5-NEXT: [[TMP25:%.*]] = landingpad { i8*, i32 }
// CHECK5-NEXT: catch i8* null, !dbg [[DBG111]]
// CHECK5-NEXT: [[TMP26:%.*]] = extractvalue { i8*, i32 } [[TMP25]], 0, !dbg [[DBG111]]
// CHECK5-NEXT: call void @__clang_call_terminate(i8* [[TMP26]]) #[[ATTR7:[0-9]+]], !dbg [[DBG111]]
// CHECK5-NEXT: unreachable, !dbg [[DBG111]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@__clang_call_terminate
// CHECK5-SAME: (i8* [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] {
// CHECK5-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR2:[0-9]+]]
// CHECK5-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
// CHECK5-NEXT: unreachable
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z17with_var_schedulev
// CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A:%.*]] = alloca double, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK6-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store double 5.000000e+00, double* [[A]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load double, double* [[A]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = fptosi double [[TMP0]] to i8
// CHECK6-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT: [[TMP1:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK6-NEXT: store i8 [[TMP1]], i8* [[CONV1]], align 1
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP2]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca double, align 8
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[A:%.*]] = alloca double, align 8
// CHECK6-NEXT: [[I5:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK6-NEXT: [[TMP0:%.*]] = load double, double* undef, align 8
// CHECK6-NEXT: [[ADD:%.*]] = fadd double 2.000000e+00, [[TMP0]]
// CHECK6-NEXT: store double [[ADD]], double* [[DOTCAPTURE_EXPR_1]], align 8
// CHECK6-NEXT: [[TMP1:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
// CHECK6-NEXT: [[SUB:%.*]] = fsub double [[TMP1]], 1.000000e+00
// CHECK6-NEXT: [[DIV:%.*]] = fdiv double [[SUB]], 1.000000e+00
// CHECK6-NEXT: [[CONV3:%.*]] = fptoui double [[DIV]] to i64
// CHECK6-NEXT: [[SUB4:%.*]] = sub i64 [[CONV3]], 1
// CHECK6-NEXT: store i64 [[SUB4]], i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK6-NEXT: store i64 1, i64* [[I]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load double, double* [[DOTCAPTURE_EXPR_1]], align 8
// CHECK6-NEXT: [[CMP:%.*]] = fcmp olt double 1.000000e+00, [[TMP2]]
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK6: omp.precond.then:
// CHECK6-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK6-NEXT: store i64 [[TMP3]], i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 8
// CHECK6-NEXT: [[CONV6:%.*]] = sext i8 [[TMP4]] to i64
// CHECK6-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 33, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 [[CONV6]])
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK6: omp.dispatch.cond:
// CHECK6-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK6-NEXT: [[CMP7:%.*]] = icmp ugt i64 [[TMP7]], [[TMP8]]
// CHECK6-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i64 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT: store i64 [[TMP11]], i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: [[ADD8:%.*]] = add i64 [[TMP13]], 1
// CHECK6-NEXT: [[CMP9:%.*]] = icmp ult i64 [[TMP12]], [[ADD8]]
// CHECK6-NEXT: br i1 [[CMP9]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK6: omp.dispatch.body:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: [[ADD10:%.*]] = add i64 [[TMP15]], 1
// CHECK6-NEXT: [[CMP11:%.*]] = icmp ult i64 [[TMP14]], [[ADD10]]
// CHECK6-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT: [[MUL:%.*]] = mul i64 [[TMP16]], 1
// CHECK6-NEXT: [[ADD12:%.*]] = add i64 1, [[MUL]]
// CHECK6-NEXT: store i64 [[ADD12]], i64* [[I5]], align 8
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT: [[ADD13:%.*]] = add i64 [[TMP17]], 1
// CHECK6-NEXT: store i64 [[ADD13]], i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK6: omp.dispatch.inc:
// CHECK6-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
// CHECK6-NEXT: [[ADD14:%.*]] = add i64 [[TMP18]], [[TMP19]]
// CHECK6-NEXT: store i64 [[ADD14]], i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_STRIDE]], align 8
// CHECK6-NEXT: [[ADD15:%.*]] = add i64 [[TMP20]], [[TMP21]]
// CHECK6-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK6: omp.dispatch.end:
// CHECK6-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]])
// CHECK6-NEXT: br label [[OMP_PRECOND_END]]
// CHECK6: omp.precond.end:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z23without_schedule_clausePfS_S_S_
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..1
|
|
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK6-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK6: cond.true:
|
|
// CHECK6-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK6: cond.false:
|
|
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: br label [[COND_END]]
|
|
// CHECK6: cond.end:
|
|
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK6: omp.inner.for.cond:
|
|
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK6: omp.inner.for.body:
|
|
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]]
|
|
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK6-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK6-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK6-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK6-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK6-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK6-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK6-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK6-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK6-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK6-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK6-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK6-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK6: omp.body.continue:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK6: omp.inner.for.inc:
|
|
// CHECK6-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK6-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK6: omp.inner.for.end:
|
|
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK6: omp.loop.exit:
|
|
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
|
|
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..2
|
|
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK6-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423
|
|
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK6: cond.true:
|
|
// CHECK6-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK6: cond.false:
|
|
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: br label [[COND_END]]
|
|
// CHECK6: cond.end:
|
|
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK6: omp.inner.for.cond:
|
|
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK6: omp.inner.for.body:
|
|
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7
|
|
// CHECK6-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
|
|
// CHECK6-NEXT: store i32 [[SUB]], i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[TMP12:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM]]
|
|
// CHECK6-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK6-NEXT: [[TMP15:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK6-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM2]]
|
|
// CHECK6-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX3]], align 4
|
|
// CHECK6-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]]
|
|
// CHECK6-NEXT: [[TMP18:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK6-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM5]]
|
|
// CHECK6-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX6]], align 4
|
|
// CHECK6-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]]
|
|
// CHECK6-NEXT: [[TMP21:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK6-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM8]]
|
|
// CHECK6-NEXT: store float [[MUL7]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK6: omp.body.continue:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK6: omp.inner.for.inc:
|
|
// CHECK6-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1
|
|
// CHECK6-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK6: omp.inner.for.end:
|
|
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK6: omp.loop.exit:
|
|
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@_Z14static_chunkedPfS_S_S_
|
|
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..3
|
|
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK6-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK6-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK6: omp.dispatch.cond:
|
|
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288
|
|
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK6: cond.true:
|
|
// CHECK6-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK6: cond.false:
|
|
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: br label [[COND_END]]
|
|
// CHECK6: cond.end:
|
|
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
|
|
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]]
|
|
// CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK6: omp.dispatch.body:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK6: omp.inner.for.cond:
|
|
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]]
|
|
// CHECK6-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK6: omp.inner.for.body:
|
|
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127
|
|
// CHECK6-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP1]], align 8
|
|
// CHECK6-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM]]
|
|
// CHECK6-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK6-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP2]], align 8
|
|
// CHECK6-NEXT: [[TMP18:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM3]]
|
|
// CHECK6-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4
|
|
// CHECK6-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]]
|
|
// CHECK6-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP3]], align 8
|
|
// CHECK6-NEXT: [[TMP21:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[IDXPROM6]]
|
|
// CHECK6-NEXT: [[TMP22:%.*]] = load float, float* [[ARRAYIDX7]], align 4
|
|
// CHECK6-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]]
|
|
// CHECK6-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK6-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM9]]
|
|
// CHECK6-NEXT: store float [[MUL8]], float* [[ARRAYIDX10]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK6: omp.body.continue:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK6: omp.inner.for.inc:
|
|
// CHECK6-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1
|
|
// CHECK6-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK6: omp.inner.for.end:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK6: omp.dispatch.inc:
|
|
// CHECK6-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK6-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]]
|
|
// CHECK6-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK6-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]]
|
|
// CHECK6-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK6: omp.dispatch.end:
|
|
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
|
|
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..4 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..4
|
|
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK6-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK6-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK6-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK6-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 1073741859, i64 0, i64 16908287, i64 1, i64 1)
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK6: omp.dispatch.cond:
|
|
// CHECK6-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK6: omp.dispatch.body:
|
|
// CHECK6-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK6-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK6: omp.inner.for.cond:
|
|
// CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK6-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK6: omp.inner.for.body:
|
|
// CHECK6-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK6-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK6-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK6-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK6-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK6-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK6-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK6-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK6-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !5
|
|
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK6: omp.body.continue:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK6: omp.inner.for.inc:
|
|
// CHECK6-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK6-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !5
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
|
|
// CHECK6: omp.inner.for.end:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK6: omp.dispatch.inc:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK6: omp.dispatch.end:
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@_Z7guided7PfS_S_S_
|
|
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..5
|
|
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[TMP:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[I:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK6-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK6-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK6-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK6-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 1073741860, i64 0, i64 16908287, i64 1, i64 7)
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK6: omp.dispatch.cond:
|
|
// CHECK6-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK6: omp.dispatch.body:
|
|
// CHECK6-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK6-NEXT: store i64 [[TMP7]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK6: omp.inner.for.cond:
|
|
// CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[ADD:%.*]] = add i64 [[TMP9]], 1
|
|
// CHECK6-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP8]], [[ADD]]
|
|
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK6: omp.inner.for.body:
|
|
// CHECK6-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 127
|
|
// CHECK6-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
|
|
// CHECK6-NEXT: store i64 [[ADD1]], i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[TMP11:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[TMP12]]
|
|
// CHECK6-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[TMP14:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[TMP15:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[TMP15]]
|
|
// CHECK6-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[MUL3:%.*]] = fmul float [[TMP13]], [[TMP16]]
|
|
// CHECK6-NEXT: [[TMP17:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[TMP18:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[TMP18]]
|
|
// CHECK6-NEXT: [[TMP19:%.*]] = load float, float* [[ARRAYIDX4]], align 4, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP19]]
|
|
// CHECK6-NEXT: [[TMP20:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[TMP21:%.*]] = load i64, i64* [[I]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP20]], i64 [[TMP21]]
|
|
// CHECK6-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4, !llvm.access.group !8
|
|
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK6: omp.body.continue:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK6: omp.inner.for.inc:
|
|
// CHECK6-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: [[ADD7:%.*]] = add i64 [[TMP22]], 1
|
|
// CHECK6-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !8
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
|
|
// CHECK6: omp.inner.for.end:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK6: omp.dispatch.inc:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK6: omp.dispatch.end:
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
|
|
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[Y:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK6-NEXT: store i32 0, i32* [[Y]], align 4
|
|
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[Y]], float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..6
|
|
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[Y:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK6-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
|
|
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK6-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[I7:%.*]] = alloca i8, align 1
|
|
// CHECK6-NEXT: [[X8:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP0:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP1:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP2:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP3:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP4:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
|
|
// CHECK6-NEXT: [[CONV:%.*]] = trunc i32 [[TMP5]] to i8
|
|
// CHECK6-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK6-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK6-NEXT: [[CONV3:%.*]] = sext i8 [[TMP6]] to i32
|
|
// CHECK6-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
|
|
// CHECK6-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
|
|
// CHECK6-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
|
|
// CHECK6-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
|
|
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
|
|
// CHECK6-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK6-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK6-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK6-NEXT: store i8 [[TMP7]], i8* [[I]], align 1
|
|
// CHECK6-NEXT: store i32 11, i32* [[X]], align 4
|
|
// CHECK6-NEXT: [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
|
|
// CHECK6-NEXT: [[CONV6:%.*]] = sext i8 [[TMP8]] to i32
|
|
// CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV6]], 57
|
|
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK6: omp.precond.then:
|
|
// CHECK6-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK6-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK6-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK6-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK6-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK6-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
|
|
// CHECK6-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 1073741862, i64 0, i64 [[TMP10]], i64 1, i64 1)
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK6: omp.dispatch.cond:
|
|
// CHECK6-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
|
|
// CHECK6-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
|
|
// CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
|
|
// CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK6: omp.dispatch.body:
|
|
// CHECK6-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK6-NEXT: store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK6: omp.inner.for.cond:
|
|
// CHECK6-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[CMP9:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]]
|
|
// CHECK6-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK6: omp.inner.for.body:
|
|
// CHECK6-NEXT: [[TMP19:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[CONV10:%.*]] = sext i8 [[TMP19]] to i64
|
|
// CHECK6-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[DIV11:%.*]] = sdiv i64 [[TMP20]], 11
|
|
// CHECK6-NEXT: [[MUL12:%.*]] = mul nsw i64 [[DIV11]], 1
|
|
// CHECK6-NEXT: [[ADD13:%.*]] = add nsw i64 [[CONV10]], [[MUL12]]
|
|
// CHECK6-NEXT: [[CONV14:%.*]] = trunc i64 [[ADD13]] to i8
|
|
// CHECK6-NEXT: store i8 [[CONV14]], i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[DIV15:%.*]] = sdiv i64 [[TMP22]], 11
|
|
// CHECK6-NEXT: [[MUL16:%.*]] = mul nsw i64 [[DIV15]], 11
|
|
// CHECK6-NEXT: [[SUB17:%.*]] = sub nsw i64 [[TMP21]], [[MUL16]]
|
|
// CHECK6-NEXT: [[MUL18:%.*]] = mul nsw i64 [[SUB17]], 1
|
|
// CHECK6-NEXT: [[SUB19:%.*]] = sub nsw i64 11, [[MUL18]]
|
|
// CHECK6-NEXT: [[CONV20:%.*]] = trunc i64 [[SUB19]] to i32
|
|
// CHECK6-NEXT: store i32 [[CONV20]], i32* [[X8]], align 4, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[TMP24:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP24]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP23]], i64 [[IDXPROM]]
|
|
// CHECK6-NEXT: [[TMP25:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[TMP26:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[TMP27:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[IDXPROM21:%.*]] = sext i8 [[TMP27]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds float, float* [[TMP26]], i64 [[IDXPROM21]]
|
|
// CHECK6-NEXT: [[TMP28:%.*]] = load float, float* [[ARRAYIDX22]], align 4, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[MUL23:%.*]] = fmul float [[TMP25]], [[TMP28]]
|
|
// CHECK6-NEXT: [[TMP29:%.*]] = load float*, float** [[TMP4]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[TMP30:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP30]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, float* [[TMP29]], i64 [[IDXPROM24]]
|
|
// CHECK6-NEXT: [[TMP31:%.*]] = load float, float* [[ARRAYIDX25]], align 4, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[MUL26:%.*]] = fmul float [[MUL23]], [[TMP31]]
|
|
// CHECK6-NEXT: [[TMP32:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[TMP33:%.*]] = load i8, i8* [[I7]], align 1, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP33]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, float* [[TMP32]], i64 [[IDXPROM27]]
|
|
// CHECK6-NEXT: store float [[MUL26]], float* [[ARRAYIDX28]], align 4, !llvm.access.group !11
|
|
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK6: omp.body.continue:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK6: omp.inner.for.inc:
|
|
// CHECK6-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: [[ADD29:%.*]] = add nsw i64 [[TMP34]], 1
|
|
// CHECK6-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !11
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
|
|
// CHECK6: omp.inner.for.end:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK6: omp.dispatch.inc:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK6: omp.dispatch.end:
|
|
// CHECK6-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK6: omp.precond.end:
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
|
|
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: store i32 0, i32* [[X]], align 4
|
|
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), float** [[A_ADDR]], float** [[B_ADDR]], float** [[C_ADDR]], float** [[D_ADDR]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..7
|
|
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], float** nonnull align 8 dereferenceable(8) [[B:%.*]], float** nonnull align 8 dereferenceable(8) [[C:%.*]], float** nonnull align 8 dereferenceable(8) [[D:%.*]]) #[[ATTR1]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[TMP:%.*]] = alloca i8, align 1
|
|
// CHECK6-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[I:%.*]] = alloca i8, align 1
|
|
// CHECK6-NEXT: [[X:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[B]], float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[C]], float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[D]], float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP1:%.*]] = load float**, float*** [[B_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP2:%.*]] = load float**, float*** [[C_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP3:%.*]] = load float**, float*** [[D_ADDR]], align 8
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK6-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
|
|
// CHECK6-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 1073741861, i32 0, i32 199, i32 1, i32 1)
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK6: omp.dispatch.cond:
|
|
// CHECK6-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
|
|
// CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0
|
|
// CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK6: omp.dispatch.body:
|
|
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK6: omp.inner.for.cond:
|
|
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK6: omp.inner.for.body:
|
|
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 20
|
|
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
|
|
// CHECK6-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
|
|
// CHECK6-NEXT: store i8 [[CONV]], i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP12]], 20
|
|
// CHECK6-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 20
|
|
// CHECK6-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL3]]
|
|
// CHECK6-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK6-NEXT: [[ADD5:%.*]] = add nsw i32 -10, [[MUL4]]
|
|
// CHECK6-NEXT: store i32 [[ADD5]], i32* [[X]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[TMP13:%.*]] = load float*, float** [[TMP1]], align 8, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[TMP14:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP14]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP13]], i64 [[IDXPROM]]
|
|
// CHECK6-NEXT: [[TMP15:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP2]], align 8, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[TMP17:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP17]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM6]]
|
|
// CHECK6-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX7]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[MUL8:%.*]] = fmul float [[TMP15]], [[TMP18]]
|
|
// CHECK6-NEXT: [[TMP19:%.*]] = load float*, float** [[TMP3]], align 8, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[TMP20:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP20]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP19]], i64 [[IDXPROM9]]
|
|
// CHECK6-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[MUL11:%.*]] = fmul float [[MUL8]], [[TMP21]]
|
|
// CHECK6-NEXT: [[TMP22:%.*]] = load float*, float** [[TMP0]], align 8, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[TMP23:%.*]] = load i8, i8* [[I]], align 1, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP23]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, float* [[TMP22]], i64 [[IDXPROM12]]
|
|
// CHECK6-NEXT: store float [[MUL11]], float* [[ARRAYIDX13]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK6: omp.body.continue:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK6: omp.inner.for.inc:
|
|
// CHECK6-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP24]], 1
|
|
// CHECK6-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
|
|
// CHECK6: omp.inner.for.end:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK6: omp.dispatch.inc:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK6: omp.dispatch.end:
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@_Z3foov
|
|
// CHECK6-SAME: () #[[ATTR0]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: call void @_Z8mayThrowv()
|
|
// CHECK6-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@_Z12parallel_forPfi
|
|
// CHECK6-SAME: (float* [[A:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
|
|
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK6-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
|
|
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK6-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
|
|
// CHECK6-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK6-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK6-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK6-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
|
|
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
|
|
// CHECK6-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4
|
|
// CHECK6-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
|
|
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, i64, i64)* @.omp_outlined..8 to void (i32*, i32*, ...)*), float** [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]])
|
|
// CHECK6-NEXT: [[TMP5:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP5]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..8
|
|
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], float** nonnull align 8 dereferenceable(8) [[A:%.*]], i64 [[VLA:%.*]], i64 [[N:%.*]]) #[[ATTR1]] {
|
|
// CHECK6-NEXT: entry:
|
|
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float**, align 8
|
|
// CHECK6-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
|
|
// CHECK6-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
|
|
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: store float** [[A]], float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
|
|
// CHECK6-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP0:%.*]] = load float**, float*** [[A_ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
|
|
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 16908288, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK6-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
|
|
// CHECK6-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
|
|
// CHECK6-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16
|
|
// CHECK6-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
|
|
// CHECK6-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
|
|
// CHECK6-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 5)
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK6: omp.dispatch.cond:
|
|
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP5]], 16908288
|
|
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK6: cond.true:
|
|
// CHECK6-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK6: cond.false:
|
|
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: br label [[COND_END]]
|
|
// CHECK6: cond.end:
|
|
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
|
|
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP8]], [[TMP9]]
|
|
// CHECK6-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_CLEANUP:%.*]]
|
|
// CHECK6: omp.dispatch.cleanup:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK6: omp.dispatch.body:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK6: omp.inner.for.cond:
|
|
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[CMP3:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
|
|
// CHECK6-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
|
|
// CHECK6: omp.inner.for.cond.cleanup:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK6: omp.inner.for.body:
|
|
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 127
|
|
// CHECK6-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]]
|
|
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[CALL:%.*]] = call i32 @_Z3foov()
|
|
// CHECK6-NEXT: [[CONV4:%.*]] = sitofp i32 [[CALL]] to float
|
|
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP13]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[VLA1]], i64 [[IDXPROM]]
|
|
// CHECK6-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX]], align 4
|
|
// CHECK6-NEXT: [[ADD5:%.*]] = fadd float [[CONV4]], [[TMP14]]
|
|
// CHECK6-NEXT: [[TMP15:%.*]] = load i32, i32* [[CONV]], align 8
|
|
// CHECK6-NEXT: [[CONV6:%.*]] = sitofp i32 [[TMP15]] to float
|
|
// CHECK6-NEXT: [[ADD7:%.*]] = fadd float [[ADD5]], [[CONV6]]
|
|
// CHECK6-NEXT: [[TMP16:%.*]] = load float*, float** [[TMP0]], align 8
|
|
// CHECK6-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4
|
|
// CHECK6-NEXT: [[IDXPROM8:%.*]] = zext i32 [[TMP17]] to i64
|
|
// CHECK6-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 [[IDXPROM8]]
|
|
// CHECK6-NEXT: [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4
|
|
// CHECK6-NEXT: [[ADD10:%.*]] = fadd float [[TMP18]], [[ADD7]]
|
|
// CHECK6-NEXT: store float [[ADD10]], float* [[ARRAYIDX9]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK6: omp.body.continue:
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK6: omp.inner.for.inc:
|
|
// CHECK6-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: [[ADD11:%.*]] = add i32 [[TMP19]], 1
|
|
// CHECK6-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK6: omp.inner.for.end:
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK6: omp.dispatch.inc:
|
|
// CHECK6-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK6-NEXT: [[ADD12:%.*]] = add i32 [[TMP20]], [[TMP21]]
|
|
// CHECK6-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
|
|
// CHECK6-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK6-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[TMP23]]
|
|
// CHECK6-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK6: omp.dispatch.end:
|
|
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
|
|
// CHECK6-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
|
|
// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP24]])
|
|
// CHECK6-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK11-LABEL: define {{[^@]+}}@_Z9incrementv
|
|
// CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK11-NEXT: entry:
|
|
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
|
|
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK11-NEXT: store i32 4, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 4
|
|
// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK11: cond.true:
|
|
// CHECK11-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK11: cond.false:
|
|
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: br label [[COND_END]]
|
|
// CHECK11: cond.end:
|
|
// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
|
|
// CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK11: omp.inner.for.cond:
|
|
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
|
|
// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK11: omp.inner.for.body:
|
|
// CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1
|
|
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4
|
|
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK11: omp.body.continue:
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK11: omp.inner.for.inc:
|
|
// CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], 1
|
|
// CHECK11-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK11: omp.inner.for.end:
|
|
// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK11: omp.loop.exit:
|
|
// CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK11-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP0]])
|
|
// CHECK11-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK11-LABEL: define {{[^@]+}}@_Z16decrement_nowaitv
|
|
// CHECK11-SAME: () #[[ATTR0]] {
|
|
// CHECK11-NEXT: entry:
|
|
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
|
|
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK11-NEXT: store i32 4, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
|
|
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 4
|
|
// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK11: cond.true:
|
|
// CHECK11-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK11: cond.false:
|
|
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: br label [[COND_END]]
|
|
// CHECK11: cond.end:
|
|
// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
|
|
// CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
|
|
// CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK11: omp.inner.for.cond:
|
|
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
|
|
// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
|
|
// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK11: omp.inner.for.body:
|
|
// CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1
|
|
// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 5, [[MUL]]
|
|
// CHECK11-NEXT: store i32 [[SUB]], i32* [[J]], align 4
|
|
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK11: omp.body.continue:
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK11: omp.inner.for.inc:
|
|
// CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1
|
|
// CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK11: omp.inner.for.end:
|
|
// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK11: omp.loop.exit:
|
|
// CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
|
|
// CHECK11-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK11-LABEL: define {{[^@]+}}@_Z16range_for_singlev
|
|
// CHECK11-SAME: () #[[ATTR3:[0-9]+]] {
|
|
// CHECK11-NEXT: entry:
|
|
// CHECK11-NEXT: [[ARR:%.*]] = alloca [10 x i32], align 16
|
|
// CHECK11-NEXT: [[TMP0:%.*]] = bitcast [10 x i32]* [[ARR]] to i8*
|
|
// CHECK11-NEXT: call void @llvm.memset.p0i8.i64(i8* align 16 [[TMP0]], i8 0, i64 40, i1 false)
|
|
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined. to void (i32*, i32*, ...)*), [10 x i32]* [[ARR]])
|
|
// CHECK11-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
|
|
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[ARR:%.*]]) #[[ATTR5:[0-9]+]] {
|
|
// CHECK11-NEXT: entry:
|
|
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[ARR_ADDR:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[__RANGE1:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK11-NEXT: [[__END1:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[__BEGIN1:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[__BEGIN15:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[A:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK11-NEXT: store [10 x i32]* [[ARR]], [10 x i32]** [[ARR_ADDR]], align 8
|
|
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[ARR_ADDR]], align 8
|
|
// CHECK11-NEXT: store [10 x i32]* [[TMP0]], [10 x i32]** [[__RANGE1]], align 8
|
|
// CHECK11-NEXT: [[TMP1:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE1]], align 8
|
|
// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP1]], i64 0, i64 0
|
|
// CHECK11-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAYDECAY]], i64 10
|
|
// CHECK11-NEXT: store i32* [[ADD_PTR]], i32** [[__END1]], align 8
|
|
// CHECK11-NEXT: [[TMP2:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE1]], align 8
|
|
// CHECK11-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP2]], i64 0, i64 0
|
|
// CHECK11-NEXT: store i32* [[ARRAYDECAY1]], i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: [[TMP3:%.*]] = load i32*, i32** [[__END1]], align 8
|
|
// CHECK11-NEXT: store i32* [[TMP3]], i32** [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK11-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK11-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint i32* [[TMP4]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint i32* [[TMP5]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
|
|
// CHECK11-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 4
|
|
// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
|
|
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
|
|
// CHECK11-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
|
|
// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i64 [[DIV]], 1
|
|
// CHECK11-NEXT: store i64 [[SUB4]], i64* [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK11-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: store i32* [[TMP6]], i32** [[__BEGIN1]], align 8
|
|
// CHECK11-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_2]], align 8
|
|
// CHECK11-NEXT: [[CMP:%.*]] = icmp ult i32* [[TMP7]], [[TMP8]]
|
|
// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK11: omp.precond.then:
|
|
// CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK11-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK11-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
|
|
// CHECK11-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
|
|
// CHECK11-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK11-NEXT: [[CMP6:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]]
|
|
// CHECK11-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK11: cond.true:
|
|
// CHECK11-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK11-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK11: cond.false:
|
|
// CHECK11-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: br label [[COND_END]]
|
|
// CHECK11: cond.end:
|
|
// CHECK11-NEXT: [[COND:%.*]] = phi i64 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
|
|
// CHECK11-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK11-NEXT: store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK11: omp.inner.for.cond:
|
|
// CHECK11-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: [[CMP7:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]]
|
|
// CHECK11-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK11: omp.inner.for.body:
|
|
// CHECK11-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP20]], 1
|
|
// CHECK11-NEXT: [[ADD_PTR8:%.*]] = getelementptr inbounds i32, i32* [[TMP19]], i64 [[MUL]]
|
|
// CHECK11-NEXT: store i32* [[ADD_PTR8]], i32** [[__BEGIN15]], align 8
|
|
// CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[__BEGIN15]], align 8
|
|
// CHECK11-NEXT: store i32* [[TMP21]], i32** [[A]], align 8
|
|
// CHECK11-NEXT: [[TMP22:%.*]] = load i32*, i32** [[A]], align 8
|
|
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK11: omp.body.continue:
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK11: omp.inner.for.inc:
|
|
// CHECK11-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: [[ADD9:%.*]] = add nsw i64 [[TMP23]], 1
|
|
// CHECK11-NEXT: store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK11: omp.inner.for.end:
|
|
// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK11: omp.loop.exit:
|
|
// CHECK11-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
|
|
// CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP25]])
|
|
// CHECK11-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK11: omp.precond.end:
|
|
// CHECK11-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK11-LABEL: define {{[^@]+}}@_Z19range_for_collapsedv
|
|
// CHECK11-SAME: () #[[ATTR3]] {
|
|
// CHECK11-NEXT: entry:
|
|
// CHECK11-NEXT: [[ARR:%.*]] = alloca [10 x i32], align 16
|
|
// CHECK11-NEXT: [[TMP0:%.*]] = bitcast [10 x i32]* [[ARR]] to i8*
|
|
// CHECK11-NEXT: call void @llvm.memset.p0i8.i64(i8* align 16 [[TMP0]], i8 0, i64 40, i1 false)
|
|
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [10 x i32]* [[ARR]])
|
|
// CHECK11-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
|
|
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[ARR:%.*]]) #[[ATTR5]] {
|
|
// CHECK11-NEXT: entry:
|
|
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[ARR_ADDR:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[__RANGE1:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK11-NEXT: [[__END1:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[__RANGE2:%.*]] = alloca [10 x i32]*, align 8
|
|
// CHECK11-NEXT: [[__END2:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[__BEGIN1:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[__BEGIN2:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: [[__BEGIN119:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[__BEGIN220:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[A:%.*]] = alloca i32*, align 8
|
|
// CHECK11-NEXT: [[B:%.*]] = alloca i32, align 4
|
|
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK11-NEXT: store [10 x i32]* [[ARR]], [10 x i32]** [[ARR_ADDR]], align 8
|
|
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[ARR_ADDR]], align 8
|
|
// CHECK11-NEXT: store [10 x i32]* [[TMP0]], [10 x i32]** [[__RANGE1]], align 8
|
|
// CHECK11-NEXT: [[TMP1:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE1]], align 8
|
|
// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP1]], i64 0, i64 0
|
|
// CHECK11-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAYDECAY]], i64 10
|
|
// CHECK11-NEXT: store i32* [[ADD_PTR]], i32** [[__END1]], align 8
|
|
// CHECK11-NEXT: store [10 x i32]* [[TMP0]], [10 x i32]** [[__RANGE2]], align 8
|
|
// CHECK11-NEXT: [[TMP2:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE2]], align 8
|
|
// CHECK11-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP2]], i64 0, i64 0
|
|
// CHECK11-NEXT: [[ADD_PTR3:%.*]] = getelementptr inbounds i32, i32* [[ARRAYDECAY2]], i64 10
|
|
// CHECK11-NEXT: store i32* [[ADD_PTR3]], i32** [[__END2]], align 8
|
|
// CHECK11-NEXT: [[TMP3:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE1]], align 8
|
|
// CHECK11-NEXT: [[ARRAYDECAY4:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP3]], i64 0, i64 0
|
|
// CHECK11-NEXT: store i32* [[ARRAYDECAY4]], i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: [[TMP4:%.*]] = load i32*, i32** [[__END1]], align 8
|
|
// CHECK11-NEXT: store i32* [[TMP4]], i32** [[DOTCAPTURE_EXPR_5]], align 8
|
|
// CHECK11-NEXT: [[TMP5:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE2]], align 8
|
|
// CHECK11-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP5]], i64 0, i64 0
|
|
// CHECK11-NEXT: store i32* [[ARRAYDECAY7]], i32** [[DOTCAPTURE_EXPR_6]], align 8
|
|
// CHECK11-NEXT: [[TMP6:%.*]] = load i32*, i32** [[__END2]], align 8
|
|
// CHECK11-NEXT: store i32* [[TMP6]], i32** [[DOTCAPTURE_EXPR_8]], align 8
|
|
// CHECK11-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_5]], align 8
|
|
// CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint i32* [[TMP7]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint i32* [[TMP8]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
|
|
// CHECK11-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 4
|
|
// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
|
|
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
|
|
// CHECK11-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
|
|
// CHECK11-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
|
|
// CHECK11-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
|
|
// CHECK11-NEXT: [[SUB_PTR_LHS_CAST10:%.*]] = ptrtoint i32* [[TMP9]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_RHS_CAST11:%.*]] = ptrtoint i32* [[TMP10]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_SUB12:%.*]] = sub i64 [[SUB_PTR_LHS_CAST10]], [[SUB_PTR_RHS_CAST11]]
|
|
// CHECK11-NEXT: [[SUB_PTR_DIV13:%.*]] = sdiv exact i64 [[SUB_PTR_SUB12]], 4
|
|
// CHECK11-NEXT: [[SUB14:%.*]] = sub nsw i64 [[SUB_PTR_DIV13]], 1
|
|
// CHECK11-NEXT: [[ADD15:%.*]] = add nsw i64 [[SUB14]], 1
|
|
// CHECK11-NEXT: [[DIV16:%.*]] = sdiv i64 [[ADD15]], 1
|
|
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[DIV]], [[DIV16]]
|
|
// CHECK11-NEXT: [[SUB17:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK11-NEXT: store i64 [[SUB17]], i64* [[DOTCAPTURE_EXPR_9]], align 8
|
|
// CHECK11-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: store i32* [[TMP11]], i32** [[__BEGIN1]], align 8
|
|
// CHECK11-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
|
|
// CHECK11-NEXT: store i32* [[TMP12]], i32** [[__BEGIN2]], align 8
|
|
// CHECK11-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_5]], align 8
|
|
// CHECK11-NEXT: [[CMP:%.*]] = icmp ult i32* [[TMP13]], [[TMP14]]
|
|
// CHECK11-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK11: land.lhs.true:
|
|
// CHECK11-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
|
|
// CHECK11-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
|
|
// CHECK11-NEXT: [[CMP18:%.*]] = icmp ult i32* [[TMP15]], [[TMP16]]
|
|
// CHECK11-NEXT: br i1 [[CMP18]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
|
|
// CHECK11: omp.precond.then:
|
|
// CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK11-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_9]], align 8
|
|
// CHECK11-NEXT: store i64 [[TMP17]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
|
|
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK11-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
|
|
// CHECK11-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP19]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
|
|
// CHECK11-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_9]], align 8
|
|
// CHECK11-NEXT: [[CMP21:%.*]] = icmp sgt i64 [[TMP20]], [[TMP21]]
|
|
// CHECK11-NEXT: br i1 [[CMP21]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK11: cond.true:
|
|
// CHECK11-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_9]], align 8
|
|
// CHECK11-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK11: cond.false:
|
|
// CHECK11-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: br label [[COND_END]]
|
|
// CHECK11: cond.end:
|
|
// CHECK11-NEXT: [[COND:%.*]] = phi i64 [ [[TMP22]], [[COND_TRUE]] ], [ [[TMP23]], [[COND_FALSE]] ]
|
|
// CHECK11-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
|
|
// CHECK11-NEXT: store i64 [[TMP24]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK11: omp.inner.for.cond:
|
|
// CHECK11-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
|
|
// CHECK11-NEXT: [[CMP22:%.*]] = icmp sle i64 [[TMP25]], [[TMP26]]
|
|
// CHECK11-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK11: omp.inner.for.body:
|
|
// CHECK11-NEXT: [[TMP27:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
|
|
// CHECK11-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
|
|
// CHECK11-NEXT: [[TMP30:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
|
|
// CHECK11-NEXT: [[SUB_PTR_LHS_CAST23:%.*]] = ptrtoint i32* [[TMP29]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_RHS_CAST24:%.*]] = ptrtoint i32* [[TMP30]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_SUB25:%.*]] = sub i64 [[SUB_PTR_LHS_CAST23]], [[SUB_PTR_RHS_CAST24]]
|
|
// CHECK11-NEXT: [[SUB_PTR_DIV26:%.*]] = sdiv exact i64 [[SUB_PTR_SUB25]], 4
|
|
// CHECK11-NEXT: [[SUB27:%.*]] = sub nsw i64 [[SUB_PTR_DIV26]], 1
|
|
// CHECK11-NEXT: [[ADD28:%.*]] = add nsw i64 [[SUB27]], 1
|
|
// CHECK11-NEXT: [[DIV29:%.*]] = sdiv i64 [[ADD28]], 1
|
|
// CHECK11-NEXT: [[MUL30:%.*]] = mul nsw i64 1, [[DIV29]]
|
|
// CHECK11-NEXT: [[DIV31:%.*]] = sdiv i64 [[TMP28]], [[MUL30]]
|
|
// CHECK11-NEXT: [[MUL32:%.*]] = mul nsw i64 [[DIV31]], 1
|
|
// CHECK11-NEXT: [[ADD_PTR33:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i64 [[MUL32]]
|
|
// CHECK11-NEXT: store i32* [[ADD_PTR33]], i32** [[__BEGIN119]], align 8
|
|
// CHECK11-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
|
|
// CHECK11-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: [[TMP33:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: [[TMP34:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
|
|
// CHECK11-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
|
|
// CHECK11-NEXT: [[SUB_PTR_LHS_CAST34:%.*]] = ptrtoint i32* [[TMP34]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_RHS_CAST35:%.*]] = ptrtoint i32* [[TMP35]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_SUB36:%.*]] = sub i64 [[SUB_PTR_LHS_CAST34]], [[SUB_PTR_RHS_CAST35]]
|
|
// CHECK11-NEXT: [[SUB_PTR_DIV37:%.*]] = sdiv exact i64 [[SUB_PTR_SUB36]], 4
|
|
// CHECK11-NEXT: [[SUB38:%.*]] = sub nsw i64 [[SUB_PTR_DIV37]], 1
|
|
// CHECK11-NEXT: [[ADD39:%.*]] = add nsw i64 [[SUB38]], 1
|
|
// CHECK11-NEXT: [[DIV40:%.*]] = sdiv i64 [[ADD39]], 1
|
|
// CHECK11-NEXT: [[MUL41:%.*]] = mul nsw i64 1, [[DIV40]]
|
|
// CHECK11-NEXT: [[DIV42:%.*]] = sdiv i64 [[TMP33]], [[MUL41]]
|
|
// CHECK11-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
|
|
// CHECK11-NEXT: [[TMP37:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
|
|
// CHECK11-NEXT: [[SUB_PTR_LHS_CAST43:%.*]] = ptrtoint i32* [[TMP36]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_RHS_CAST44:%.*]] = ptrtoint i32* [[TMP37]] to i64
|
|
// CHECK11-NEXT: [[SUB_PTR_SUB45:%.*]] = sub i64 [[SUB_PTR_LHS_CAST43]], [[SUB_PTR_RHS_CAST44]]
|
|
// CHECK11-NEXT: [[SUB_PTR_DIV46:%.*]] = sdiv exact i64 [[SUB_PTR_SUB45]], 4
|
|
// CHECK11-NEXT: [[SUB47:%.*]] = sub nsw i64 [[SUB_PTR_DIV46]], 1
|
|
// CHECK11-NEXT: [[ADD48:%.*]] = add nsw i64 [[SUB47]], 1
|
|
// CHECK11-NEXT: [[DIV49:%.*]] = sdiv i64 [[ADD48]], 1
|
|
// CHECK11-NEXT: [[MUL50:%.*]] = mul nsw i64 1, [[DIV49]]
|
|
// CHECK11-NEXT: [[MUL51:%.*]] = mul nsw i64 [[DIV42]], [[MUL50]]
|
|
// CHECK11-NEXT: [[SUB52:%.*]] = sub nsw i64 [[TMP32]], [[MUL51]]
|
|
// CHECK11-NEXT: [[MUL53:%.*]] = mul nsw i64 [[SUB52]], 1
|
|
// CHECK11-NEXT: [[ADD_PTR54:%.*]] = getelementptr inbounds i32, i32* [[TMP31]], i64 [[MUL53]]
|
|
// CHECK11-NEXT: store i32* [[ADD_PTR54]], i32** [[__BEGIN220]], align 8
|
|
// CHECK11-NEXT: [[TMP38:%.*]] = load i32*, i32** [[__BEGIN119]], align 8
|
|
// CHECK11-NEXT: store i32* [[TMP38]], i32** [[A]], align 8
|
|
// CHECK11-NEXT: [[TMP39:%.*]] = load i32*, i32** [[__BEGIN220]], align 8
|
|
// CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[TMP39]], align 4
|
|
// CHECK11-NEXT: store i32 [[TMP40]], i32* [[B]], align 4
|
|
// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4
|
|
// CHECK11-NEXT: [[TMP42:%.*]] = load i32*, i32** [[A]], align 8
|
|
// CHECK11-NEXT: store i32 [[TMP41]], i32* [[TMP42]], align 4
|
|
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK11: omp.body.continue:
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK11: omp.inner.for.inc:
|
|
// CHECK11-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: [[ADD55:%.*]] = add nsw i64 [[TMP43]], 1
|
|
// CHECK11-NEXT: store i64 [[ADD55]], i64* [[DOTOMP_IV]], align 8
|
|
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK11: omp.inner.for.end:
|
|
// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK11: omp.loop.exit:
|
|
// CHECK11-NEXT: [[TMP44:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[TMP44]], align 4
|
|
// CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP45]])
|
|
// CHECK11-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK11: omp.precond.end:
|
|
// CHECK11-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK12-LABEL: define {{[^@]+}}@_Z9incrementv
// CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 4, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 4
// CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK12-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK12-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP0]])
// CHECK12-NEXT: ret i32 0
//
//
// CHECK12-LABEL: define {{[^@]+}}@_Z16decrement_nowaitv
// CHECK12-SAME: () #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 4, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], 4
// CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[TMP2]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]]
// CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1
// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 5, [[MUL]]
// CHECK12-NEXT: store i32 [[SUB]], i32* [[J]], align 4
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK12-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK12-NEXT: ret i32 0
//
//
// CHECK12-LABEL: define {{[^@]+}}@_Z16range_for_singlev
// CHECK12-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[ARR:%.*]] = alloca [10 x i32], align 16
// CHECK12-NEXT: [[TMP0:%.*]] = bitcast [10 x i32]* [[ARR]] to i8*
// CHECK12-NEXT: call void @llvm.memset.p0i8.i64(i8* align 16 [[TMP0]], i8 0, i64 40, i1 false)
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined. to void (i32*, i32*, ...)*), [10 x i32]* [[ARR]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[ARR:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[ARR_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[__RANGE1:%.*]] = alloca [10 x i32]*, align 8
// CHECK12-NEXT: [[__END1:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[__BEGIN1:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[__BEGIN15:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[A:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK12-NEXT: store [10 x i32]* [[ARR]], [10 x i32]** [[ARR_ADDR]], align 8
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[ARR_ADDR]], align 8
// CHECK12-NEXT: store [10 x i32]* [[TMP0]], [10 x i32]** [[__RANGE1]], align 8
// CHECK12-NEXT: [[TMP1:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE1]], align 8
// CHECK12-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP1]], i64 0, i64 0
// CHECK12-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAYDECAY]], i64 10
// CHECK12-NEXT: store i32* [[ADD_PTR]], i32** [[__END1]], align 8
// CHECK12-NEXT: [[TMP2:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE1]], align 8
// CHECK12-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP2]], i64 0, i64 0
// CHECK12-NEXT: store i32* [[ARRAYDECAY1]], i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: [[TMP3:%.*]] = load i32*, i32** [[__END1]], align 8
// CHECK12-NEXT: store i32* [[TMP3]], i32** [[DOTCAPTURE_EXPR_2]], align 8
// CHECK12-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_2]], align 8
// CHECK12-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint i32* [[TMP4]] to i64
// CHECK12-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint i32* [[TMP5]] to i64
// CHECK12-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
// CHECK12-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 4
// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
// CHECK12-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i64 [[DIV]], 1
// CHECK12-NEXT: store i64 [[SUB4]], i64* [[DOTCAPTURE_EXPR_3]], align 8
// CHECK12-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: store i32* [[TMP6]], i32** [[__BEGIN1]], align 8
// CHECK12-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_2]], align 8
// CHECK12-NEXT: [[CMP:%.*]] = icmp ult i32* [[TMP7]], [[TMP8]]
// CHECK12-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK12: omp.precond.then:
// CHECK12-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
// CHECK12-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK12-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
// CHECK12-NEXT: [[CMP6:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]]
// CHECK12-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i64 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[CMP7:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]]
// CHECK12-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP20]], 1
// CHECK12-NEXT: [[ADD_PTR8:%.*]] = getelementptr inbounds i32, i32* [[TMP19]], i64 [[MUL]]
// CHECK12-NEXT: store i32* [[ADD_PTR8]], i32** [[__BEGIN15]], align 8
// CHECK12-NEXT: [[TMP21:%.*]] = load i32*, i32** [[__BEGIN15]], align 8
// CHECK12-NEXT: store i32* [[TMP21]], i32** [[A]], align 8
// CHECK12-NEXT: [[TMP22:%.*]] = load i32*, i32** [[A]], align 8
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[ADD9:%.*]] = add nsw i64 [[TMP23]], 1
// CHECK12-NEXT: store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP25]])
// CHECK12-NEXT: br label [[OMP_PRECOND_END]]
// CHECK12: omp.precond.end:
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@_Z19range_for_collapsedv
// CHECK12-SAME: () #[[ATTR3]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[ARR:%.*]] = alloca [10 x i32], align 16
// CHECK12-NEXT: [[TMP0:%.*]] = bitcast [10 x i32]* [[ARR]] to i8*
// CHECK12-NEXT: call void @llvm.memset.p0i8.i64(i8* align 16 [[TMP0]], i8 0, i64 40, i1 false)
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [10 x i32]* [[ARR]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[ARR:%.*]]) #[[ATTR5]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[ARR_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[__RANGE1:%.*]] = alloca [10 x i32]*, align 8
// CHECK12-NEXT: [[__END1:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[__RANGE2:%.*]] = alloca [10 x i32]*, align 8
// CHECK12-NEXT: [[__END2:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[__BEGIN1:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[__BEGIN2:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[__BEGIN119:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[__BEGIN220:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[A:%.*]] = alloca i32*, align 8
// CHECK12-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK12-NEXT: store [10 x i32]* [[ARR]], [10 x i32]** [[ARR_ADDR]], align 8
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[ARR_ADDR]], align 8
// CHECK12-NEXT: store [10 x i32]* [[TMP0]], [10 x i32]** [[__RANGE1]], align 8
// CHECK12-NEXT: [[TMP1:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE1]], align 8
// CHECK12-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP1]], i64 0, i64 0
// CHECK12-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAYDECAY]], i64 10
// CHECK12-NEXT: store i32* [[ADD_PTR]], i32** [[__END1]], align 8
// CHECK12-NEXT: store [10 x i32]* [[TMP0]], [10 x i32]** [[__RANGE2]], align 8
// CHECK12-NEXT: [[TMP2:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE2]], align 8
// CHECK12-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP2]], i64 0, i64 0
// CHECK12-NEXT: [[ADD_PTR3:%.*]] = getelementptr inbounds i32, i32* [[ARRAYDECAY2]], i64 10
// CHECK12-NEXT: store i32* [[ADD_PTR3]], i32** [[__END2]], align 8
// CHECK12-NEXT: [[TMP3:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE1]], align 8
// CHECK12-NEXT: [[ARRAYDECAY4:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP3]], i64 0, i64 0
// CHECK12-NEXT: store i32* [[ARRAYDECAY4]], i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: [[TMP4:%.*]] = load i32*, i32** [[__END1]], align 8
// CHECK12-NEXT: store i32* [[TMP4]], i32** [[DOTCAPTURE_EXPR_5]], align 8
// CHECK12-NEXT: [[TMP5:%.*]] = load [10 x i32]*, [10 x i32]** [[__RANGE2]], align 8
// CHECK12-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP5]], i64 0, i64 0
// CHECK12-NEXT: store i32* [[ARRAYDECAY7]], i32** [[DOTCAPTURE_EXPR_6]], align 8
// CHECK12-NEXT: [[TMP6:%.*]] = load i32*, i32** [[__END2]], align 8
// CHECK12-NEXT: store i32* [[TMP6]], i32** [[DOTCAPTURE_EXPR_8]], align 8
// CHECK12-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_5]], align 8
// CHECK12-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint i32* [[TMP7]] to i64
// CHECK12-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint i32* [[TMP8]] to i64
// CHECK12-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
// CHECK12-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 4
// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
// CHECK12-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
// CHECK12-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
// CHECK12-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
// CHECK12-NEXT: [[SUB_PTR_LHS_CAST10:%.*]] = ptrtoint i32* [[TMP9]] to i64
// CHECK12-NEXT: [[SUB_PTR_RHS_CAST11:%.*]] = ptrtoint i32* [[TMP10]] to i64
// CHECK12-NEXT: [[SUB_PTR_SUB12:%.*]] = sub i64 [[SUB_PTR_LHS_CAST10]], [[SUB_PTR_RHS_CAST11]]
// CHECK12-NEXT: [[SUB_PTR_DIV13:%.*]] = sdiv exact i64 [[SUB_PTR_SUB12]], 4
// CHECK12-NEXT: [[SUB14:%.*]] = sub nsw i64 [[SUB_PTR_DIV13]], 1
// CHECK12-NEXT: [[ADD15:%.*]] = add nsw i64 [[SUB14]], 1
// CHECK12-NEXT: [[DIV16:%.*]] = sdiv i64 [[ADD15]], 1
// CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[DIV]], [[DIV16]]
// CHECK12-NEXT: [[SUB17:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK12-NEXT: store i64 [[SUB17]], i64* [[DOTCAPTURE_EXPR_9]], align 8
// CHECK12-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: store i32* [[TMP11]], i32** [[__BEGIN1]], align 8
// CHECK12-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
// CHECK12-NEXT: store i32* [[TMP12]], i32** [[__BEGIN2]], align 8
// CHECK12-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_5]], align 8
// CHECK12-NEXT: [[CMP:%.*]] = icmp ult i32* [[TMP13]], [[TMP14]]
// CHECK12-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK12: land.lhs.true:
// CHECK12-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
// CHECK12-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
// CHECK12-NEXT: [[CMP18:%.*]] = icmp ult i32* [[TMP15]], [[TMP16]]
// CHECK12-NEXT: br i1 [[CMP18]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
// CHECK12: omp.precond.then:
// CHECK12-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_9]], align 8
// CHECK12-NEXT: store i64 [[TMP17]], i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK12-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP19]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK12-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_9]], align 8
// CHECK12-NEXT: [[CMP21:%.*]] = icmp sgt i64 [[TMP20]], [[TMP21]]
// CHECK12-NEXT: br i1 [[CMP21]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_9]], align 8
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i64 [ [[TMP22]], [[COND_TRUE]] ], [ [[TMP23]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: store i64 [[TMP24]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[CMP22:%.*]] = icmp sle i64 [[TMP25]], [[TMP26]]
// CHECK12-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP27:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_]], align 8
// CHECK12-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
// CHECK12-NEXT: [[TMP30:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
// CHECK12-NEXT: [[SUB_PTR_LHS_CAST23:%.*]] = ptrtoint i32* [[TMP29]] to i64
// CHECK12-NEXT: [[SUB_PTR_RHS_CAST24:%.*]] = ptrtoint i32* [[TMP30]] to i64
// CHECK12-NEXT: [[SUB_PTR_SUB25:%.*]] = sub i64 [[SUB_PTR_LHS_CAST23]], [[SUB_PTR_RHS_CAST24]]
// CHECK12-NEXT: [[SUB_PTR_DIV26:%.*]] = sdiv exact i64 [[SUB_PTR_SUB25]], 4
// CHECK12-NEXT: [[SUB27:%.*]] = sub nsw i64 [[SUB_PTR_DIV26]], 1
// CHECK12-NEXT: [[ADD28:%.*]] = add nsw i64 [[SUB27]], 1
// CHECK12-NEXT: [[DIV29:%.*]] = sdiv i64 [[ADD28]], 1
// CHECK12-NEXT: [[MUL30:%.*]] = mul nsw i64 1, [[DIV29]]
// CHECK12-NEXT: [[DIV31:%.*]] = sdiv i64 [[TMP28]], [[MUL30]]
// CHECK12-NEXT: [[MUL32:%.*]] = mul nsw i64 [[DIV31]], 1
// CHECK12-NEXT: [[ADD_PTR33:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i64 [[MUL32]]
// CHECK12-NEXT: store i32* [[ADD_PTR33]], i32** [[__BEGIN119]], align 8
// CHECK12-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
// CHECK12-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP33:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP34:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
// CHECK12-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
// CHECK12-NEXT: [[SUB_PTR_LHS_CAST34:%.*]] = ptrtoint i32* [[TMP34]] to i64
// CHECK12-NEXT: [[SUB_PTR_RHS_CAST35:%.*]] = ptrtoint i32* [[TMP35]] to i64
// CHECK12-NEXT: [[SUB_PTR_SUB36:%.*]] = sub i64 [[SUB_PTR_LHS_CAST34]], [[SUB_PTR_RHS_CAST35]]
// CHECK12-NEXT: [[SUB_PTR_DIV37:%.*]] = sdiv exact i64 [[SUB_PTR_SUB36]], 4
// CHECK12-NEXT: [[SUB38:%.*]] = sub nsw i64 [[SUB_PTR_DIV37]], 1
// CHECK12-NEXT: [[ADD39:%.*]] = add nsw i64 [[SUB38]], 1
// CHECK12-NEXT: [[DIV40:%.*]] = sdiv i64 [[ADD39]], 1
// CHECK12-NEXT: [[MUL41:%.*]] = mul nsw i64 1, [[DIV40]]
// CHECK12-NEXT: [[DIV42:%.*]] = sdiv i64 [[TMP33]], [[MUL41]]
// CHECK12-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_8]], align 8
// CHECK12-NEXT: [[TMP37:%.*]] = load i32*, i32** [[DOTCAPTURE_EXPR_6]], align 8
// CHECK12-NEXT: [[SUB_PTR_LHS_CAST43:%.*]] = ptrtoint i32* [[TMP36]] to i64
// CHECK12-NEXT: [[SUB_PTR_RHS_CAST44:%.*]] = ptrtoint i32* [[TMP37]] to i64
// CHECK12-NEXT: [[SUB_PTR_SUB45:%.*]] = sub i64 [[SUB_PTR_LHS_CAST43]], [[SUB_PTR_RHS_CAST44]]
// CHECK12-NEXT: [[SUB_PTR_DIV46:%.*]] = sdiv exact i64 [[SUB_PTR_SUB45]], 4
// CHECK12-NEXT: [[SUB47:%.*]] = sub nsw i64 [[SUB_PTR_DIV46]], 1
// CHECK12-NEXT: [[ADD48:%.*]] = add nsw i64 [[SUB47]], 1
// CHECK12-NEXT: [[DIV49:%.*]] = sdiv i64 [[ADD48]], 1
// CHECK12-NEXT: [[MUL50:%.*]] = mul nsw i64 1, [[DIV49]]
// CHECK12-NEXT: [[MUL51:%.*]] = mul nsw i64 [[DIV42]], [[MUL50]]
// CHECK12-NEXT: [[SUB52:%.*]] = sub nsw i64 [[TMP32]], [[MUL51]]
// CHECK12-NEXT: [[MUL53:%.*]] = mul nsw i64 [[SUB52]], 1
// CHECK12-NEXT: [[ADD_PTR54:%.*]] = getelementptr inbounds i32, i32* [[TMP31]], i64 [[MUL53]]
// CHECK12-NEXT: store i32* [[ADD_PTR54]], i32** [[__BEGIN220]], align 8
// CHECK12-NEXT: [[TMP38:%.*]] = load i32*, i32** [[__BEGIN119]], align 8
// CHECK12-NEXT: store i32* [[TMP38]], i32** [[A]], align 8
// CHECK12-NEXT: [[TMP39:%.*]] = load i32*, i32** [[__BEGIN220]], align 8
// CHECK12-NEXT: [[TMP40:%.*]] = load i32, i32* [[TMP39]], align 4
// CHECK12-NEXT: store i32 [[TMP40]], i32* [[B]], align 4
// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4
// CHECK12-NEXT: [[TMP42:%.*]] = load i32*, i32** [[A]], align 8
// CHECK12-NEXT: store i32 [[TMP41]], i32* [[TMP42]], align 4
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[ADD55:%.*]] = add nsw i64 [[TMP43]], 1
// CHECK12-NEXT: store i64 [[ADD55]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: [[TMP44:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[TMP44]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP45]])
// CHECK12-NEXT: br label [[OMP_PRECOND_END]]
// CHECK12: omp.precond.end:
// CHECK12-NEXT: ret void
//