// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-32
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-32
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA -check-prefix=LAMBDA-32 %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS -check-prefix=BLOCKS-32 %s

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}

// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-64
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-pc-linux-gnu -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA -check-prefix=LAMBDA-64 %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-pc-linux-gnu -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS -check-prefix=BLOCKS-64 %s

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// SIMD-ONLY1-NOT: {{__kmpc|__tgt}}

// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=ARRAY %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY2 %s
// SIMD-ONLY2-NOT: {{__kmpc|__tgt}}
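//
// The CHECK-32/CHECK-64 prefixes cover the i386 and x86_64 runs, LAMBDA and
// BLOCKS cover the lambda- and blocks-capture variants, ARRAY covers the VLA
// tests at the end of the file, and the SIMD-ONLY* prefixes verify that no
// OpenMP runtime calls (__kmpc*/__tgt*) are emitted under -fopenmp-simd.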
// expected-no-diagnostics
#ifndef ARRAY
#ifndef HEADER
#define HEADER

enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__
};
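// This enum mirrors the allocator handle values used by the OpenMP runtime,
// so the allocate() clauses below can be exercised without including the
// runtime header.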

struct St {
  int a, b;
  St() : a(0), b(0) {}
  St(const St &st) : a(st.a + st.b), b(0) {}
  ~St() {}
};
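// St is the type of the defaulted argument of S's copy constructor below, so
// copy-constructing a firstprivate copy of S must create and destroy an St
// temporary.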

volatile int g __attribute__((aligned(128))) = 1212;

struct SS {
  int a;
  int b : 4;
  int &c;
  int e[4];
  SS(int &d) : a(0), b(0), c(d) {
#pragma omp parallel firstprivate(a, b, c, e)
#ifdef LAMBDA
    [&]() {
      ++this->a, --b, (this)->c /= 1;
#pragma omp parallel firstprivate(a, b, c)
      ++(this)->a, --b, this->c /= 1;
    }();
#elif defined(BLOCKS)
    ^{
      ++a;
      --this->b;
      (this)->c /= 1;
#pragma omp parallel firstprivate(a, b, c)
      ++(this)->a, --b, this->c /= 1;
    }();
#else
    ++this->a, --b, c /= 1, e[2] = 1111;
#endif
  }
};
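// SS exercises firstprivate on non-static data members captured inside a
// constructor: a plain int, a bit-field, a reference member, and an array,
// each privatized through the implicit 'this' capture.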

template<typename T>
struct SST {
  T a;
  SST() : a(T()) {
#pragma omp parallel firstprivate(a)
#ifdef LAMBDA
    [&]() {
      [&]() {
        ++this->a;
#pragma omp parallel firstprivate(a)
        ++(this)->a;
      }();
    }();
#elif defined(BLOCKS)
    ^{
      ^{
        ++a;
#pragma omp parallel firstprivate(a)
        ++(this)->a;
      }();
    }();
#else
    ++(this)->a;
#endif
  }
};
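// SST is the dependent (templated) counterpart of SS; it checks the same
// member privatization when the member type is a template parameter.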

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  S(const S &s, St t = St()) : f(s.f + t.a) {}
  operator T() { return T(); }
  ~S() {}
};
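// S<T> has a non-trivial copy constructor (with a defaulted St argument) and
// destructor, so firstprivate copies of S objects must be created with the
// copy constructor and destroyed at the end of the parallel region.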

// CHECK: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// LAMBDA: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// BLOCKS: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[ST_TY:%.+]] = type { i{{[0-9]+}}, i{{[0-9]+}} }

template <typename T>
T tmain() {
  S<T> test;
  SST<T> sst;
  T t_var __attribute__((aligned(128))) = T();
  T vec[] __attribute__((aligned(128))) = {1, 2};
  S<T> s_arr[] __attribute__((aligned(128))) = {1, 2};
  S<T> var __attribute__((aligned(128))) (3);
#pragma omp parallel firstprivate(t_var, vec, s_arr, var)
  {
    vec[0] = t_var;
    s_arr[0] = var;
  }
#pragma omp parallel firstprivate(t_var)
  {}
  return T();
}
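// Every local in tmain() is over-aligned to 128 bytes; the TMAIN_MICROTASK
// checks below verify that the private copies keep that alignment.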

int main() {
  static int sivar;
  SS ss(sivar);
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: alloca [[SS_TY]],
  // LAMBDA: alloca [[CAP_TY:%.+]],
  // LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@[^(]+]]([[CAP_TY]]*
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 2, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* [[G]], {{.+}})
#pragma omp parallel firstprivate(g, sivar)
  {
    // LAMBDA: define {{.+}} @{{.+}}([[SS_TY]]*
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
    // LAMBDA: store i8
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
    // LAMBDA: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*, [[iz:i64|i32]], {{i64|i32}}, {{i64|i32}}, [4 x i{{[0-9]+}}]*)* [[SS_MICROTASK:@.+]] to void
    // LAMBDA: ret

    // LAMBDA: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}}, [4 x i{{[0-9]+}}]* {{.+}})
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %
    // LAMBDA: call{{.*}} void
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}})
    // LAMBDA: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA-64: [[A_CONV:%.+]] = bitcast i64* [[A_PRIV]] to i32*
    // LAMBDA-64: [[B_CONV:%.+]] = bitcast i64* [[B_PRIV]] to i32*
    // LAMBDA-64: [[C_CONV:%.+]] = bitcast i64* [[C_PRIV]] to i32*
    // LAMBDA-64: store i32* [[A_CONV]], i32** [[REFA:%.+]],
    // LAMBDA-32: store i32* [[A_PRIV]], i32** [[REFA:%.+]],
    // LAMBDA-64: store i32* [[C_CONV]], i32** [[REFC:%.+]],
    // LAMBDA-32: store i32* [[C_PRIV]], i32** [[REFC:%.+]],
    // LAMBDA-NEXT: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
    // LAMBDA-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-64-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_CONV]],
    // LAMBDA-32-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
    // LAMBDA-64-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_CONV]],
    // LAMBDA-32-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
    // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA-NEXT: ret void

    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [[iz]] {{.*}}%{{.+}})
    // LAMBDA: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_REF_ADDR:%.+]]
    // LAMBDA-64: [[SIVAR_PRIVATE_CONV:%.+]] = bitcast i64* [[SIVAR_PRIVATE_ADDR]] to i32*
    // LAMBDA: [[G_VAL:%.+]] = load volatile i{{[0-9]+}}, i{{[0-9]+}}* [[G_REF]], align 128
    // LAMBDA: store i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G_PRIVATE_ADDR]], align 128
    // LAMBDA-NOT: call {{.*}}void @__kmpc_barrier(
    g = 1;
    sivar = 2;
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA-64: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR_PRIVATE_CONV]],
    // LAMBDA-32: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
    // LAMBDA-64: store i{{[0-9]+}}* [[SIVAR_PRIVATE_CONV]], i{{[0-9]+}}** [[SIVAR_PRIVATE_ADDR_REF]]
    // LAMBDA-32: store i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]], i{{[0-9]+}}** [[SIVAR_PRIVATE_ADDR_REF]]
    // LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      sivar = 4;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: [[SIVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
      // LAMBDA: [[SIVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SIVAR_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call
  // BLOCKS: call {{.*}}void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call {{.*}}void {{.+}} @__kmpc_fork_call({{.+}}, i32 2, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* [[G]], {{.+}})
#pragma omp parallel firstprivate(g, sivar)
  {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [[iz:i64|i32]] {{.*}}%{{.+}})
    // BLOCKS: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // BLOCKS: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_REF_ADDR:%.+]]
    // BLOCKS-64: [[SIVAR_PRIVATE_CONV:%.+]] = bitcast i64* [[SIVAR_PRIVATE_ADDR]] to i32*
    // BLOCKS: [[G_VAL:%.+]] = load volatile i{{[0-9]+}}, i{{[0-9]+}}* [[G_REF]], align 128
    // BLOCKS: store i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G_PRIVATE_ADDR]], align 128
    // BLOCKS-NOT: call {{.*}}void @__kmpc_barrier(
    g = 1;
    sivar = 2;
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS-64: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR_PRIVATE_CONV]],
    // BLOCKS-32: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
    // BLOCKS-64: i{{[0-9]+}}* [[SIVAR_PRIVATE_CONV]]
    // BLOCKS-32: i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
    // BLOCKS: call {{.*}}void {{%.+}}(i8
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      sivar = 4;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 4, i{{[0-9]+}}*
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;

  // BLOCKS: define {{.+}} @{{.+}}([[SS_TY]]*
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
  // BLOCKS: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
  // BLOCKS: store i8
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
  // BLOCKS: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*, [[iz]], [[iz]], [[iz]], [4 x i{{[0-9]+}}]*)* [[SS_MICROTASK:@.+]] to void
  // BLOCKS: ret

  // BLOCKS: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}}, [4 x i{{[0-9]+}}]* {{.+}})
  // BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %
  // BLOCKS: call{{.*}} void
  // BLOCKS: ret void

  // BLOCKS: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}})
  // BLOCKS: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
  // BLOCKS: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
  // BLOCKS: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
  // BLOCKS-64: [[A_CONV:%.+]] = bitcast i64* [[A_PRIV]] to i32*
  // BLOCKS-64: [[B_CONV:%.+]] = bitcast i64* [[B_PRIV]] to i32*
  // BLOCKS-64: [[C_CONV:%.+]] = bitcast i64* [[C_PRIV]] to i32*
  // BLOCKS-64: store i32* [[A_CONV]], i32** [[REFA:%.+]],
  // BLOCKS-32: store i32* [[A_PRIV]], i32** [[REFA:%.+]],
  // BLOCKS-64: store i32* [[C_CONV]], i32** [[REFC:%.+]],
  // BLOCKS-32: store i32* [[C_PRIV]], i32** [[REFC:%.+]],
  // BLOCKS-NEXT: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
  // BLOCKS-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
  // BLOCKS-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
  // BLOCKS-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
  // BLOCKS-64-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_CONV]],
  // BLOCKS-32-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
  // BLOCKS-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
  // BLOCKS-64-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_CONV]],
  // BLOCKS-32-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
  // BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
  // BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
  // BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
  // BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
  // BLOCKS-NEXT: ret void
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel firstprivate(t_var, vec, s_arr, var, sivar)
  {
    vec[0] = t_var;
    s_arr[0] = var;
    sivar = 2;
  }
  const int a = 0;
#pragma omp parallel allocate(omp_default_mem_alloc: t_var) firstprivate(t_var, a)
  { t_var = a; }
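  // The allocate(omp_default_mem_alloc: t_var) clause makes the private copy
  // of t_var come from __kmpc_alloc/__kmpc_free rather than the stack; the
  // MAIN_MICROTASK1 checks below verify the allocator handle (1) that is
  // passed through.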
  return tmain<int>();
#endif
}

// CHECK: define {{.*}}i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[T_VAR:%.+]] = alloca i32,
// CHECK: [[T_VARCAST:%.+]] = alloca [[iz:i64|i32]],
// CHECK: [[SIVARCAST:%.+]] = alloca [[iz]],
// CHECK: [[A:%.+]] = alloca i32,
// CHECK: [[T_VARCAST1:%.+]] = alloca [[iz:i64|i32]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: [[T_VARVAL:%.+]] = load i32, i32* [[T_VAR]],
// CHECK-64: [[T_VARCONV:%.+]] = bitcast i64* [[T_VARCAST]] to i32*
// CHECK-64: store i32 [[T_VARVAL]], i32* [[T_VARCONV]],
// CHECK-32: store i32 [[T_VARVAL]], i32* [[T_VARCAST]],
// CHECK: [[T_VARPVT:%.+]] = load [[iz]], [[iz]]* [[T_VARCAST]],
// CHECK: [[SIVARVAL:%.+]] = load i32, i32* @{{.+}},
// CHECK-64: [[SIVARCONV:%.+]] = bitcast i64* [[SIVARCAST]] to i32*
// CHECK-64: store i32 [[SIVARVAL]], i32* [[SIVARCONV]],
// CHECK-32: store i32 [[SIVARVAL]], i32* [[SIVARCAST]],
// CHECK: [[SIVARPVT:%.+]] = load [[iz]], [[iz]]* [[SIVARCAST]],
// CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [2 x i32]*, [[iz]], [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i{{[0-9]+}})* [[MAIN_MICROTASK:@.+]] to void {{.*}}[[iz]] [[T_VARPVT]],{{.*}}[[iz]] [[SIVARPVT]]
// CHECK: [[T_VARVAL:%.+]] = load i32, i32* [[T_VAR]],
// CHECK-64: [[T_VARCONV:%.+]] = bitcast i64* [[T_VARCAST1]] to i32*
// CHECK-64: store i32 [[T_VARVAL]], i32* [[T_VARCONV]],
// CHECK-32: store i32 [[T_VARVAL]], i32* [[T_VARCAST1]],
// CHECK: [[T_VARPVT:%.+]] = load [[iz]], [[iz]]* [[T_VARCAST1]],
// CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[iz]])* [[MAIN_MICROTASK1:@.+]] to void {{.*}}[[iz]] [[T_VARPVT]])
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
//
// CHECK: define internal {{.*}}void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [[iz]] {{.*}}%{{.+}}, [2 x [[S_FLOAT_TY]]]* dereferenceable(8) %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}, [[iz]] {{.*}}[[SIVAR:%.+]])
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[SIVAR7_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK-NOT: load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK-64: [[T_VAR_CONV:%.+]] = bitcast i64* [[T_VAR_PRIV]] to i32*
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
// CHECK-NOT: load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK-64: [[SIVAR7_CONV:%.+]] = bitcast i64* [[SIVAR7_PRIV]] to i32*
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_DEST]], i8* align {{[0-9]+}} [[VEC_SRC]],
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_BEGIN:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[S_ARR_REF]] to [[S_FLOAT_TY]]*
// CHECK: [[S_ARR_PRIV_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_PRIV_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_PRIV_BEGIN]], [[S_ARR_PRIV_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[ST_TY_DEFAULT_CONSTR:@.+]]([[ST_TY]]* [[ST_TY_TEMP:%.+]])
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_CONSTR:@.+]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}}, [[ST_TY]]* [[ST_TY_TEMP]])
// CHECK: call {{.*}} [[ST_TY_DESTR:@.+]]([[ST_TY]]* [[ST_TY_TEMP]])
// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
// CHECK: call {{.*}} [[ST_TY_DEFAULT_CONSTR]]([[ST_TY]]* [[ST_TY_TEMP:%.+]])
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]], [[S_FLOAT_TY]]* {{.*}} [[VAR_REF]], [[ST_TY]]* [[ST_TY_TEMP]])
// CHECK: call {{.*}} [[ST_TY_DESTR]]([[ST_TY]]* [[ST_TY_TEMP]])
// CHECK-64: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR7_CONV]],
// CHECK-32: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR7_PRIV]],
// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[iz]] [[T_VAR:%.+]])
// CHECK: [[GTID_ADDR:%.+]] = alloca i32*,
// CHECK: store [[iz]] [[T_VAR]], [[iz]]* [[T_VAR_ADDR:%.+]],
// CHECK-64: [[BC:%.+]] = bitcast [[iz]]* [[T_VAR_ADDR]] to i32*
// CHECK: [[GTID_PTR:%.+]] = load i32*, i32** [[GTID_ADDR]],
// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_PTR]],
// CHECK: [[T_VAR_VOID_PTR:%.+]] = call i8* @__kmpc_alloc(i32 [[GTID]], [[iz]] 4, i8* inttoptr ([[iz]] 1 to i8*))
// CHECK: [[T_VAR_PRIV:%.+]] = bitcast i8* [[T_VAR_VOID_PTR]] to i32*
// CHECK-32: [[T_VAR_VAL:%.+]] = load i32, i32* [[T_VAR_ADDR]],
// CHECK-64: [[T_VAR_VAL:%.+]] = load i32, i32* [[BC]],
// CHECK: store i32 [[T_VAR_VAL]], i32* [[T_VAR_PRIV]],
// CHECK: store i32 0, i32* [[T_VAR_PRIV]],
// CHECK: call void @__kmpc_free(i32 [[GTID]], i8* [[T_VAR_VOID_PTR]], i8* inttoptr ([[iz]] 1 to i8*))
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call {{.*}}void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [2 x i32]*, i32*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define {{.+}} @{{.+}}([[SS_TY]]*
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: store i8
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*, [[iz]], [[iz]], [[iz]], [4 x i32]*)* [[SS_MICROTASK:@.+]] to void
// CHECK: ret

// CHECK: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}}, [[iz]] {{.+}}, [4 x i{{[0-9]+}}]* {{.+}})
// CHECK: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[E_PRIV:%.+]] = alloca [4 x i{{[0-9]+}}],
// CHECK: store i{{[0-9]+}} {{.+}}, i{{[0-9]+}}* [[A_PRIV]]
// CHECK: store i{{[0-9]+}} {{.+}}, i{{[0-9]+}}* [[B_PRIV]]
// CHECK: store i{{[0-9]+}} {{.+}}, i{{[0-9]+}}* [[C_PRIV]]
// CHECK-64: [[A_CONV:%.+]] = bitcast i64* [[A_PRIV:%.+]] to i32*
// CHECK-64: [[B_CONV:%.+]] = bitcast i64* [[B_PRIV:%.+]] to i32*
// CHECK-64: [[C_CONV:%.+]] = bitcast i64* [[C_PRIV:%.+]] to i32*
// CHECK-64: store i32* [[A_CONV]], i32** [[REFA:%.+]],
// CHECK-32: store i32* [[A_PRIV]], i32** [[REFA:%.+]],
// CHECK-64: store i32* [[C_CONV]], i32** [[REFC:%.+]],
// CHECK-32: store i32* [[C_PRIV]], i32** [[REFC:%.+]],
// CHECK: bitcast [4 x i{{[0-9]+}}]* [[E_PRIV]] to i8*
// CHECK: bitcast [4 x i{{[0-9]+}}]* %{{.+}} to i8*
// CHECK: call void @llvm.memcpy
// CHECK: store [4 x i{{[0-9]+}}]* [[E_PRIV]], [4 x i{{[0-9]+}}]** [[REFE:%.+]],
// CHECK-NEXT: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// CHECK-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// CHECK-64-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_CONV]],
// CHECK-32-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// CHECK-64-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_CONV]],
// CHECK-32-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// CHECK-NEXT: [[E_PRIV:%.+]] = load [4 x i{{[0-9]+}}]*, [4 x i{{[0-9]+}}]** [[REFE]],
// CHECK-NEXT: [[E_PRIV_2:%.+]] = getelementptr inbounds [4 x i{{[0-9]+}}], [4 x i{{[0-9]+}}]* [[E_PRIV]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
// CHECK-NEXT: store i32 1111, i32* [[E_PRIV_2]],
// CHECK-NEXT: ret void

// CHECK: define internal {{.*}}void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x [[S_INT_TY]]]* dereferenceable(8) %{{.+}}, [[S_INT_TY]]* dereferenceable(4) %{{.+}})
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, align 128
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], align 128
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], align 128
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], align 128
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]], align 128
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_PRIV]], align 128
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* align 128 [[VEC_DEST]], i8* align 128 [[VEC_SRC]], i{{[0-9]+}} {{[0-9]+}}, i1
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_REF]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_PRIV_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_PRIV_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_PRIV_BEGIN]], [[S_ARR_PRIV_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[ST_TY_DEFAULT_CONSTR]]([[ST_TY]]* [[ST_TY_TEMP:%.+]])
// CHECK: call {{.*}} [[S_INT_TY_COPY_CONSTR:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}}, [[ST_TY]]* [[ST_TY_TEMP]])
// CHECK: call {{.*}} [[ST_TY_DESTR]]([[ST_TY]]* [[ST_TY_TEMP]])
// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
// CHECK: call {{.*}} [[ST_TY_DEFAULT_CONSTR]]([[ST_TY]]* [[ST_TY_TEMP:%.+]])
// CHECK: call {{.*}} [[S_INT_TY_COPY_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]], [[S_INT_TY]]* {{.*}} [[VAR_REF]], [[ST_TY]]* [[ST_TY_TEMP]])
// CHECK: call {{.*}} [[ST_TY_DESTR]]([[ST_TY]]* [[ST_TY_TEMP]])
// CHECK-NOT: call {{.*}}void @__kmpc_barrier(
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: ret void

#endif
#else

enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__
};

struct St {
  int a, b;
  St() : a(0), b(0) {}
  St(const St &) { }
  ~St() {}
  void St_func(St s[2], int n, long double vla1[n]) {
    double vla2[n][n] __attribute__((aligned(128)));
    a = b;
#pragma omp parallel allocate(omp_thread_mem_alloc:vla2) firstprivate(s, vla1, vla2)
    vla1[b] = vla2[1][n - 1] = a = b;
  }
};
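// In St_func the firstprivate copy of the VLA 'vla2' is requested from the
// omp_thread_mem_alloc allocator (handle 8): the ARRAY checks below show the
// size being rounded up to a multiple of 128 bytes, the call to __kmpc_alloc,
// the memcpy of the original contents, and the matching __kmpc_free.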

// ARRAY-LABEL: array_func
void array_func(float a[3], St s[2], int n, long double vla1[n]) {
  double vla2[n][n] __attribute__((aligned(128)));
// ARRAY: @__kmpc_fork_call(
// ARRAY-DAG: [[PRIV_S:%.+]] = alloca %struct.St*,
// ARRAY-DAG: [[PRIV_VLA1:%.+]] = alloca x86_fp80*,
// ARRAY-DAG: [[PRIV_A:%.+]] = alloca float*,
// ARRAY-DAG: [[PRIV_VLA2:%.+]] = alloca double*,
// ARRAY-DAG: store %struct.St* %{{.+}}, %struct.St** [[PRIV_S]],
// ARRAY-DAG: store x86_fp80* %{{.+}}, x86_fp80** [[PRIV_VLA1]],
// ARRAY-DAG: store float* %{{.+}}, float** [[PRIV_A]],
// ARRAY-DAG: store double* %{{.+}}, double** [[PRIV_VLA2]],
// ARRAY: call i8* @llvm.stacksave()
// ARRAY: [[SIZE:%.+]] = mul nuw i64 %{{.+}}, 8
// ARRAY: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 %{{.+}}, i8* align 128 %{{.+}}, i64 [[SIZE]], i1 false)
#pragma omp parallel firstprivate(a, s, vla1, vla2)
  s[0].St_func(s, n, vla1);
  ;
}
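// For the array parameters 'a' and 's' (which decay to pointers) the
// firstprivate copy is just the pointer itself, while the VLA 'vla2' gets a
// fresh allocation whose contents are copied with a 128-byte-aligned memcpy.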

// ARRAY-LABEL: St_func
// ARRAY: @__kmpc_fork_call(
// ARRAY-DAG: [[PRIV_VLA1:%.+]] = alloca x86_fp80*,
// ARRAY-DAG: [[PRIV_S:%.+]] = alloca %struct.St*,
// ARRAY-DAG: [[PRIV_VLA2:%.+]] = alloca double*,
// ARRAY-DAG: store %struct.St* %{{.+}}, %struct.St** [[PRIV_S]],
// ARRAY-DAG: store x86_fp80* %{{.+}}, x86_fp80** [[PRIV_VLA1]],
// ARRAY-DAG: store double* %{{.+}}, double** [[PRIV_VLA2]],
// ARRAY: [[SIZE:%.+]] = mul nuw i64 %{{.+}}, 8
// ARRAY: [[SZ1:%.+]] = add nuw i64 [[SIZE]], 127
// ARRAY: [[SZ2:%.+]] = udiv i64 [[SZ1]], 128
// ARRAY: [[SIZE:%.+]] = mul nuw i64 [[SZ2]], 128
// ARRAY: [[VLA2_VOID_PTR:%.+]] = call i8* @__kmpc_alloc(i32 [[GTID:%.+]], i64 [[SIZE]], i8* inttoptr (i64 8 to i8*))
// ARRAY: [[VLA2_PTR:%.+]] = bitcast i8* [[VLA2_VOID_PTR]] to double*
// ARRAY: [[SIZE:%.+]] = mul nuw i64 %{{.+}}, 8
// ARRAY: [[BC:%.+]] = bitcast double* [[VLA2_PTR]] to i8*
// ARRAY: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[BC]], i8* align 128 %{{.+}}, i64 [[SIZE]], i1 false)
// ARRAY: call void @__kmpc_free(i32 [[GTID]], i8* [[VLA2_VOID_PTR]], i8* inttoptr (i64 8 to i8*))
// ARRAY-NEXT: ret void
#endif