// llvm-project/clang/test/OpenMP/reduction_compound_op.cpp
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
//RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp -DNORM \
//RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix NORM
//RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp -DCOMP \
//RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix COMP
// Prefer compound operators since that is what the spec seems to say.
//RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp -DNORM -DCOMP \
//RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix COMP
//RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp-simd -DNORM \
//RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix SIMD-ONLY
//RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp-simd -DCOMP \
//RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix SIMD-ONLY
//RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp-simd -DNORM -DCOMP \
//RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix SIMD-ONLY
// SIMD-ONLY-NOT: {{__kmpc|__tgt}}
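// With -fopenmp-simd only the simd parts of the directives are honored, so the
// SIMD-ONLY runs must not emit any OpenMP runtime (__kmpc_*) or offloading
// (__tgt_*) calls; the NOT check above verifies their absence.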
struct Point {
  int x = 0;
  int y = 0;
#if NORM
  Point operator+(Point const &other) const;
  Point operator-(Point const &other) const;
  Point operator*(Point const &other) const;
  Point operator&(Point const &other) const;
  Point operator|(Point const &other) const;
  Point operator^(Point const &other) const;
#endif
  Point operator&&(Point const &other) const;
  Point operator||(Point const &other) const;
  Point &operator=(Point const &other);
#if COMP
  Point &operator+=(Point const &other);
  Point &operator*=(Point const &other);
  Point &operator&=(Point const &other);
  Point &operator|=(Point const &other);
  Point &operator^=(Point const &other);
#endif
};
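// Sketch of the combiner the test expects (PrivRed is only an illustrative
// name for the thread-private copy, not an identifier in the generated IR):
//   with NORM  ->  Red = Red + PrivRed;   (binary operator+ followed by operator=)
//   with COMP  ->  Red += PrivRed;        (the compound operator+= is preferred)
// The NORM-prefixed checks below show the operator+/operator= form; the
// COMP-prefixed checks later in the file cover the compound form.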
void work(Point &P, int N, Point const *Points);
void foo(int N, Point const *Points) {
  Point Red;
#pragma omp parallel for reduction(+: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);
#pragma omp parallel for reduction(-: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);
#pragma omp parallel for reduction(*: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);
#pragma omp parallel for reduction(&: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);
#pragma omp parallel for reduction(|: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);
#pragma omp parallel for reduction(^: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);
#pragma omp parallel for reduction(&&: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);
#pragma omp parallel for reduction(||: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);
}
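// For each pragma above, the NORM run checks one @.omp_outlined. body that
// default-constructs a private Point, executes its statically scheduled chunk
// between __kmpc_for_static_init_4u and __kmpc_for_static_fini, and then calls
// __kmpc_reduce_nowait. Case 1 combines the private copy into Red directly,
// case 2 performs the same combine inside a __kmpc_critical region, and both
// use the corresponding binary operator (operator+ for both the + and -
// reductions) followed by Point::operator=.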
// NORM-LABEL: define {{[^@]+}}@_Z3fooiPK5Point
// NORM-SAME: (i32 [[N:%.*]], %struct.Point* [[POINTS:%.*]]) #[[ATTR0:[0-9]+]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: [[RED:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// NORM-NEXT: store %struct.Point* [[POINTS]], %struct.Point** [[POINTS_ADDR]], align 8
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED]]) #[[ATTR4:[0-9]+]]
// NORM-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@_ZN5PointC1Ev
// NORM-SAME: (%struct.Point* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
// NORM-NEXT: entry:
// NORM-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: store %struct.Point* [[THIS]], %struct.Point** [[THIS_ADDR]], align 8
// NORM-NEXT: [[THIS1:%.*]] = load %struct.Point*, %struct.Point** [[THIS_ADDR]], align 8
// NORM-NEXT: call void @_ZN5PointC2Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]]
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined.
// NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2:[0-9]+]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// NORM-NEXT: [[I:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: [[I4:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 0, i32* [[I]], align 4
// NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// NORM: omp.precond.then:
// NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// NORM: cond.true:
// NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: br label [[COND_END:%.*]]
// NORM: cond.false:
// NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: br label [[COND_END]]
// NORM: cond.end:
// NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// NORM: omp.inner.for.cond:
// NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// NORM: omp.inner.for.body:
// NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// NORM: omp.body.continue:
// NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// NORM: omp.inner.for.inc:
// NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND]]
// NORM: omp.inner.for.end:
// NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// NORM: omp.loop.exit:
// NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// NORM-NEXT: ]
// NORM: .omp.reduction.case1:
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.case2:
// NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.default:
// NORM-NEXT: br label [[OMP_PRECOND_END]]
// NORM: omp.precond.end:
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined..1
// NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// NORM-NEXT: [[I:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: [[I4:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 0, i32* [[I]], align 4
// NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// NORM: omp.precond.then:
// NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// NORM: cond.true:
// NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: br label [[COND_END:%.*]]
// NORM: cond.false:
// NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: br label [[COND_END]]
// NORM: cond.end:
// NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// NORM: omp.inner.for.cond:
// NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// NORM: omp.inner.for.body:
// NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// NORM: omp.body.continue:
// NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// NORM: omp.inner.for.inc:
// NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND]]
// NORM: omp.inner.for.end:
// NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// NORM: omp.loop.exit:
// NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// NORM-NEXT: ]
// NORM: .omp.reduction.case1:
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.case2:
// NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.default:
// NORM-NEXT: br label [[OMP_PRECOND_END]]
// NORM: omp.precond.end:
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined..3
// NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// NORM-NEXT: [[I:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: [[I4:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 0, i32* [[I]], align 4
// NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// NORM: omp.precond.then:
// NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// NORM: cond.true:
// NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: br label [[COND_END:%.*]]
// NORM: cond.false:
// NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: br label [[COND_END]]
// NORM: cond.end:
// NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// NORM: omp.inner.for.cond:
// NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// NORM: omp.inner.for.body:
// NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// NORM: omp.body.continue:
// NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// NORM: omp.inner.for.inc:
// NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND]]
// NORM: omp.inner.for.end:
// NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// NORM: omp.loop.exit:
// NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.4, [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// NORM-NEXT: ]
// NORM: .omp.reduction.case1:
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointmlERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.case2:
// NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointmlERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.default:
// NORM-NEXT: br label [[OMP_PRECOND_END]]
// NORM: omp.precond.end:
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.4
// NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointmlERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined..5
// NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// NORM-NEXT: [[I:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: [[I4:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 0, i32* [[I]], align 4
// NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// NORM: omp.precond.then:
// NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// NORM: cond.true:
// NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: br label [[COND_END:%.*]]
// NORM: cond.false:
// NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: br label [[COND_END]]
// NORM: cond.end:
// NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// NORM: omp.inner.for.cond:
// NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// NORM: omp.inner.for.body:
// NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// NORM: omp.body.continue:
// NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// NORM: omp.inner.for.inc:
// NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND]]
// NORM: omp.inner.for.end:
// NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// NORM: omp.loop.exit:
// NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// NORM-NEXT: ]
// NORM: .omp.reduction.case1:
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointanERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.case2:
// NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointanERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.default:
// NORM-NEXT: br label [[OMP_PRECOND_END]]
// NORM: omp.precond.end:
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
// NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointanERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined..7
// NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// NORM-NEXT: [[I:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: [[I4:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 0, i32* [[I]], align 4
// NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// NORM: omp.precond.then:
// NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// NORM: cond.true:
// NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: br label [[COND_END:%.*]]
// NORM: cond.false:
// NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: br label [[COND_END]]
// NORM: cond.end:
// NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// NORM: omp.inner.for.cond:
// NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// NORM: omp.inner.for.body:
// NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// NORM: omp.body.continue:
// NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// NORM: omp.inner.for.inc:
// NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND]]
// NORM: omp.inner.for.end:
// NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// NORM: omp.loop.exit:
// NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.8, [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// NORM-NEXT: ]
// NORM: .omp.reduction.case1:
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointorERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.case2:
// NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointorERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.default:
// NORM-NEXT: br label [[OMP_PRECOND_END]]
// NORM: omp.precond.end:
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.8
// NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointorERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined..9
// NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// NORM-NEXT: [[I:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: [[I4:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 0, i32* [[I]], align 4
// NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// NORM: omp.precond.then:
// NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// NORM: cond.true:
// NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: br label [[COND_END:%.*]]
// NORM: cond.false:
// NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: br label [[COND_END]]
// NORM: cond.end:
// NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// NORM: omp.inner.for.cond:
// NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// NORM: omp.inner.for.body:
// NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// NORM: omp.body.continue:
// NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// NORM: omp.inner.for.inc:
// NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND]]
// NORM: omp.inner.for.end:
// NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// NORM: omp.loop.exit:
// NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.10, [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// NORM-NEXT: ]
// NORM: .omp.reduction.case1:
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointeoERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.case2:
// NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointeoERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.default:
// NORM-NEXT: br label [[OMP_PRECOND_END]]
// NORM: omp.precond.end:
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.10
// NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointeoERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined..11
// NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// NORM-NEXT: [[I:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: [[I4:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 0, i32* [[I]], align 4
// NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// NORM: omp.precond.then:
// NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// NORM: cond.true:
// NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: br label [[COND_END:%.*]]
// NORM: cond.false:
// NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: br label [[COND_END]]
// NORM: cond.end:
// NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// NORM: omp.inner.for.cond:
// NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// NORM: omp.inner.for.body:
// NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// NORM: omp.body.continue:
// NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// NORM: omp.inner.for.inc:
// NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND]]
// NORM: omp.inner.for.end:
// NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// NORM: omp.loop.exit:
// NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.12, [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// NORM-NEXT: ]
// NORM: .omp.reduction.case1:
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.case2:
// NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.default:
// NORM-NEXT: br label [[OMP_PRECOND_END]]
// NORM: omp.precond.end:
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.12
// NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined..13
// NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// NORM-NEXT: [[I:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: [[I4:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 0, i32* [[I]], align 4
// NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// NORM: omp.precond.then:
// NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// NORM: cond.true:
// NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: br label [[COND_END:%.*]]
// NORM: cond.false:
// NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: br label [[COND_END]]
// NORM: cond.end:
// NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// NORM: omp.inner.for.cond:
// NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// NORM: omp.inner.for.body:
// NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// NORM: omp.body.continue:
// NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// NORM: omp.inner.for.inc:
// NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND]]
// NORM: omp.inner.for.end:
// NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// NORM: omp.loop.exit:
// NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.14, [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// NORM-NEXT: ]
// NORM: .omp.reduction.case1:
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.case2:
// NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.default:
// NORM-NEXT: br label [[OMP_PRECOND_END]]
// NORM: omp.precond.end:
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.14
// NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@_ZN5PointC2Ev
// NORM-SAME: (%struct.Point* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// NORM-NEXT: entry:
// NORM-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT: store %struct.Point* [[THIS]], %struct.Point** [[THIS_ADDR]], align 8
// NORM-NEXT: [[THIS1:%.*]] = load %struct.Point*, %struct.Point** [[THIS_ADDR]], align 8
// NORM-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_POINT:%.*]], %struct.Point* [[THIS1]], i32 0, i32 0
// NORM-NEXT: store i32 0, i32* [[X]], align 4
// NORM-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_POINT]], %struct.Point* [[THIS1]], i32 0, i32 1
// NORM-NEXT: store i32 0, i32* [[Y]], align 4
// NORM-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@_Z3fooiPK5Point
// COMP-SAME: (i32 [[N:%.*]], %struct.Point* [[POINTS:%.*]]) #[[ATTR0:[0-9]+]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[RED:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// COMP-NEXT: store %struct.Point* [[POINTS]], %struct.Point** [[POINTS_ADDR]], align 8
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED]]) #[[ATTR4:[0-9]+]]
// COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@_ZN5PointC1Ev
// COMP-SAME: (%struct.Point* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
// COMP-NEXT: entry:
// COMP-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: store %struct.Point* [[THIS]], %struct.Point** [[THIS_ADDR]], align 8
// COMP-NEXT: [[THIS1:%.*]] = load %struct.Point*, %struct.Point** [[THIS_ADDR]], align 8
// COMP-NEXT: call void @_ZN5PointC2Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]]
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined.
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2:[0-9]+]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined..1
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined..3
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.4, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointmLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointmLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.4
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointmLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined..5
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaNERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaNERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaNERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined..7
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.8, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointoRERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointoRERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.8
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointoRERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined..9
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.10, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointeOERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointeOERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.10
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointeOERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined..11
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// COMP-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.12, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// COMP-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// COMP-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// COMP-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.12
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// COMP-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// COMP-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined..13
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// COMP-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.14, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// COMP-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// COMP-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// COMP-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.14
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// COMP-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// COMP-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@_ZN5PointC2Ev
// COMP-SAME: (%struct.Point* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// COMP-NEXT: entry:
// COMP-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: store %struct.Point* [[THIS]], %struct.Point** [[THIS_ADDR]], align 8
// COMP-NEXT: [[THIS1:%.*]] = load %struct.Point*, %struct.Point** [[THIS_ADDR]], align 8
// COMP-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_POINT:%.*]], %struct.Point* [[THIS1]], i32 0, i32 0
// COMP-NEXT: store i32 0, i32* [[X]], align 4
// COMP-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_POINT]], %struct.Point* [[THIS1]], i32 0, i32 1
// COMP-NEXT: store i32 0, i32* [[Y]], align 4
// COMP-NEXT: ret void
//