Mixed type atomic routines added for capture and update/capture reverse.
New mixed-type atomic routines added for regular capture operations as well as reverse update/capture operations. LHS: all integer and float types (no complex so far); RHS: float16.

Patch by Olga Malysheva

Differential Revision: https://reviews.llvm.org/D25275

llvm-svn: 284489
parent ca3072ac58, commit 55466e9106
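For orientation, a minimal user-level sketch (not part of the patch) of the kind of OpenMP construct these mixed-type entry points serve: an atomic capture whose update expression has a wider floating-point right-hand side. Mapping this snippet to a specific __kmpc_atomic_fixed4_add_cpt_fp call is an assumption about compiler lowering, and long double stands in for the _Quad/float16 RHS.

    /* sketch.c: compile with an OpenMP-enabled compiler, e.g. -fopenmp */
    #include <stdio.h>

    int main(void) {
        int x = 10;            /* LHS: 32-bit signed integer ("fixed4")        */
        long double q = 2.5L;  /* RHS: wide floating type (stand-in for _Quad) */
        int v;
    #pragma omp atomic capture
        { v = x; x = x + q; }  /* capture the old value, then update atomically */
        printf("v=%d x=%d\n", v, x);
        return 0;
    }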
@@ -924,6 +924,59 @@ kmp_set_disp_num_buffers 890
__kmpc_atomic_start 2410
__kmpc_atomic_end 2411

%ifdef HAVE_QUAD
__kmpc_atomic_fixed1_add_cpt_fp
__kmpc_atomic_fixed1_sub_cpt_fp
__kmpc_atomic_fixed1_mul_cpt_fp
__kmpc_atomic_fixed1_div_cpt_fp
__kmpc_atomic_fixed1u_add_cpt_fp
__kmpc_atomic_fixed1u_sub_cpt_fp
__kmpc_atomic_fixed1u_mul_cpt_fp
__kmpc_atomic_fixed1u_div_cpt_fp

__kmpc_atomic_fixed2_add_cpt_fp
__kmpc_atomic_fixed2_sub_cpt_fp
__kmpc_atomic_fixed2_mul_cpt_fp
__kmpc_atomic_fixed2_div_cpt_fp
__kmpc_atomic_fixed2u_add_cpt_fp
__kmpc_atomic_fixed2u_sub_cpt_fp
__kmpc_atomic_fixed2u_mul_cpt_fp
__kmpc_atomic_fixed2u_div_cpt_fp

__kmpc_atomic_fixed4_add_cpt_fp
__kmpc_atomic_fixed4_sub_cpt_fp
__kmpc_atomic_fixed4_mul_cpt_fp
__kmpc_atomic_fixed4_div_cpt_fp
__kmpc_atomic_fixed4u_add_cpt_fp
__kmpc_atomic_fixed4u_sub_cpt_fp
__kmpc_atomic_fixed4u_mul_cpt_fp
__kmpc_atomic_fixed4u_div_cpt_fp

__kmpc_atomic_fixed8_add_cpt_fp
__kmpc_atomic_fixed8_sub_cpt_fp
__kmpc_atomic_fixed8_mul_cpt_fp
__kmpc_atomic_fixed8_div_cpt_fp
__kmpc_atomic_fixed8u_add_cpt_fp
__kmpc_atomic_fixed8u_sub_cpt_fp
__kmpc_atomic_fixed8u_mul_cpt_fp
__kmpc_atomic_fixed8u_div_cpt_fp

__kmpc_atomic_float4_add_cpt_fp
__kmpc_atomic_float4_sub_cpt_fp
__kmpc_atomic_float4_mul_cpt_fp
__kmpc_atomic_float4_div_cpt_fp

__kmpc_atomic_float8_add_cpt_fp
__kmpc_atomic_float8_sub_cpt_fp
__kmpc_atomic_float8_mul_cpt_fp
__kmpc_atomic_float8_div_cpt_fp

__kmpc_atomic_float10_add_cpt_fp
__kmpc_atomic_float10_sub_cpt_fp
__kmpc_atomic_float10_mul_cpt_fp
__kmpc_atomic_float10_div_cpt_fp
%endif

%ifdef OMP_40

# ATOMIC extensions for OpenMP 4.0 spec (x86 and x64 only)
@@ -1002,11 +1055,120 @@ kmp_set_disp_num_buffers 890
%endif
%endif

__kmpc_atomic_fixed1_sub_rev 2470
__kmpc_atomic_fixed1_div_rev 2471
__kmpc_atomic_fixed1u_div_rev 2472
__kmpc_atomic_fixed1_shl_rev 2473
__kmpc_atomic_fixed1_shr_rev 2474
__kmpc_atomic_fixed1u_shr_rev 2475
__kmpc_atomic_fixed2_sub_rev 2476
__kmpc_atomic_fixed2_div_rev 2477
__kmpc_atomic_fixed2u_div_rev 2478
__kmpc_atomic_fixed2_shl_rev 2479
__kmpc_atomic_fixed2_shr_rev 2480
__kmpc_atomic_fixed2u_shr_rev 2481
__kmpc_atomic_fixed4_sub_rev 2482
__kmpc_atomic_fixed4_div_rev 2483
__kmpc_atomic_fixed4u_div_rev 2484
__kmpc_atomic_fixed4_shl_rev 2485
__kmpc_atomic_fixed4_shr_rev 2486
__kmpc_atomic_fixed4u_shr_rev 2487
__kmpc_atomic_fixed8_sub_rev 2488
__kmpc_atomic_fixed8_div_rev 2489
__kmpc_atomic_fixed8u_div_rev 2490
__kmpc_atomic_fixed8_shl_rev 2491
__kmpc_atomic_fixed8_shr_rev 2492
__kmpc_atomic_fixed8u_shr_rev 2493
__kmpc_atomic_float4_sub_rev 2494
__kmpc_atomic_float4_div_rev 2495
__kmpc_atomic_float8_sub_rev 2496
__kmpc_atomic_float8_div_rev 2497
__kmpc_atomic_float10_sub_rev 2498
__kmpc_atomic_float10_div_rev 2499
%ifdef HAVE_QUAD
__kmpc_atomic_float16_sub_rev 2500
__kmpc_atomic_float16_div_rev 2501
%endif
__kmpc_atomic_cmplx4_sub_rev 2502
__kmpc_atomic_cmplx4_div_rev 2503
__kmpc_atomic_cmplx8_sub_rev 2504
__kmpc_atomic_cmplx8_div_rev 2505
__kmpc_atomic_cmplx10_sub_rev 2506
__kmpc_atomic_cmplx10_div_rev 2507
%ifdef HAVE_QUAD
__kmpc_atomic_cmplx16_sub_rev 2508
__kmpc_atomic_cmplx16_div_rev 2509
%ifdef arch_32
__kmpc_atomic_float16_sub_a16_rev 2510
__kmpc_atomic_float16_div_a16_rev 2511
__kmpc_atomic_cmplx16_sub_a16_rev 2512
__kmpc_atomic_cmplx16_div_a16_rev 2513
%endif
%endif

%ifdef HAVE_QUAD
__kmpc_atomic_fixed1_sub_rev_fp
__kmpc_atomic_fixed1_div_rev_fp
__kmpc_atomic_fixed1u_div_rev_fp
__kmpc_atomic_fixed2_sub_rev_fp
__kmpc_atomic_fixed2_div_rev_fp
__kmpc_atomic_fixed2u_div_rev_fp
__kmpc_atomic_fixed4_sub_rev_fp
__kmpc_atomic_fixed4_div_rev_fp
__kmpc_atomic_fixed4u_div_rev_fp
__kmpc_atomic_fixed8_sub_rev_fp
__kmpc_atomic_fixed8_div_rev_fp
__kmpc_atomic_fixed8u_div_rev_fp
__kmpc_atomic_float4_sub_rev_fp
__kmpc_atomic_float4_div_rev_fp
__kmpc_atomic_float8_sub_rev_fp
__kmpc_atomic_float8_div_rev_fp
__kmpc_atomic_float10_sub_rev_fp
__kmpc_atomic_float10_div_rev_fp

__kmpc_atomic_fixed1_sub_cpt_rev_fp
__kmpc_atomic_fixed1u_sub_cpt_rev_fp
__kmpc_atomic_fixed1_div_cpt_rev_fp
__kmpc_atomic_fixed1u_div_cpt_rev_fp
__kmpc_atomic_fixed2_sub_cpt_rev_fp
__kmpc_atomic_fixed2u_sub_cpt_rev_fp
__kmpc_atomic_fixed2_div_cpt_rev_fp
__kmpc_atomic_fixed2u_div_cpt_rev_fp
__kmpc_atomic_fixed4_sub_cpt_rev_fp
__kmpc_atomic_fixed4u_sub_cpt_rev_fp
__kmpc_atomic_fixed4_div_cpt_rev_fp
__kmpc_atomic_fixed4u_div_cpt_rev_fp
__kmpc_atomic_fixed8_sub_cpt_rev_fp
__kmpc_atomic_fixed8u_sub_cpt_rev_fp
__kmpc_atomic_fixed8_div_cpt_rev_fp
__kmpc_atomic_fixed8u_div_cpt_rev_fp
__kmpc_atomic_float4_sub_cpt_rev_fp
__kmpc_atomic_float4_div_cpt_rev_fp
__kmpc_atomic_float8_sub_cpt_rev_fp
__kmpc_atomic_float8_div_cpt_rev_fp
__kmpc_atomic_float10_sub_cpt_rev_fp
__kmpc_atomic_float10_div_cpt_rev_fp
%endif
%endif # OMP_40


%endif # arch_64

%ifdef HAVE_QUAD
__kmpc_atomic_fixed1u_add_fp
__kmpc_atomic_fixed1u_sub_fp
__kmpc_atomic_fixed1u_mul_fp
__kmpc_atomic_fixed2u_add_fp
__kmpc_atomic_fixed2u_sub_fp
__kmpc_atomic_fixed2u_mul_fp
__kmpc_atomic_fixed4u_add_fp
__kmpc_atomic_fixed4u_sub_fp
__kmpc_atomic_fixed4u_mul_fp
__kmpc_atomic_fixed8u_add_fp
__kmpc_atomic_fixed8u_sub_fp
__kmpc_atomic_fixed8u_mul_fp
%endif

%endif

# end of file #
@@ -1389,6 +1389,21 @@ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE)
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

// -------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_REV_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
    ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
    OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
    OP_CMPXCHG_REV(TYPE,BITS,OP) \
}
#define ATOMIC_CRITICAL_REV_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
    OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
    OP_CRITICAL_REV(OP,LCK_ID) \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
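A non-atomic sketch, for illustration only and not the macro expansion, of what a "reverse" mixed-type operation computes: the _Quad right-hand side appears on the left of the operator, so the update is x = rhs OP x rather than x = x OP rhs. Here long double stands in for _Quad and int for kmp_int32.

    /* Reverse division for the fixed4 ("kmp_int32") / fp ("_Quad") combination,
       ignoring atomicity. */
    static void fixed4_div_rev_sketch(int *lhs, long double rhs) {
        *lhs = (int)(rhs / *lhs);   /* RHS divided by the old LHS value */
    }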
// RHS=float8
ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8
ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8

@@ -1455,6 +1470,39 @@ ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 )
ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_fp
ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp
ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// Reverse operations
ATOMIC_CMPXCHG_REV_MIX( fixed1, char, sub_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed1u, uchar, sub_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed1, char, div_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed1u, uchar, div_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev_fp

ATOMIC_CMPXCHG_REV_MIX( fixed2, short, sub_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed2u, ushort, sub_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed2, short, div_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed2u, ushort, div_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev_fp

ATOMIC_CMPXCHG_REV_MIX( fixed4, kmp_int32, sub_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed4u, kmp_uint32, sub_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed4, kmp_int32, div_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed4u, kmp_uint32, div_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_rev_fp

ATOMIC_CMPXCHG_REV_MIX( fixed8, kmp_int64, sub_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed8u, kmp_uint64, sub_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed8, kmp_int64, div_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev_fp
ATOMIC_CMPXCHG_REV_MIX( fixed8u, kmp_uint64, div_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev_fp

ATOMIC_CMPXCHG_REV_MIX( float4, kmp_real32, sub_rev, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( float4, kmp_real32, div_rev, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev_fp

ATOMIC_CMPXCHG_REV_MIX( float8, kmp_real64, sub_rev, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev_fp
ATOMIC_CMPXCHG_REV_MIX( float8, kmp_real64, div_rev, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev_fp

ATOMIC_CRITICAL_REV_FP( float10, long double, sub_rev, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_rev_fp
ATOMIC_CRITICAL_REV_FP( float10, long double, div_rev, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_rev_fp
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
@@ -1924,6 +1972,92 @@ ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __k
ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG

//////////////////////////////////

// CAPTURE routines for mixed types RHS=float16
#if KMP_HAVE_QUAD

// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
#define ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \
    TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs, int flag ) \
    { \
        KMP_DEBUG_ASSERT( __kmp_init_serial ); \
        KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid ));

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_CPT_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \
    OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}

// -------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_MIX(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \
}
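A hand-written rough equivalent, an assumption rather than the actual OP_CMPXCHG_CPT expansion, of what one ATOMIC_CMPXCHG_CPT_MIX instantiation produces, here for a 32-bit integer LHS with add and a _Quad-like RHS. It shows only the capture semantics and the compare-and-swap retry loop, using GCC/Clang __atomic builtins, with long double standing in for _Quad.

    /* Simplified sketch of the behaviour behind __kmpc_atomic_fixed4_add_cpt_fp. */
    static int fixed4_add_cpt_fp_sketch(int *lhs, long double rhs, int flag) {
        int old_value, new_value;
        do {
            old_value = *lhs;                    /* read the current value */
            new_value = (int)(old_value + rhs);  /* mixed-type update      */
        } while (!__atomic_compare_exchange_n(lhs, &old_value, new_value,
                                              0 /* strong */, __ATOMIC_ACQ_REL,
                                              __ATOMIC_ACQUIRE));
        return flag ? new_value : old_value;     /* flag selects which value
                                                    the caller captures     */
    }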

ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, add_cpt, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, add_cpt, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, sub_cpt, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, sub_cpt, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, mul_cpt, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, mul_cpt, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, div_cpt, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, div_cpt, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_fp

ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, add_cpt, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, add_cpt, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, sub_cpt, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, sub_cpt, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, mul_cpt, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, mul_cpt, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, div_cpt, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, div_cpt, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_fp

ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, add_cpt, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, add_cpt, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, sub_cpt, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, sub_cpt, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, mul_cpt, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, mul_cpt, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, div_cpt, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, div_cpt, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_cpt_fp

ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, add_cpt, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, add_cpt, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, sub_cpt, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, sub_cpt, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, mul_cpt, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, mul_cpt, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, div_cpt, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, div_cpt, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_fp

ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, add_cpt, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, sub_cpt, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, mul_cpt, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, div_cpt, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_fp

ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, add_cpt, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, sub_cpt, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, mul_cpt, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt_fp
ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, div_cpt, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_fp

ATOMIC_CRITICAL_CPT_MIX( float10, long double, add_cpt, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_cpt_fp
ATOMIC_CRITICAL_CPT_MIX( float10, long double, sub_cpt, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_fp
ATOMIC_CRITICAL_CPT_MIX( float10, long double, mul_cpt, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt_fp
ATOMIC_CRITICAL_CPT_MIX( float10, long double, div_cpt, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_fp

#endif //KMP_HAVE_QUAD

///////////////////////////////////

// ------------------------------------------------------------------------
// Routines for C/C++ Reduction operators && and ||
// ------------------------------------------------------------------------
@@ -2404,6 +2538,61 @@ ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 )
#endif
#endif

// Capture reverse for mixed type: RHS=float16
#if KMP_HAVE_QUAD

// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_CPT_REV_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
    OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
}

// -------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_REV_MIX(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL_CPT_REV(OP,LCK_ID) /* send assignment */ \
}
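As with the forward capture macros, a simplified illustration, not the real expansion and not atomic as written, of the semantics the *_cpt_rev_fp routines implement: the update is x = rhs OP x, and the flag argument is assumed to select whether the new or the old value is returned to the caller. long double stands in for _Quad.

    /* Simplified sketch of the behaviour behind __kmpc_atomic_fixed2_sub_cpt_rev_fp. */
    static short fixed2_sub_cpt_rev_fp_sketch(short *lhs, long double rhs, int flag) {
        short old_value = *lhs;
        short new_value = (short)(rhs - old_value); /* reverse subtraction       */
        *lhs = new_value;                           /* real routine does this
                                                       under CAS or a lock       */
        return flag ? new_value : old_value;        /* captured value            */
    }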

ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1, char, sub_cpt_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1u, uchar, sub_cpt_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1, char, div_cpt_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1u, uchar, div_cpt_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev_fp

ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2, short, sub_cpt_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2u, ushort, sub_cpt_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2, short, div_cpt_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2u, ushort, div_cpt_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev_fp

ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4, kmp_int32, sub_cpt_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4u, kmp_uint32, sub_cpt_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4, kmp_int32, div_cpt_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4u, kmp_uint32, div_cpt_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_cpt_rev_fp

ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8, kmp_int64, sub_cpt_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8u, kmp_uint64, sub_cpt_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8, kmp_int64, div_cpt_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8u, kmp_uint64, div_cpt_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev_fp

ATOMIC_CMPXCHG_CPT_REV_MIX( float4, kmp_real32, sub_cpt_rev, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( float4, kmp_real32, div_cpt_rev, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev_fp

ATOMIC_CMPXCHG_CPT_REV_MIX( float8, kmp_real64, sub_cpt_rev, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev_fp
ATOMIC_CMPXCHG_CPT_REV_MIX( float8, kmp_real64, div_cpt_rev, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev_fp

ATOMIC_CRITICAL_CPT_REV_MIX( float10, long double, sub_cpt_rev, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev_fp
ATOMIC_CRITICAL_CPT_REV_MIX( float10, long double, div_cpt_rev, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev_fp

#endif //KMP_HAVE_QUAD


// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}

#define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
@@ -726,6 +726,27 @@ void __kmpc_atomic_float10_add_fp( ident_t *id_ref, int gtid, long double * lhs,
void __kmpc_atomic_float10_sub_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs );
void __kmpc_atomic_float10_mul_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs );
void __kmpc_atomic_float10_div_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs );

// Reverse operations
void __kmpc_atomic_fixed1_sub_rev_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs );
void __kmpc_atomic_fixed1_div_rev_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs );
void __kmpc_atomic_fixed1u_div_rev_fp( ident_t *id_ref, int gtid, unsigned char * lhs, _Quad rhs );
void __kmpc_atomic_fixed2_sub_rev_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs );
void __kmpc_atomic_fixed2_div_rev_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs );
void __kmpc_atomic_fixed2u_div_rev_fp( ident_t *id_ref, int gtid, unsigned short * lhs, _Quad rhs );
void __kmpc_atomic_fixed4_sub_rev_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs );
void __kmpc_atomic_fixed4_div_rev_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs );
void __kmpc_atomic_fixed4u_div_rev_fp( ident_t *id_ref, int gtid, kmp_uint32 * lhs, _Quad rhs );
void __kmpc_atomic_fixed8_sub_rev_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs );
void __kmpc_atomic_fixed8_div_rev_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs );
void __kmpc_atomic_fixed8u_div_rev_fp( ident_t *id_ref, int gtid, kmp_uint64 * lhs, _Quad rhs );
void __kmpc_atomic_float4_sub_rev_fp( ident_t *id_ref, int gtid, float * lhs, _Quad rhs );
void __kmpc_atomic_float4_div_rev_fp( ident_t *id_ref, int gtid, float * lhs, _Quad rhs );
void __kmpc_atomic_float8_sub_rev_fp( ident_t *id_ref, int gtid, double * lhs, _Quad rhs );
void __kmpc_atomic_float8_div_rev_fp( ident_t *id_ref, int gtid, double * lhs, _Quad rhs );
void __kmpc_atomic_float10_sub_rev_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs );
void __kmpc_atomic_float10_div_rev_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs );

#endif // KMP_HAVE_QUAD

// RHS=cmplx8
@@ -1035,10 +1056,89 @@ kmp_cmplx80 __kmpc_atomic_cmplx10_swp( ident_t *id_ref, int gtid, kmp_cmplx80 *
CPLX128_LEG __kmpc_atomic_cmplx16_swp( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs );
#if ( KMP_ARCH_X86 )
Quad_a16_t __kmpc_atomic_float16_a16_swp( ident_t *id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_a16_swp( ident_t *id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs );
#endif
#endif

// Capture routines for mixed types (RHS=float16)
#if KMP_HAVE_QUAD

char __kmpc_atomic_fixed1_add_cpt_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs, int flag );
char __kmpc_atomic_fixed1_sub_cpt_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs, int flag );
char __kmpc_atomic_fixed1_mul_cpt_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs, int flag );
char __kmpc_atomic_fixed1_div_cpt_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs, int flag );
unsigned char __kmpc_atomic_fixed1u_add_cpt_fp( ident_t *id_ref, int gtid, unsigned char * lhs, _Quad rhs, int flag );
unsigned char __kmpc_atomic_fixed1u_sub_cpt_fp( ident_t *id_ref, int gtid, unsigned char * lhs, _Quad rhs, int flag );
unsigned char __kmpc_atomic_fixed1u_mul_cpt_fp( ident_t *id_ref, int gtid, unsigned char * lhs, _Quad rhs, int flag );
unsigned char __kmpc_atomic_fixed1u_div_cpt_fp( ident_t *id_ref, int gtid, unsigned char * lhs, _Quad rhs, int flag );

short __kmpc_atomic_fixed2_add_cpt_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs, int flag );
short __kmpc_atomic_fixed2_sub_cpt_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs, int flag );
short __kmpc_atomic_fixed2_mul_cpt_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs, int flag );
short __kmpc_atomic_fixed2_div_cpt_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs, int flag );
unsigned short __kmpc_atomic_fixed2u_add_cpt_fp( ident_t *id_ref, int gtid, unsigned short * lhs, _Quad rhs, int flag );
unsigned short __kmpc_atomic_fixed2u_sub_cpt_fp( ident_t *id_ref, int gtid, unsigned short * lhs, _Quad rhs, int flag );
unsigned short __kmpc_atomic_fixed2u_mul_cpt_fp( ident_t *id_ref, int gtid, unsigned short * lhs, _Quad rhs, int flag );
unsigned short __kmpc_atomic_fixed2u_div_cpt_fp( ident_t *id_ref, int gtid, unsigned short * lhs, _Quad rhs, int flag );

kmp_int32 __kmpc_atomic_fixed4_add_cpt_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs, int flag );
kmp_int32 __kmpc_atomic_fixed4_sub_cpt_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs, int flag );
kmp_int32 __kmpc_atomic_fixed4_mul_cpt_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs, int flag );
kmp_int32 __kmpc_atomic_fixed4_div_cpt_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs, int flag );
kmp_uint32 __kmpc_atomic_fixed4u_add_cpt_fp( ident_t *id_ref, int gtid, kmp_uint32 * lhs, _Quad rhs, int flag );
kmp_uint32 __kmpc_atomic_fixed4u_sub_cpt_fp( ident_t *id_ref, int gtid, kmp_uint32 * lhs, _Quad rhs, int flag );
kmp_uint32 __kmpc_atomic_fixed4u_mul_cpt_fp( ident_t *id_ref, int gtid, kmp_uint32 * lhs, _Quad rhs, int flag );
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt_fp( ident_t *id_ref, int gtid, kmp_uint32 * lhs, _Quad rhs, int flag );

kmp_int64 __kmpc_atomic_fixed8_add_cpt_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs, int flag );
kmp_int64 __kmpc_atomic_fixed8_sub_cpt_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs, int flag );
kmp_int64 __kmpc_atomic_fixed8_mul_cpt_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs, int flag );
kmp_int64 __kmpc_atomic_fixed8_div_cpt_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs, int flag );
kmp_uint64 __kmpc_atomic_fixed8u_add_cpt_fp( ident_t *id_ref, int gtid, kmp_uint64 * lhs, _Quad rhs, int flag );
kmp_uint64 __kmpc_atomic_fixed8u_sub_cpt_fp( ident_t *id_ref, int gtid, kmp_uint64 * lhs, _Quad rhs, int flag );
kmp_uint64 __kmpc_atomic_fixed8u_mul_cpt_fp( ident_t *id_ref, int gtid, kmp_uint64 * lhs, _Quad rhs, int flag );
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt_fp( ident_t *id_ref, int gtid, kmp_uint64 * lhs, _Quad rhs, int flag );

float __kmpc_atomic_float4_add_cpt_fp( ident_t *id_ref, int gtid, kmp_real32 * lhs, _Quad rhs, int flag );
float __kmpc_atomic_float4_sub_cpt_fp( ident_t *id_ref, int gtid, kmp_real32 * lhs, _Quad rhs, int flag );
float __kmpc_atomic_float4_mul_cpt_fp( ident_t *id_ref, int gtid, kmp_real32 * lhs, _Quad rhs, int flag );
float __kmpc_atomic_float4_div_cpt_fp( ident_t *id_ref, int gtid, kmp_real32 * lhs, _Quad rhs, int flag );

double __kmpc_atomic_float8_add_cpt_fp( ident_t *id_ref, int gtid, kmp_real64 * lhs, _Quad rhs, int flag );
double __kmpc_atomic_float8_sub_cpt_fp( ident_t *id_ref, int gtid, kmp_real64 * lhs, _Quad rhs, int flag );
double __kmpc_atomic_float8_mul_cpt_fp( ident_t *id_ref, int gtid, kmp_real64 * lhs, _Quad rhs, int flag );
double __kmpc_atomic_float8_div_cpt_fp( ident_t *id_ref, int gtid, kmp_real64 * lhs, _Quad rhs, int flag );

long double __kmpc_atomic_float10_add_cpt_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs, int flag );
long double __kmpc_atomic_float10_sub_cpt_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs, int flag );
long double __kmpc_atomic_float10_mul_cpt_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs, int flag );
long double __kmpc_atomic_float10_div_cpt_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs, int flag );

char __kmpc_atomic_fixed1_sub_cpt_rev_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs, int flag );
unsigned char __kmpc_atomic_fixed1u_sub_cpt_rev_fp( ident_t *id_ref, int gtid, unsigned char * lhs, _Quad rhs, int flag );
char __kmpc_atomic_fixed1_div_cpt_rev_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs, int flag );
unsigned char __kmpc_atomic_fixed1u_div_cpt_rev_fp( ident_t *id_ref, int gtid, unsigned char * lhs, _Quad rhs, int flag );
short __kmpc_atomic_fixed2_sub_cpt_rev_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs, int flag );
unsigned short __kmpc_atomic_fixed2u_sub_cpt_rev_fp( ident_t *id_ref, int gtid, unsigned short * lhs, _Quad rhs, int flag );
short __kmpc_atomic_fixed2_div_cpt_rev_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs, int flag );
unsigned short __kmpc_atomic_fixed2u_div_cpt_rev_fp( ident_t *id_ref, int gtid, unsigned short * lhs, _Quad rhs, int flag );
kmp_int32 __kmpc_atomic_fixed4_sub_cpt_rev_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs, int flag );
kmp_uint32 __kmpc_atomic_fixed4u_sub_cpt_rev_fp( ident_t *id_ref, int gtid, kmp_uint32 * lhs, _Quad rhs, int flag );
kmp_int32 __kmpc_atomic_fixed4_div_cpt_rev_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs, int flag );
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt_rev_fp( ident_t *id_ref, int gtid, kmp_uint32 * lhs, _Quad rhs, int flag );
kmp_int64 __kmpc_atomic_fixed8_sub_cpt_rev_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs, int flag );
kmp_uint64 __kmpc_atomic_fixed8u_sub_cpt_rev_fp( ident_t *id_ref, int gtid, kmp_uint64 * lhs, _Quad rhs, int flag );
kmp_int64 __kmpc_atomic_fixed8_div_cpt_rev_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs, int flag );
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt_rev_fp( ident_t *id_ref, int gtid, kmp_uint64 * lhs, _Quad rhs, int flag );
float __kmpc_atomic_float4_sub_cpt_rev_fp( ident_t *id_ref, int gtid, float * lhs, _Quad rhs, int flag );
float __kmpc_atomic_float4_div_cpt_rev_fp( ident_t *id_ref, int gtid, float * lhs, _Quad rhs, int flag );
double __kmpc_atomic_float8_sub_cpt_rev_fp( ident_t *id_ref, int gtid, double * lhs, _Quad rhs, int flag );
double __kmpc_atomic_float8_div_cpt_rev_fp( ident_t *id_ref, int gtid, double * lhs, _Quad rhs, int flag );
long double __kmpc_atomic_float10_sub_cpt_rev_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs, int flag );
long double __kmpc_atomic_float10_div_cpt_rev_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs, int flag );

#endif // KMP_HAVE_QUAD

// End of OpenMP 4.0 capture

#endif //OMP_40_ENABLED